From a09b9071dcd421f53f871358acdfe4c9bbd380b9 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 11 Dec 2024 19:41:37 +0000 Subject: [PATCH 01/20] Update to latest models --- .../api-change-artifact-68323.json | 5 + .../api-change-cloudtrail-39887.json | 5 + .../api-change-cognitoidp-8257.json | 5 + .../api-change-controlcatalog-50136.json | 5 + .../api-change-emrserverless-60875.json | 5 + .../next-release/api-change-mgh-14397.json | 5 + .../next-release/api-change-sesv2-43901.json | 5 + .../api-change-timestreaminfluxdb-1068.json | 5 + .../artifact/2018-05-10/paginators-1.json | 6 + .../data/artifact/2018-05-10/service-2.json | 136 +++++ .../data/cloudtrail/2013-11-01/service-2.json | 4 +- .../cognito-idp/2016-04-18/service-2.json | 540 +++++++++--------- .../controlcatalog/2018-05-10/service-2.json | 2 +- .../emr-serverless/2021-07-13/service-2.json | 8 +- .../mgh/2017-05-31/endpoint-rule-set-1.json | 365 ++++++------ .../data/mgh/2017-05-31/paginators-1.json | 12 + botocore/data/mgh/2017-05-31/service-2.json | 287 +++++++++- .../sesv2/2019-09-27/endpoint-rule-set-1.json | 190 ++++++ .../data/sesv2/2019-09-27/paginators-1.json | 9 +- botocore/data/sesv2/2019-09-27/service-2.json | 322 +++++++++++ .../2023-01-27/service-2.json | 48 +- .../endpoint-rules/mgh/endpoint-tests-1.json | 291 +++++----- .../sesv2/endpoint-tests-1.json | 157 +++++ 23 files changed, 1794 insertions(+), 623 deletions(-) create mode 100644 .changes/next-release/api-change-artifact-68323.json create mode 100644 .changes/next-release/api-change-cloudtrail-39887.json create mode 100644 .changes/next-release/api-change-cognitoidp-8257.json create mode 100644 .changes/next-release/api-change-controlcatalog-50136.json create mode 100644 .changes/next-release/api-change-emrserverless-60875.json create mode 100644 .changes/next-release/api-change-mgh-14397.json create mode 100644 .changes/next-release/api-change-sesv2-43901.json create mode 100644 .changes/next-release/api-change-timestreaminfluxdb-1068.json diff --git a/.changes/next-release/api-change-artifact-68323.json b/.changes/next-release/api-change-artifact-68323.json new file mode 100644 index 0000000000..c305773505 --- /dev/null +++ b/.changes/next-release/api-change-artifact-68323.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``artifact``", + "description": "Add support for listing active customer agreements for the calling AWS Account." +} diff --git a/.changes/next-release/api-change-cloudtrail-39887.json b/.changes/next-release/api-change-cloudtrail-39887.json new file mode 100644 index 0000000000..021601dbaf --- /dev/null +++ b/.changes/next-release/api-change-cloudtrail-39887.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cloudtrail``", + "description": "Doc-only updates for CloudTrail." 
+} diff --git a/.changes/next-release/api-change-cognitoidp-8257.json b/.changes/next-release/api-change-cognitoidp-8257.json new file mode 100644 index 0000000000..a50fd9e39d --- /dev/null +++ b/.changes/next-release/api-change-cognitoidp-8257.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cognito-idp``", + "description": "Updated descriptions for some API operations and parameters, corrected some errors in Cognito user pools" +} diff --git a/.changes/next-release/api-change-controlcatalog-50136.json b/.changes/next-release/api-change-controlcatalog-50136.json new file mode 100644 index 0000000000..dcf87dd4f2 --- /dev/null +++ b/.changes/next-release/api-change-controlcatalog-50136.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``controlcatalog``", + "description": "Minor documentation updates to the content of ImplementationDetails object part of the Control Catalog GetControl API" +} diff --git a/.changes/next-release/api-change-emrserverless-60875.json b/.changes/next-release/api-change-emrserverless-60875.json new file mode 100644 index 0000000000..30954ae85c --- /dev/null +++ b/.changes/next-release/api-change-emrserverless-60875.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``emr-serverless``", + "description": "This release adds support for accessing system profile logs in Lake Formation-enabled jobs." +} diff --git a/.changes/next-release/api-change-mgh-14397.json b/.changes/next-release/api-change-mgh-14397.json new file mode 100644 index 0000000000..7d821c0250 --- /dev/null +++ b/.changes/next-release/api-change-mgh-14397.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``mgh``", + "description": "API and documentation updates for AWS MigrationHub related to adding support for listing migration task updates and associating, disassociating and listing source resources" +} diff --git a/.changes/next-release/api-change-sesv2-43901.json b/.changes/next-release/api-change-sesv2-43901.json new file mode 100644 index 0000000000..b63d229ddb --- /dev/null +++ b/.changes/next-release/api-change-sesv2-43901.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``sesv2``", + "description": "Introduces support for multi-region endpoint." 
+} diff --git a/.changes/next-release/api-change-timestreaminfluxdb-1068.json b/.changes/next-release/api-change-timestreaminfluxdb-1068.json new file mode 100644 index 0000000000..5cde1fc625 --- /dev/null +++ b/.changes/next-release/api-change-timestreaminfluxdb-1068.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``timestream-influxdb``", + "description": "Adds networkType parameter to CreateDbInstance API which allows IPv6 support to the InfluxDB endpoint" +} diff --git a/botocore/data/artifact/2018-05-10/paginators-1.json b/botocore/data/artifact/2018-05-10/paginators-1.json index f8c8514425..ba4271a9e5 100644 --- a/botocore/data/artifact/2018-05-10/paginators-1.json +++ b/botocore/data/artifact/2018-05-10/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "reports" + }, + "ListCustomerAgreements": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "customerAgreements" } } } diff --git a/botocore/data/artifact/2018-05-10/service-2.json b/botocore/data/artifact/2018-05-10/service-2.json index 21144648ac..f7b10da971 100644 --- a/botocore/data/artifact/2018-05-10/service-2.json +++ b/botocore/data/artifact/2018-05-10/service-2.json @@ -92,6 +92,23 @@ ], "documentation":"

Get the Term content associated with a single report.

" }, + "ListCustomerAgreements":{ + "name":"ListCustomerAgreements", + "http":{ + "method":"GET", + "requestUri":"/v1/customer-agreement/list", + "responseCode":200 + }, + "input":{"shape":"ListCustomerAgreementsRequest"}, + "output":{"shape":"ListCustomerAgreementsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

List active customer-agreements applicable to the calling identity.

" + }, "ListReports":{ "name":"ListReports", "http":{ @@ -164,6 +181,20 @@ }, "documentation":"

Account settings for the customer.

" }, + "AgreementTerms":{ + "type":"list", + "member":{"shape":"LongStringAttribute"}, + "max":10, + "min":0 + }, + "AgreementType":{ + "type":"string", + "enum":[ + "CUSTOM", + "DEFAULT", + "MODIFIED" + ] + }, "ConflictException":{ "type":"structure", "required":[ @@ -189,6 +220,80 @@ }, "exception":true }, + "CustomerAgreementIdAttribute":{ + "type":"string", + "pattern":"customer-agreement-[a-zA-Z0-9]{16}" + }, + "CustomerAgreementList":{ + "type":"list", + "member":{"shape":"CustomerAgreementSummary"} + }, + "CustomerAgreementState":{ + "type":"string", + "enum":[ + "ACTIVE", + "CUSTOMER_TERMINATED", + "AWS_TERMINATED" + ] + }, + "CustomerAgreementSummary":{ + "type":"structure", + "members":{ + "name":{ + "shape":"LongStringAttribute", + "documentation":"

Name of the customer-agreement resource.

" + }, + "arn":{ + "shape":"LongStringAttribute", + "documentation":"

ARN of the customer-agreement resource.

" + }, + "id":{ + "shape":"CustomerAgreementIdAttribute", + "documentation":"

Identifier of the customer-agreement resource.

" + }, + "agreementArn":{ + "shape":"LongStringAttribute", + "documentation":"

ARN of the agreement resource the customer-agreement resource represents.

" + }, + "awsAccountId":{ + "shape":"ShortStringAttribute", + "documentation":"

AWS account ID that owns the resource.

" + }, + "organizationArn":{ + "shape":"LongStringAttribute", + "documentation":"

ARN of the organization that owns the resource.

" + }, + "effectiveStart":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating when the agreement became effective.

" + }, + "effectiveEnd":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating when the agreement was terminated.

" + }, + "state":{ + "shape":"CustomerAgreementState", + "documentation":"

State of the resource.

" + }, + "description":{ + "shape":"LongStringAttribute", + "documentation":"

Description of the resource.

" + }, + "acceptanceTerms":{ + "shape":"AgreementTerms", + "documentation":"

Terms required to accept the agreement resource.

" + }, + "terminateTerms":{ + "shape":"AgreementTerms", + "documentation":"

Terms required to terminate the customer-agreement resource.

" + }, + "type":{ + "shape":"AgreementType", + "documentation":"

Type of the customer-agreement resource.

" + } + }, + "documentation":"

Summary for customer-agreement resource.

" + }, "GetAccountSettingsRequest":{ "type":"structure", "members":{ @@ -326,6 +431,37 @@ "fault":true, "retryable":{"throttling":false} }, + "ListCustomerAgreementsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResultsAttribute", + "documentation":"

Maximum number of resources to return in the paginated response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

Pagination token to request the next page of resources.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListCustomerAgreementsResponse":{ + "type":"structure", + "required":["customerAgreements"], + "members":{ + "customerAgreements":{ + "shape":"CustomerAgreementList", + "documentation":"

List of customer-agreement resources.

" + }, + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

Pagination token to request the next page of resources.

" + } + } + }, "ListReportsRequest":{ "type":"structure", "members":{ diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index c40007a132..524d093523 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -1349,7 +1349,7 @@ "documentation":"

Contains all selector statements in an advanced event selector.

" } }, - "documentation":"

Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those events that are important to you. For more information about configuring advanced event selectors, see the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.

You cannot apply both event selectors and advanced event selectors to a trail.

Supported CloudTrail event record fields for management events

The following additional fields are available for event data stores:

Supported CloudTrail event record fields for data events

The following additional fields are available for event data stores:

Supported CloudTrail event record fields for network activity events

Network activity events is in preview release for CloudTrail and is subject to change.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.

" + "documentation":"

Advanced event selectors let you create fine-grained selectors for CloudTrail management, data, and network activity events. They help you control costs by logging only those events that are important to you. For more information about configuring advanced event selectors, see the Logging data events, Logging network activity events, and Logging management events topics in the CloudTrail User Guide.

You cannot apply both event selectors and advanced event selectors to a trail.

For information about configurable advanced event selector fields, see AdvancedEventSelector in the CloudTrail User Guide.

" }, "AdvancedEventSelectors":{ "type":"list", @@ -1361,7 +1361,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.

For CloudTrail management events, supported fields include eventCategory (required), eventSource, and readOnly. The following additional fields are available for event data stores: eventName, eventType, sessionCredentialFromConsole, and userIdentity.arn.

For CloudTrail data events, supported fields include eventCategory (required), resources.type (required), eventName, readOnly, and resources.ARN. The following additional fields are available for event data stores: eventSource, eventType, sessionCredentialFromConsole, and userIdentity.arn.

For CloudTrail network activity events, supported fields include eventCategory (required), eventSource (required), eventName, errorCode, and vpcEndpointId.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.

" + "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.

For more information, see AdvancedFieldSelector in the CloudTrail User Guide.

" }, "Equals":{ "shape":"Operator", diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index fe6dac1fed..dbc18432f6 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -30,7 +30,7 @@ {"shape":"UserImportInProgressException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Adds additional user attributes to the user pool schema.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Adds additional user attributes to the user pool schema. Custom attributes can be mutable or immutable and have a custom: or dev: prefix. For more information, see Custom attributes.

You can also create custom attributes in the Schema parameter of CreateUserPool and UpdateUserPool. You can't delete custom attributes after you create them.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminAddUserToGroup":{ "name":"AdminAddUserToGroup", @@ -70,7 +70,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

This IAM-authenticated API operation confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required.

This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Confirms user sign-up as an administrator. Unlike ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation code is required.

This request sets a user account active in a user pool that requires confirmation of new user accounts before they can sign in. You can configure your user pool to not send confirmation codes to new users and instead confirm them with this API operation on the back end.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

To configure your user pool to require administrative confirmation of users, set AllowAdminCreateUserOnly to true in a CreateUserPool or UpdateUserPool request.

" }, "AdminCreateUser":{ "name":"AdminCreateUser", @@ -115,7 +115,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes a user as an administrator. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Deletes a user profile in your user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminDeleteUserAttributes":{ "name":"AdminDeleteUserAttributes", @@ -133,7 +133,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes the user attributes in a user pool as an administrator. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Deletes attribute values from a user. This operation doesn't affect tokens for existing user sessions. The next ID token that the user receives will no longer have this attribute.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminDisableProviderForUser":{ "name":"AdminDisableProviderForUser", @@ -170,7 +170,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deactivates a user and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to GetUser and ListUsers API requests.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Deactivates a user profile and revokes all access tokens for the user. A deactivated user can't sign in, but still appears in the responses to ListUsers API requests.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminEnableUser":{ "name":"AdminEnableUser", @@ -188,7 +188,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Enables the specified user as an administrator. Works on any user.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Activates sign-in for a user profile that previously had sign-in access disabled.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminForgetDevice":{ "name":"AdminForgetDevice", @@ -206,7 +206,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Forgets the device, as an administrator.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Forgets, or deletes, a remembered device from a user's profile. After you forget the device, the user can no longer complete device authentication with that device and, when applicable, must submit MFA codes again. For more information, see Working with devices.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminGetDevice":{ "name":"AdminGetDevice", @@ -224,7 +224,7 @@ {"shape":"InternalErrorException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Gets the device, as an administrator.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Given the device key, returns details for a user's device. For more information, see Working with devices.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminGetUser":{ "name":"AdminGetUser", @@ -242,7 +242,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Gets the specified user by user name in a user pool as an administrator. Works on any user. This operation contributes to your monthly active user (MAU) count for the purpose of billing.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Given the username, returns details about a user profile in a user pool. This operation contributes to your monthly active user (MAU) count for the purpose of billing. You can specify alias attributes in the Username parameter.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminInitiateAuth":{ "name":"AdminInitiateAuth", @@ -270,7 +270,7 @@ {"shape":"UserNotFoundException"}, {"shape":"UserNotConfirmedException"} ], - "documentation":"

Initiates the authentication flow, as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Starts sign-in for applications with a server-side component, for example a traditional web application. This operation specifies the authentication flow that you'd like to begin. The authentication flow that you specify must be supported in your app client configuration. For more information about authentication flows, see Authentication flows.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminLinkProviderForUser":{ "name":"AdminLinkProviderForUser", @@ -308,7 +308,7 @@ {"shape":"InternalErrorException"}, {"shape":"NotAuthorizedException"} ], - "documentation":"

Lists a user's registered devices.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Lists a user's registered devices. Remembered devices are used in authentication services where you offer a \"Remember me\" option for users who you want to permit to sign in without MFA from a trusted device. Users can bypass MFA while your application performs device SRP authentication on the back end. For more information, see Working with devices.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminListGroupsForUser":{ "name":"AdminListGroupsForUser", @@ -326,7 +326,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Lists the groups that a user belongs to.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Lists the groups that a user belongs to. User pool groups are identifiers that you can reference from the contents of ID and access tokens, and set preferred IAM roles for identity-pool authentication. For more information, see Adding groups to a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminListUserAuthEvents":{ "name":"AdminListUserAuthEvents", @@ -345,7 +345,7 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

A history of user activity and any risks detected as part of Amazon Cognito advanced security.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Requests a history of user activity and any risks detected as part of Amazon Cognito threat protection. For more information, see Viewing user event history.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminRemoveUserFromGroup":{ "name":"AdminRemoveUserFromGroup", @@ -362,7 +362,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Removes the specified user from the specified group.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Given a username and a group name, removes the user from the group. User pool groups are identifiers that you can reference from the contents of ID and access tokens, and set preferred IAM roles for identity-pool authentication. For more information, see Adding groups to a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminResetUserPassword":{ "name":"AdminResetUserPassword", @@ -387,7 +387,7 @@ {"shape":"InvalidSmsRoleTrustRelationshipException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Resets the specified user's password in a user pool as an administrator. Works on any user.

To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Deactivates a user's password, requiring them to change it. If a user tries to sign in after the API is called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then perform the actions that reset your user's password: the forgot-password flow. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Resets the specified user's password in a user pool. This operation doesn't change the user's password, but sends a password-reset code. This operation is the administrative authentication API equivalent to ForgotPassword.

This operation deactivates a user's password, requiring them to change it. If a user tries to sign in after the API request, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then complete the forgot-password flow by prompting the user for their code and a new password, then submitting those values in a ConfirmForgotPassword request. In addition, if the user pool has phone verification selected and a verified phone number exists for the user, or if email verification is selected and a verified email exists for the user, calling this API will also result in sending a message to the end user with the code to change their password.

To use this API operation, your user pool must have self-service account recovery configured. Use AdminSetUserPassword if you manage passwords as an administrator.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminRespondToAuthChallenge":{ "name":"AdminRespondToAuthChallenge", @@ -440,7 +440,7 @@ {"shape":"UserNotConfirmedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Sets the user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Sets the user's multi-factor authentication (MFA) preference, including which MFA options are activated, and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in.

This operation doesn't reset an existing TOTP MFA for a user. To register a new TOTP factor for a user, make an AssociateSoftwareToken request. For more information, see TOTP software token MFA.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminSetUserPassword":{ "name":"AdminSetUserPassword", @@ -460,7 +460,7 @@ {"shape":"InvalidPasswordException"}, {"shape":"PasswordHistoryPolicyViolationException"} ], - "documentation":"

Sets the specified user's password in a user pool as an administrator. Works on any user.

The password can be temporary or permanent. If it is temporary, the user status enters the FORCE_CHANGE_PASSWORD state. When the user next tries to sign in, the InitiateAuth/AdminInitiateAuth response will contain the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before it expires, the user won't be able to sign in, and an administrator must reset their password.

Once the user has set a new password, or the password is permanent, the user status is set to Confirmed.

AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Sets the specified user's password in a user pool. This operation administratively sets a temporary or permanent password for a user. With this operation, you can bypass self-service password changes and permit immediate sign-in with the password that you set. To do this, set Permanent to true.

You can also set a new temporary password in this request, send it to a user, and require them to choose a new password on their next sign-in. To do this, set Permanent to false.

If the password is temporary, the user's Status becomes FORCE_CHANGE_PASSWORD. When the user next tries to sign in, the InitiateAuth or AdminInitiateAuth response includes the NEW_PASSWORD_REQUIRED challenge. If the user doesn't sign in before the temporary password expires, they can no longer sign in and you must repeat this operation to set a temporary or permanent password for them.

After the user sets a new password, or if you set a permanent password, their status becomes Confirmed.

AdminSetUserPassword can set a password for the user profile that Amazon Cognito creates for third-party federated users. When you set a password, the federated user's status changes from EXTERNAL_PROVIDER to CONFIRMED. A user in this state can sign in as a federated user, and initiate authentication flows in the API like a linked native user. They can also modify their password and attributes in token-authenticated API requests like ChangePassword and UpdateUserAttributes. As a best security practice and to keep users in sync with your external IdP, don't set passwords on federated user profiles. To set up a federated user for native sign-in with a linked native user, refer to Linking federated users to an existing user profile.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminSetUserSettings":{ "name":"AdminSetUserSettings", @@ -496,7 +496,7 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Provides feedback for an authentication event indicating if it was from a valid user. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito threat protection. To train the threat-protection model to recognize trusted and untrusted sign-in characteristics, configure threat protection in audit-only mode and provide a mechanism for users or administrators to submit feedback. Your feedback can tell Amazon Cognito that a risk rating was assigned at a level you don't agree with.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUpdateDeviceStatus":{ "name":"AdminUpdateDeviceStatus", @@ -515,7 +515,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Updates the device status as an administrator.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Updates the status of a user's device so that it is marked as remembered or not remembered for the purpose of device authentication. Device authentication is a \"remember me\" mechanism that silently completes sign-in from trusted devices with a device key instead of a user-provided MFA code. This operation changes the status of a device without deleting it, so you can enable it again later. For more information about device authentication, see Working with devices.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUpdateUserAttributes":{ "name":"AdminUpdateUserAttributes", @@ -540,7 +540,7 @@ {"shape":"InvalidEmailRoleAccessPolicyException"}, {"shape":"InvalidSmsRoleTrustRelationshipException"} ], - "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user's attributes, including developer attributes, as an administrator. Works on any user. To delete an attribute from your user, submit the attribute in your API request with a blank value.

For custom attributes, you must prepend the custom: prefix to the attribute name.

In addition to updating user attributes, this API can also be used to mark phone and email as verified.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Updates the specified user's attributes. To delete an attribute from your user, submit the attribute in your API request with a blank value.

For custom attributes, you must prepend the custom: prefix to the attribute name.

This operation can set a user's email address or phone number as verified and permit immediate sign-in in user pools that require verification of these attributes. To do this, set the email_verified or phone_number_verified attribute to true.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AdminUserGlobalSignOut":{ "name":"AdminUserGlobalSignOut", @@ -558,7 +558,7 @@ {"shape":"UserNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation with your administrative credentials when your user signs out of your app. This results in the following behavior.

Other requests might be valid until your user's token expires.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation with your administrative credentials when your user signs out of your app. This results in the following behavior.

Other requests might be valid until your user's token expires. This operation doesn't clear the managed login session cookie. To clear the session for a user who signed in with managed login or the classic hosted UI, direct their browser session to the logout endpoint.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "AssociateSoftwareToken":{ "name":"AssociateSoftwareToken", @@ -577,7 +577,7 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -656,7 +656,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Confirms tracking of the device. This API call is the call that begins device tracking. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

Confirms a device that a user wants to remember. A remembered device is a \"Remember me on this device\" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -687,7 +687,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Allows a user to enter a confirmation code to reset a forgotten password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

This public API operation accepts a confirmation code that Amazon Cognito sent to a user, along with a new password for that user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -716,7 +716,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

This public API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.

Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

This public API operation submits a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.

Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -737,7 +737,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new group in the specified user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Creates a new group in the specified user pool. For more information about user pool groups, see Adding groups to a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateIdentityProvider":{ "name":"CreateIdentityProvider", @@ -756,7 +756,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool. Amazon Cognito accepts sign-in with third-party identity providers through managed login and OIDC relying-party libraries. For more information, see Third-party IdP sign-in.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateManagedLoginBranding":{ "name":"CreateManagedLoginBranding", @@ -776,7 +776,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new set of branding settings for a user pool style and associates it with an app client. This operation is the programmatic option for the creation of a new style in the branding designer.

Provides values for UI customization in a Settings JSON object and image files in an Assets array. To send the JSON object Document type parameter in Settings, you might need to update to the most recent version of your Amazon Web Services SDK.

This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit.

For more information, see API and SDK operations for managed login branding

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Creates a new set of branding settings for a user pool style and associates it with an app client. This operation is the programmatic option for the creation of a new style in the branding designer.

Provides values for UI customization in a Settings JSON object and image files in an Assets array. To send the JSON object Document type parameter in Settings, you might need to update to the most recent version of your Amazon Web Services SDK. To create a new style with default settings, set UseCognitoProvidedValues to true and don't provide values for any other options.

This operation has a 2-megabyte request-size limit that includes the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit.

As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this operation. To get all settings, set ReturnMergedResources to true. For more information, see API and SDK operations for managed login branding.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateResourceServer":{ "name":"CreateResourceServer", @@ -794,7 +794,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a new OAuth2.0 resource server and defines custom scopes within it.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Creates a new OAuth2.0 resource server and defines custom scopes within it. Resource servers are associated with custom scopes and machine-to-machine (M2M) authorization. For more information, see Access control with resource servers.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserImportJob":{ "name":"CreateUserImportJob", @@ -813,7 +813,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates a user import job.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Creates a user import job. You can import users into user pools from a comma-separated values (CSV) file without adding Amazon Cognito MAU costs to your Amazon Web Services bill. To generate a template for your import, see GetCSVHeader. To learn more about CSV import, see Importing users from a CSV file.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserPool":{ "name":"CreateUserPool", @@ -836,7 +836,7 @@ {"shape":"TierChangeNotAllowedException"}, {"shape":"FeatureUnavailableInTierException"} ], - "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Creates a new Amazon Cognito user pool and sets the password policy for the pool.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

Creates a new Amazon Cognito user pool. This operation sets basic and advanced configuration options. You can create a user pool in the Amazon Cognito console to your preferences and use the output of DescribeUserPool to generate requests from that baseline.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserPoolClient":{ "name":"CreateUserPoolClient", @@ -856,7 +856,7 @@ {"shape":"InvalidOAuthFlowException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates the user pool client.

When you create a new user pool client, token revocation is automatically activated. For more information about revoking tokens, see RevokeToken.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Creates an app client in a user pool. This operation sets basic and advanced configuration options. You can create an app client in the Amazon Cognito console to your preferences and use the output of DescribeUserPoolClient to generate requests from that baseline.

New app clients activate token revocation by default. For more information about revoking tokens, see RevokeToken.

If you don't provide a value for an attribute, Amazon Cognito sets it to its default value.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateUserPoolDomain":{ "name":"CreateUserPoolDomain", @@ -874,7 +874,7 @@ {"shape":"InternalErrorException"}, {"shape":"FeatureUnavailableInTierException"} ], - "documentation":"

Creates a new domain for a user pool. The domain hosts user pool domain services like managed login, the hosted UI (classic), and the user pool authorization server.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

A user pool domain hosts managed login, an authorization server and web server for authentication in your application. This operation creates a new user pool prefix domain or custom domain and sets the managed login branding version. Set the branding version to 1 for hosted UI (classic) or 2 for managed login. When you choose a custom domain, you must provide an SSL certificate in the US East (N. Virginia) Amazon Web Services Region in your request.

Your prefix domain might take up to one minute to take effect. Your custom domain is online within five minutes, but it can take up to one hour to distribute your SSL certificate.

For more information about adding a custom domain to your user pool, see Configuring a user pool domain.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DeleteGroup":{ "name":"DeleteGroup", @@ -890,7 +890,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes a group.

Calling this action requires developer credentials.

" + "documentation":"

Deletes a group from the specified user pool. When you delete a group, that group no longer contributes to users' cognito:preferred_group or cognito:groups claims, and no longer influences access-control decisions that are based on group membership. For more information about user pool groups, see Adding groups to a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DeleteIdentityProvider":{ "name":"DeleteIdentityProvider", @@ -908,7 +908,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes an IdP for a user pool.

" + "documentation":"

Deletes a user pool identity provider (IdP). After you delete an IdP, users can no longer sign in to your user pool through that IdP. For more information about user pool IdPs, see Third-party IdP sign-in.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DeleteManagedLoginBranding":{ "name":"DeleteManagedLoginBranding", @@ -925,7 +925,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes a managed login branding style. When you delete a style, you delete the branding association for an app client and restore it to default settings.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Deletes a managed login branding style. When you delete a style, you delete the branding association for an app client. When an app client doesn't have a style assigned, your managed login pages for that app client are nonfunctional until you create a new style or switch the domain branding version.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DeleteResourceServer":{ "name":"DeleteResourceServer", @@ -941,7 +941,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes a resource server.

" + "documentation":"

Deletes a resource server. After you delete a resource server, users can no longer generate access tokens with scopes that are associated with that resource server.

Resource servers are associated with custom scopes and machine-to-machine (M2M) authorization. For more information, see Access control with resource servers.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DeleteUser":{ "name":"DeleteUser", @@ -961,7 +961,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Allows a user to delete their own user profile.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

Self-deletes a user profile. A deleted user profile can no longer be used to sign in and can't be restored.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -984,7 +984,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Deletes the attributes for a user.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

Self-deletes attributes for a user. For example, your application can submit a request to this operation when a user wants to remove their birthdate attribute value.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -1003,7 +1003,7 @@ {"shape":"UserImportInProgressException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes the specified Amazon Cognito user pool.

" + "documentation":"

Deletes a user pool. After you delete a user pool, users can no longer sign in to any associated applications.

" }, "DeleteUserPoolClient":{ "name":"DeleteUserPoolClient", @@ -1020,7 +1020,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Allows the developer to delete the user pool client.

" + "documentation":"

Deletes a user pool app client. After you delete an app client, users can no longer sign in to the associated application.

" }, "DeleteUserPoolDomain":{ "name":"DeleteUserPoolDomain", @@ -1036,7 +1036,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Deletes a domain for a user pool.

" + "documentation":"

Given a user pool ID and domain identifier, deletes a user pool domain. After you delete a user pool domain, your managed login pages and authorization server are no longer available.

" }, "DeleteWebAuthnCredential":{ "name":"DeleteWebAuthnCredential", @@ -1053,7 +1053,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes a registered passkey, or webauthN, device for the currently signed-in user.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

", + "documentation":"

Deletes a registered passkey, or webauthN, authenticator for the currently signed-in user.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -1072,7 +1072,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Gets information about a specific IdP.

" + "documentation":"

Given a user pool ID and identity provider (IdP) name, returns details about the IdP.

" }, "DescribeManagedLoginBranding":{ "name":"DescribeManagedLoginBranding", @@ -1089,7 +1089,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

When given the ID of a managed login branding style, returns detailed information about the style.

" + "documentation":"

Given the ID of a managed login branding style, returns detailed information about the style.

" }, "DescribeManagedLoginBrandingByClient":{ "name":"DescribeManagedLoginBrandingByClient", @@ -1106,7 +1106,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

When given the ID of a user pool app client, returns detailed information about the style assigned to the app client.

" + "documentation":"

Given the ID of a user pool app client, returns detailed information about the style assigned to the app client.

" }, "DescribeResourceServer":{ "name":"DescribeResourceServer", @@ -1123,7 +1123,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Describes a resource server.

" + "documentation":"

Describes a resource server. For more information about resource servers, see Access control with resource servers.

" }, "DescribeRiskConfiguration":{ "name":"DescribeRiskConfiguration", @@ -1141,7 +1141,7 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Describes the risk configuration.

" + "documentation":"

Given an app client or user pool ID where threat protection is configured, describes the risk configuration. This operation returns details about adaptive authentication, compromised credentials, and IP-address allow- and denylists. For more information about threat protection, see Threat protection.

" }, "DescribeUserImportJob":{ "name":"DescribeUserImportJob", @@ -1158,7 +1158,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Describes the user import job.

" + "documentation":"

Describes a user import job. For more information about user CSV import, see Importing users from a CSV file.

" }, "DescribeUserPool":{ "name":"DescribeUserPool", @@ -1176,7 +1176,7 @@ {"shape":"UserPoolTaggingException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Returns the configuration information and metadata of the specified user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Given a user pool ID, returns configuration information. This operation is useful when you want to inspect an existing user pool and programmatically replicate the configuration to another user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DescribeUserPoolClient":{ "name":"DescribeUserPoolClient", @@ -1193,7 +1193,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Client method for returning the configuration information and metadata of the specified user pool app client.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Given an app client ID, returns configuration information. This operation is useful when you want to inspect an existing app client and programmatically replicate the configuration to another app client. For more information about app clients, see App clients.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "DescribeUserPoolDomain":{ "name":"DescribeUserPoolDomain", @@ -1209,7 +1209,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Gets information about a domain.

" + "documentation":"

Given a user pool domain name, returns information about the domain configuration.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "ForgetDevice":{ "name":"ForgetDevice", @@ -1499,7 +1499,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.

Other requests might be valid until your user's token expires.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.

Other requests might be valid until your user's token expires. This operation doesn't clear the managed login session cookie. To clear the session for a user who signed in with managed login or the classic hosted UI, direct their browser session to the logout endpoint.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -1887,7 +1887,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", + "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

This operation doesn't reset an existing TOTP MFA for a user. To register a new TOTP factor for a user, make an AssociateSoftwareToken request. For more information, see TOTP software token MFA.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none", "auth":["smithy.api#noAuth"] }, @@ -2153,7 +2153,7 @@ {"shape":"NotAuthorizedException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Configures the branding settings for a user pool style. This operation is the programmatic option for the configuration of a style in the branding designer.

Provides values for UI customization in a Settings JSON object and image files in an Assets array.

This operation has a 2-megabyte request-size limit and include the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit.

For more information, see API and SDK operations for managed login branding.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Configures the branding settings for a user pool style. This operation is the programmatic option for the configuration of a style in the branding designer.

Provides values for UI customization in a Settings JSON object and image files in an Assets array.

This operation has a 2-megabyte request-size limit that includes the CSS settings and image assets for your app client. Your branding settings might exceed 2MB in size. Amazon Cognito doesn't require that you pass all parameters in one request and preserves existing style settings that you don't specify. If your request is larger than 2MB, separate it into multiple requests, each with a size smaller than the limit.

As a best practice, modify the output of DescribeManagedLoginBrandingByClient into the request parameters for this operation. To get all settings, set ReturnMergedResources to true. For more information, see API and SDK operations for managed login branding.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "UpdateResourceServer":{ "name":"UpdateResourceServer", @@ -2266,7 +2266,7 @@ {"shape":"InternalErrorException"}, {"shape":"FeatureUnavailableInTierException"} ], - "documentation":"

Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.

You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You can't use it to change the domain for a user pool.

A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.

Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.

However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.

When you add your new certificate in ACM, you must choose US East (N. Virginia) as the Amazon Web Services Region.

After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.

For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

A user pool domain hosts managed login, an authorization server and web server for authentication in your application. This operation updates the branding version for user pool domains between 1 for hosted UI (classic) and 2 for managed login. It also updates the SSL certificate for user pool custom domains.

Changes to the domain branding version take up to one minute to take effect for a prefix domain and up to five minutes for a custom domain.

This operation doesn't change the name of your user pool domain. To change your domain, delete it with DeleteUserPoolDomain and create a new domain with CreateUserPoolDomain.

You can pass the ARN of a new Certificate Manager certificate in this request. Typically, ACM certificates automatically renew and your user pool can continue to use the same ARN. But if you generate a new certificate for your custom domain name, replace the original configuration with the new ARN in this request.

ACM certificates for custom domains must be in the US East (N. Virginia) Amazon Web Services Region. After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.

For more information about adding a custom domain to your user pool, see Configuring a user pool domain.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "VerifySoftwareToken":{ "name":"VerifySoftwareToken", @@ -2415,11 +2415,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to add custom attributes.

" + "documentation":"

The ID of the user pool where you want to add custom attributes.

" }, "CustomAttributes":{ "shape":"CustomAttributesListType", - "documentation":"

An array of custom attributes, such as Mutable and Name.

" + "documentation":"

An array of custom attribute names and other properties. Sets the following characteristics:

AttributeDataType

The expected data type. Can be a string, a number, a date and time, or a boolean.

Mutable

If true, you can grant app clients write access to the attribute value. If false, the attribute value can only be set at sign-up or when an administrator creates the user.

Name

The attribute name. For an attribute like custom:myAttribute, enter myAttribute for this field.

Required

When true, users who sign up or are created must set a value for the attribute.

NumberAttributeConstraints

The minimum and maximum values accepted for a Number-type attribute.

StringAttributeConstraints

The minimum and maximum length of accepted values for a String-type attribute.

DeveloperOnlyAttribute

This legacy option creates an attribute with a dev: prefix. You can only set the value of a developer-only attribute with administrative IAM credentials.

" } }, "documentation":"

Represents the request to add custom attributes.

" @@ -2440,7 +2440,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool that contains the group that you want to add the user to.

" }, "Username":{ "shape":"UsernameType", @@ -2461,7 +2461,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for which you want to confirm user registration.

" + "documentation":"

The ID of the user pool where you want to confirm a user's sign-up request.

" }, "Username":{ "shape":"UsernameType", @@ -2469,7 +2469,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

If your user pool configuration includes triggers, the AdminConfirmSignUp API action invokes the Lambda function that is specified for the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. In this payload, the clientMetadata attribute provides the data that you assigned to the ClientMetadata parameter in your AdminConfirmSignUp request. In your function code in Lambda, you can process the ClientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Confirm a user's registration as a user pool administrator.

" @@ -2507,7 +2507,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where the user will be created.

" + "documentation":"

The ID of the user pool where you want to create a user.

" }, "Username":{ "shape":"UsernameType", @@ -2527,19 +2527,19 @@ }, "ForceAliasCreation":{ "shape":"ForceAliasCreation", - "documentation":"

This parameter is used only if the phone_number_verified or email_verified attribute is set to True. Otherwise, it is ignored.

If this parameter is set to True and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.

If this parameter is set to False, the API throws an AliasExistsException error if the alias already exists. The default value is False.

" + "documentation":"

This parameter is used only if the phone_number_verified or email_verified attribute is set to True. Otherwise, it is ignored.

If this parameter is set to True and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, this request migrates the alias from the previous user to the newly-created user. The previous user will no longer be able to log in using that alias.

If this parameter is set to False, the API throws an AliasExistsException error if the alias already exists. The default value is False.

" }, "MessageAction":{ "shape":"MessageActionType", - "documentation":"

Set to RESEND to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to SUPPRESS to suppress sending the message. You can specify only one value.

" + "documentation":"

Set to RESEND to resend the invitation message to a user that already exists, and to reset the temporary-password duration with a new temporary password. Set to SUPPRESS to suppress sending the message. You can specify only one value.

" }, "DesiredDeliveryMediums":{ "shape":"DeliveryMediumListType", - "documentation":"

Specify \"EMAIL\" if email will be used to send the welcome message. Specify \"SMS\" if the phone number will be used. The default value is \"SMS\". You can specify more than one value.

" + "documentation":"

Specify EMAIL if email will be used to send the welcome message. Specify SMS if the phone number will be used. The default value is SMS. You can specify more than one value.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminCreateUser API action, Amazon Cognito invokes the function that is assigned to the pre sign-up trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a ClientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminCreateUser request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Creates a new user in the specified user pool.

" @@ -2549,7 +2549,7 @@ "members":{ "User":{ "shape":"UserType", - "documentation":"

The newly created user.

" + "documentation":"

The new user's profile details.

" } }, "documentation":"

Represents the response from the server to the request to create the user.

" @@ -2569,7 +2569,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to delete user attributes.

" + "documentation":"

The ID of the user pool where you want to delete user attributes.

" }, "Username":{ "shape":"UsernameType", @@ -2597,7 +2597,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to delete the user.

" + "documentation":"

The ID of the user pool where you want to delete the user.

" }, "Username":{ "shape":"UsernameType", @@ -2615,11 +2615,11 @@ "members":{ "UserPoolId":{ "shape":"StringType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool where you want to delete the user's linked identities.

" }, "User":{ "shape":"ProviderUserIdentifierType", - "documentation":"

The user to be disabled.

" + "documentation":"

The user profile that you want to delete a linked identity from.

" } } }, @@ -2637,7 +2637,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to disable the user.

" + "documentation":"

The ID of the user pool where you want to disable the user.

" }, "Username":{ "shape":"UsernameType", @@ -2661,7 +2661,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to enable the user.

" + "documentation":"

The ID of the user pool where you want to activate sign-in for the user.

" }, "Username":{ "shape":"UsernameType", @@ -2686,7 +2686,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where the device owner is a user.

" }, "Username":{ "shape":"UsernameType", @@ -2694,7 +2694,7 @@ }, "DeviceKey":{ "shape":"DeviceKeyType", - "documentation":"

The device key.

" + "documentation":"

The key ID of the device that you want to delete. You can get device keys in the response to an AdminListDevices request.

" } }, "documentation":"

Sends the forgot device request, as an administrator.

" @@ -2709,11 +2709,11 @@ "members":{ "DeviceKey":{ "shape":"DeviceKeyType", - "documentation":"

The device key.

" + "documentation":"

The key of the device that you want to get information about. You can get device keys in the response to an AdminListDevices request.

" }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where the device owner is a user.

" }, "Username":{ "shape":"UsernameType", @@ -2728,7 +2728,7 @@ "members":{ "Device":{ "shape":"DeviceType", - "documentation":"

The device.

" + "documentation":"

Details of the requested device. Includes device information, last-accessed and created dates, and the device key.

" } }, "documentation":"

Gets the device response, as an administrator.

" @@ -2742,7 +2742,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to get information about the user.

" + "documentation":"

The ID of the user pool where you want to get information about the user.

" }, "Username":{ "shape":"UsernameType", @@ -2761,11 +2761,11 @@ }, "UserAttributes":{ "shape":"AttributeListType", - "documentation":"

An array of name-value pairs representing user attributes.

" + "documentation":"

An array of name-value pairs of user attributes and their values, for example \"email\": \"testuser@example.com\".

" }, "UserCreateDate":{ "shape":"DateType", - "documentation":"

The date the user was created.

" + "documentation":"

The date and time when the item was created. Amazon Cognito returns this timestamp in UNIX epoch time format. Your SDK might render the output in a human-readable format like ISO 8601 or a Java Date object.

" }, "UserLastModifiedDate":{ "shape":"DateType", @@ -2773,11 +2773,11 @@ }, "Enabled":{ "shape":"BooleanType", - "documentation":"

Indicates that the status is enabled.

" + "documentation":"

Indicates whether the user is activated for sign-in. The AdminDisableUser and AdminEnableUser API operations deactivate and activate user sign-in, respectively.

" }, "UserStatus":{ "shape":"UserStatusType", - "documentation":"

The user status. Can be one of the following:

" + "documentation":"

The user's status. Can be one of the following:

" }, "MFAOptions":{ "shape":"MFAOptionListType", @@ -2785,11 +2785,11 @@ }, "PreferredMfaSetting":{ "shape":"StringType", - "documentation":"

The user's preferred MFA setting.

" + "documentation":"

The user's preferred MFA. Users can prefer SMS message, email message, or TOTP MFA.

" }, "UserMFASettingList":{ "shape":"UserMFASettingListType", - "documentation":"

The MFA options that are activated for the user. The possible values in this list are SMS_MFA, EMAIL_OTP, and SOFTWARE_TOKEN_MFA.

" + "documentation":"

The MFA options that are activated for the user. The possible values in this list are SMS_MFA, EMAIL_OTP, and SOFTWARE_TOKEN_MFA. You can change the MFA preference for users who have more than one available MFA factor with AdminSetUserMFAPreference or SetUserMFAPreference.

" } }, "documentation":"

Represents the response from the server from the request to get the specified user as an administrator.

" @@ -2804,15 +2804,15 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The ID of the Amazon Cognito user pool.

" + "documentation":"

The ID of the user pool where the user wants to sign in.

" }, "ClientId":{ "shape":"ClientIdType", - "documentation":"

The app client ID.

" + "documentation":"

The ID of the app client where the user wants to sign in.

" }, "AuthFlow":{ "shape":"AuthFlowType", - "documentation":"

The authentication flow that you want to initiate. The AuthParameters that you must submit are linked to the flow that you submit. For example:

Valid values include the following:

USER_AUTH

The entry point for sign-in with passwords, one-time passwords, biometric devices, and security keys.

USER_SRP_AUTH

Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow.

REFRESH_TOKEN_AUTH and REFRESH_TOKEN

Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token.

CUSTOM_AUTH

Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers.

ADMIN_USER_PASSWORD_AUTH

Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow.

USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for AdminInitiateAuth.

" + "documentation":"

The authentication flow that you want to initiate. Each AuthFlow has linked AuthParameters that you must submit. The following are some example flows and their parameters.

All flows

USER_AUTH

The entry point for sign-in with passwords, one-time passwords, and WebAuthN authenticators.

USER_SRP_AUTH

Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow.

REFRESH_TOKEN_AUTH and REFRESH_TOKEN

Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token.

CUSTOM_AUTH

Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers.

ADMIN_USER_PASSWORD_AUTH

Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow.

USER_PASSWORD_AUTH is a flow type of InitiateAuth and isn't valid for AdminInitiateAuth.

" }, "AuthParameters":{ "shape":"AuthParametersType", @@ -2820,19 +2820,19 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminInitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input:

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminInitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminInitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input:

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" }, "AnalyticsMetadata":{ "shape":"AnalyticsMetadataType", - "documentation":"

The analytics metadata for collecting Amazon Pinpoint metrics for AdminInitiateAuth calls.

" + "documentation":"

The analytics metadata for collecting Amazon Pinpoint metrics.

" }, "ContextData":{ "shape":"ContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "Session":{ "shape":"SessionType", - "documentation":"

The optional session ID from a ConfirmSignUp API request. You can sign in a user directly from the sign-up process with the USER_AUTH authentication flow.

" + "documentation":"

The optional session ID from a ConfirmSignUp API request. You can sign in a user directly from the sign-up process with an AuthFlow of USER_AUTH and AuthParameters of EMAIL_OTP or SMS_OTP, depending on how your user pool sent the confirmation-code message.

" } }, "documentation":"

Initiates the authorization request, as an administrator.

" @@ -2846,7 +2846,7 @@ }, "Session":{ "shape":"SessionType", - "documentation":"

The session that should be passed both ways in challenge-response calls to the service. If AdminInitiateAuth or AdminRespondToAuthChallenge API call determines that the caller must pass another challenge, they return a session with other challenge parameters. This session should be passed as it is to the next AdminRespondToAuthChallenge API call.

" + "documentation":"

The session that must be passed to challenge-response requests. If an AdminInitiateAuth or AdminRespondToAuthChallenge API request determines that the caller must pass another challenge, Amazon Cognito returns a session ID and the parameters of the next challenge. Pass this session ID in the Session parameter of AdminRespondToAuthChallenge.

" }, "ChallengeParameters":{ "shape":"ChallengeParametersType", @@ -2854,7 +2854,7 @@ }, "AuthenticationResult":{ "shape":"AuthenticationResultType", - "documentation":"

The result of the authentication response. This is only returned if the caller doesn't need to pass another challenge. If the caller does need to pass another challenge before it gets tokens, ChallengeName, ChallengeParameters, and Session are returned.

" + "documentation":"

The outcome of successful authentication. This is only returned if the user pool has no additional challenges to return. If Amazon Cognito returns another challenge, the response includes ChallengeName, ChallengeParameters, and Session so that your user can answer the challenge.

" } }, "documentation":"

Initiates the authentication response, as an administrator.

" @@ -2869,7 +2869,7 @@ "members":{ "UserPoolId":{ "shape":"StringType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool where you want to link a federated identity.

" }, "DestinationUser":{ "shape":"ProviderUserIdentifierType", @@ -2895,7 +2895,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where the device owner is a user.

" }, "Username":{ "shape":"UsernameType", @@ -2903,7 +2903,7 @@ }, "Limit":{ "shape":"QueryLimitType", - "documentation":"

The limit of the devices request.

" + "documentation":"

The maximum number of devices that you want Amazon Cognito to return in the response.

" }, "PaginationToken":{ "shape":"SearchPaginationTokenType", @@ -2917,7 +2917,7 @@ "members":{ "Devices":{ "shape":"DeviceListType", - "documentation":"

The devices in the list of devices response.

" + "documentation":"

An array of devices and their information. Each entry that's returned includes device information, last-accessed and created dates, and the device key.

" }, "PaginationToken":{ "shape":"SearchPaginationTokenType", @@ -2939,15 +2939,15 @@ }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool where you want to view a user's groups.

" }, "Limit":{ "shape":"QueryLimitType", - "documentation":"

The limit of the request to list groups.

" + "documentation":"

The maximum number of groups that you want Amazon Cognito to return in the response.

" }, "NextToken":{ "shape":"PaginationKey", - "documentation":"

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

" + "documentation":"

This API operation returns a limited number of results. The pagination token is an identifier that you can present in an additional API request with the same parameters. When you include the pagination token, Amazon Cognito returns the next set of items after the current list. Subsequent requests return a new pagination token. By using this token, you can paginate through the full list of items.

" } } }, @@ -2956,11 +2956,11 @@ "members":{ "Groups":{ "shape":"GroupListType", - "documentation":"

The groups that the user belongs to.

" + "documentation":"

An array of groups and information about them.

" }, "NextToken":{ "shape":"PaginationKey", - "documentation":"

An identifier that was returned from the previous call to this operation, which can be used to return the next set of items in the list.

" + "documentation":"

The identifier that Amazon Cognito returned with the previous request to this operation. When you include a pagination token in your request, Amazon Cognito returns the next set of items in the list. By using this token, you can paginate through the full list of items.

" } } }, @@ -2973,7 +2973,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool that contains the user profile with the logged events.

" }, "Username":{ "shape":"UsernameType", @@ -2985,7 +2985,7 @@ }, "NextToken":{ "shape":"PaginationKey", - "documentation":"

A pagination token.

" + "documentation":"

This API operation returns a limited number of results. The pagination token is an identifier that you can present in an additional API request with the same parameters. When you include the pagination token, Amazon Cognito returns the next set of items after the current list. Subsequent requests return a new pagination token. By using this token, you can paginate through the full list of items.

" } } }, @@ -2998,7 +2998,7 @@ }, "NextToken":{ "shape":"PaginationKey", - "documentation":"

A pagination token.

" + "documentation":"

The identifier that Amazon Cognito returned with the previous request to this operation. When you include a pagination token in your request, Amazon Cognito returns the next set of items in the list. By using this token, you can paginate through the full list of items.

" } } }, @@ -3012,7 +3012,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool that contains the group and the user that you want to remove.

" }, "Username":{ "shape":"UsernameType", @@ -3020,7 +3020,7 @@ }, "GroupName":{ "shape":"GroupNameType", - "documentation":"

The group name.

" + "documentation":"

The name of the group that you want to remove the user from, for example MyTestGroup.

" } } }, @@ -3033,7 +3033,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to reset the user's password.

" + "documentation":"

The ID of the user pool where you want to reset the user's password.

" }, "Username":{ "shape":"UsernameType", @@ -3041,7 +3041,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. The AdminResetUserPassword API operation invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Represents the request to reset a user's password as an administrator.

" @@ -3062,15 +3062,15 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The ID of the Amazon Cognito user pool.

" + "documentation":"

The ID of the user pool where you want to respond to an authentication challenge.

" }, "ClientId":{ "shape":"ClientIdType", - "documentation":"

The app client ID.

" + "documentation":"

The ID of the app client where you initiated sign-in.

" }, "ChallengeName":{ "shape":"ChallengeNameType", - "documentation":"

The challenge name. For more information, see AdminInitiateAuth.

" + "documentation":"

The name of the challenge that you are responding to. You can find more information about values for ChallengeName in the response parameters of AdminInitiateAuth.

" }, "ChallengeResponses":{ "shape":"ChallengeResponsesType", @@ -3078,7 +3078,7 @@ }, "Session":{ "shape":"SessionType", - "documentation":"

The session that should be passed both ways in challenge-response calls to the service. If an InitiateAuth or RespondToAuthChallenge API call determines that the caller must pass another challenge, it returns a session with other challenge parameters. This session should be passed as it is to the next RespondToAuthChallenge API call.

" + "documentation":"

The session identifier that maintains the state of authentication requests and challenge responses. If an AdminInitiateAuth or AdminRespondToAuthChallenge API request results in a determination that your application must pass another challenge, Amazon Cognito returns a session with other challenge parameters. Send this session identifier, unmodified, to the next AdminRespondToAuthChallenge request.

" }, "AnalyticsMetadata":{ "shape":"AnalyticsMetadataType", @@ -3086,11 +3086,11 @@ }, "ContextData":{ "shape":"ContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that you have assigned to the following triggers:

When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute that provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that you have assigned to the following triggers:

When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute that provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

The request to respond to the authentication challenge, as an administrator.

" @@ -3100,19 +3100,19 @@ "members":{ "ChallengeName":{ "shape":"ChallengeNameType", - "documentation":"

The name of the challenge. For more information, see AdminInitiateAuth.

" + "documentation":"

The name of the challenge that you must next respond to. You can find more information about values for ChallengeName in the response parameters of AdminInitiateAuth.

" }, "Session":{ "shape":"SessionType", - "documentation":"

The session that should be passed both ways in challenge-response calls to the service. If the caller must pass another challenge, they return a session with other challenge parameters. This session should be passed as it is to the next RespondToAuthChallenge API call.

" + "documentation":"

The session identifier that maintains the state of authentication requests and challenge responses. If an AdminInitiateAuth or AdminRespondToAuthChallenge API request results in a determination that your application must pass another challenge, Amazon Cognito returns a session with other challenge parameters. Send this session identifier, unmodified, to the next AdminRespondToAuthChallenge request.

" }, "ChallengeParameters":{ "shape":"ChallengeParametersType", - "documentation":"

The challenge parameters. For more information, see AdminInitiateAuth.

" + "documentation":"

The parameters that define your response to the next challenge. Take the values in ChallengeParameters and provide values for them in the ChallengeResponses of the next AdminRespondToAuthChallenge request.

" }, "AuthenticationResult":{ "shape":"AuthenticationResultType", - "documentation":"

The result returned by the server in response to the authentication request.

" + "documentation":"

The outcome of a successful authentication process. After your application has passed all challenges, Amazon Cognito returns an AuthenticationResult with the JSON web tokens (JWTs) that indicate successful sign-in.

" } }, "documentation":"

Responds to the authentication challenge, as an administrator.

" @@ -3161,7 +3161,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to set the user's password.

" + "documentation":"

The ID of the user pool where you want to set the user's password.

" }, "Username":{ "shape":"UsernameType", @@ -3169,11 +3169,11 @@ }, "Password":{ "shape":"PasswordType", - "documentation":"

The password for the user.

" + "documentation":"

The new temporary or permanent password that you want to set for the user. You can't remove the password for a user who already has a password so that they can only sign in with passwordless methods. In this scenario, you must create a new user without a password.

" }, "Permanent":{ "shape":"BooleanType", - "documentation":"

True if the password is permanent, False if it is temporary.

" + "documentation":"

Set to true to set a password that the user can immediately sign in with. Set to false to set a temporary password that the user must change on their next sign-in.

" } } }, @@ -3222,7 +3222,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where you want to submit authentication-event feedback.

" }, "Username":{ "shape":"UsernameType", @@ -3230,7 +3230,7 @@ }, "EventId":{ "shape":"EventIdType", - "documentation":"

The authentication event ID.

" + "documentation":"

The authentication event ID. To query authentication events for a user, see AdminListUserAuthEvents.

" }, "FeedbackValue":{ "shape":"FeedbackValueType", @@ -3253,7 +3253,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where you want to change a user's device status.

" }, "Username":{ "shape":"UsernameType", @@ -3261,11 +3261,11 @@ }, "DeviceKey":{ "shape":"DeviceKeyType", - "documentation":"

The device key.

" + "documentation":"

The unique identifier, or device key, of the device that you want to update the status for.

" }, "DeviceRememberedStatus":{ "shape":"DeviceRememberedStatusType", - "documentation":"

The status indicating whether a device has been remembered or not.

" + "documentation":"

To enable device authentication with the specified device, set to remembered. To disable, set to not_remembered.

" } }, "documentation":"

The request to update the device status, as an administrator.

" @@ -3286,7 +3286,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to update user attributes.

" + "documentation":"

The ID of the user pool where you want to update user attributes.

" }, "Username":{ "shape":"UsernameType", @@ -3298,7 +3298,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Represents the request to update the user's attributes as an administrator.

" @@ -3318,7 +3318,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where you want to sign out a user.

" }, "Username":{ "shape":"UsernameType", @@ -3505,11 +3505,11 @@ "members":{ "AccessToken":{ "shape":"TokenModelType", - "documentation":"

A valid access token that Amazon Cognito issued to the user whose software token you want to generate.

" + "documentation":"

A valid access token that Amazon Cognito issued to the user whose software token you want to generate. You can provide either an access token or a session ID in the request.

" }, "Session":{ "shape":"SessionType", - "documentation":"

The session that should be passed both ways in challenge-response calls to the service. This allows authentication of the user as part of the MFA setup process.

" + "documentation":"

The session identifier that maintains the state of authentication requests and challenge responses. In AssociateSoftwareToken, this is the session ID from a successful sign-in. You can provide either an access token or a session ID in the request.

" } } }, @@ -3518,11 +3518,11 @@ "members":{ "SecretCode":{ "shape":"SecretCodeType", - "documentation":"

A unique generated shared secret code that is used in the TOTP algorithm to generate a one-time code.

" + "documentation":"

A unique generated shared secret code that is used by the TOTP algorithm to generate a one-time code.

" }, "Session":{ "shape":"SessionType", - "documentation":"

The session that should be passed both ways in challenge-response calls to the service. This allows authentication of the user as part of the MFA setup process.

" + "documentation":"

The session identifier that maintains the state of authentication requests and challenge responses. This session ID is valid for the next request in this flow, VerifySoftwareToken.

" } } }, @@ -3787,7 +3787,7 @@ }, "ProposedPassword":{ "shape":"PasswordType", - "documentation":"

The new password.

" + "documentation":"

A new password that you prompted the user to enter in your application.

" }, "AccessToken":{ "shape":"TokenModelType", @@ -3907,7 +3907,7 @@ "members":{ "AccessToken":{ "shape":"TokenModelType", - "documentation":"

A valid access token that Amazon Cognito issued to the user whose passkey registration you want to verify.

" + "documentation":"

A valid access token that Amazon Cognito issued to the user whose passkey registration you want to complete.

" }, "Credential":{ "shape":"Document", @@ -3989,7 +3989,7 @@ }, "DeviceKey":{ "shape":"DeviceKeyType", - "documentation":"

The device key.

" + "documentation":"

The unique identifier, or device key, of the device that you want to update the status for.

" }, "DeviceSecretVerifierConfig":{ "shape":"DeviceSecretVerifierConfigType", @@ -3997,20 +3997,20 @@ }, "DeviceName":{ "shape":"DeviceNameType", - "documentation":"

The device name.

" + "documentation":"

A friendly name for the device, for example MyMobilePhone.

" } }, - "documentation":"

Confirms the device request.

" + "documentation":"

The confirm-device request.

" }, "ConfirmDeviceResponse":{ "type":"structure", "members":{ "UserConfirmationNecessary":{ "shape":"BooleanType", - "documentation":"

Indicates whether the user confirmation must confirm the device response.

" + "documentation":"

When true, your user must confirm that they want to remember the device. Prompt the user for an answer. You must then make an UpdateDeviceStatus request that sets the device to remembered or not_remembered.

When false, immediately sets the device as remembered and eligible for device authentication.

You can configure your user pool to always remember devices, in which case this response is false, or to allow users to opt in, in which case this response is true. Configure this option under Device tracking in the Sign-in menu of your user pool. You can also configure this option with the DeviceConfiguration parameter of a CreateUserPool or UpdateUserPool request.

" } }, - "documentation":"

Confirms the device response.

" + "documentation":"

The confirm-device response.

" }, "ConfirmForgotPasswordRequest":{ "type":"structure", @@ -4023,7 +4023,7 @@ "members":{ "ClientId":{ "shape":"ClientIdType", - "documentation":"

The app client ID of the app associated with the user pool.

" + "documentation":"

The ID of the app client where the user wants to reset their password. This parameter is an identifier of the client application that users are resetting their password from, but this operation resets users' passwords for all app clients in the user pool.

" }, "SecretHash":{ "shape":"SecretHashType", @@ -4035,7 +4035,7 @@ }, "ConfirmationCode":{ "shape":"ConfirmationCodeType", - "documentation":"

The confirmation code from your user's request to reset their password. For more information, see ForgotPassword.

" + "documentation":"

The confirmation code that your user pool sent in response to an AdminResetUserPassword or a ForgotPassword request.

" }, "Password":{ "shape":"PasswordType", @@ -4047,11 +4047,11 @@ }, "UserContextData":{ "shape":"UserContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

The request representing the confirmation for a password reset.

" @@ -4076,7 +4076,7 @@ }, "SecretHash":{ "shape":"SecretHashType", - "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

" + "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values.

" }, "Username":{ "shape":"UsernameType", @@ -4084,11 +4084,11 @@ }, "ConfirmationCode":{ "shape":"ConfirmationCodeType", - "documentation":"

The confirmation code sent by a user's request to confirm registration.

" + "documentation":"

The confirmation code that your user pool sent in response to the SignUp request.

" }, "ForceAliasCreation":{ "shape":"ForceAliasCreation", - "documentation":"

Boolean to be specified to force user confirmation irrespective of existing alias. By default set to False. If this parameter is set to True and the phone number/email used for sign up confirmation already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user being confirmed. If set to False, the API will throw an AliasExistsException error.

" + "documentation":"

When true, forces user confirmation despite any existing aliases. Defaults to false. A value of true migrates the alias from an existing user to the new user if an existing user already has the phone number or email address as an alias.

Say, for example, that an existing user has an email attribute of bob@example.com and email is an alias in your user pool. If the new user also has an email of bob@example.com and your ConfirmSignUp request sets ForceAliasCreation to true, the new user can sign in with a username of bob@example.com and the existing user can no longer do so.

If false and an attribute belongs to an existing alias, this request returns an AliasExistsException error.

For more information about sign-in aliases, see Customizing sign-in attributes.

" }, "AnalyticsMetadata":{ "shape":"AnalyticsMetadataType", @@ -4096,11 +4096,11 @@ }, "UserContextData":{ "shape":"UserContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" }, "Session":{ "shape":"SessionType", @@ -4114,7 +4114,7 @@ "members":{ "Session":{ "shape":"SessionType", - "documentation":"

You can automatically sign users in with the one-time password that they provided in a successful ConfirmSignUp request. To do this, pass the Session parameter from the ConfirmSignUp response in the Session parameter of an InitiateAuth or AdminInitiateAuth request.

" + "documentation":"

A session identifier that you can use to immediately sign in the confirmed user. You can automatically sign users in with the one-time password that they provided in a successful ConfirmSignUp request. To do this, pass the Session parameter from this response in the Session parameter of an InitiateAuth or AdminInitiateAuth request.

" } }, "documentation":"

Represents the response from the server for the registration confirmation.

" @@ -4166,19 +4166,19 @@ "members":{ "GroupName":{ "shape":"GroupNameType", - "documentation":"

The name of the group. Must be unique.

" + "documentation":"

A name for the group. This name must be unique in your user pool.

" }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool where you want to create a user group.

" }, "Description":{ "shape":"DescriptionType", - "documentation":"

A string containing the description of the group.

" + "documentation":"

A description of the group that you're creating.

" }, "RoleArn":{ "shape":"ArnType", - "documentation":"

The role Amazon Resource Name (ARN) for the group.

" + "documentation":"

The Amazon Resource Name (ARN) for the IAM role that you want to associate with the group. A group role primarily declares a preferred role for the credentials that you get from an identity pool. Amazon Cognito ID tokens have a cognito:preferred_role claim that presents the highest-precedence group that a user belongs to. Both ID and access tokens also contain a cognito:groups claim that lists all the groups that a user is a member of.

" }, "Precedence":{ "shape":"PrecedenceType", @@ -4191,7 +4191,7 @@ "members":{ "Group":{ "shape":"GroupType", - "documentation":"

The group object for the group.

" + "documentation":"

The response object for a created group.

" } } }, @@ -4206,15 +4206,15 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where you want to create an IdP.

" }, "ProviderName":{ "shape":"ProviderNameTypeV2", - "documentation":"

The IdP name.

" + "documentation":"

The name that you want to assign to the IdP. You can pass the identity provider name in the identity_provider query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated IdP.

" }, "ProviderType":{ "shape":"IdentityProviderTypeType", - "documentation":"

The IdP type.

" + "documentation":"

The type of IdP that you want to add. Amazon Cognito supports OIDC, SAML 2.0, Login With Amazon, Sign In With Apple, Google, and Facebook IdPs.

" }, "ProviderDetails":{ "shape":"ProviderDetailsType", @@ -4222,11 +4222,11 @@ }, "AttributeMapping":{ "shape":"AttributeMappingType", - "documentation":"

A mapping of IdP attributes to standard and custom user pool attributes.

" + "documentation":"

A mapping of IdP attributes to standard and custom user pool attributes. Specify a user pool attribute as the key of the key-value pair, and the IdP attribute claim name as the value.

" }, "IdpIdentifiers":{ "shape":"IdpIdentifiersListType", - "documentation":"

A list of IdP identifiers.

" + "documentation":"

An array of IdP identifiers, for example \"IdPIdentifiers\": [ \"MyIdP\", \"MyIdP2\" ]. Identifiers are friendly names that you can pass in the idp_identifier query parameter of requests to the Authorize endpoint to silently redirect to sign-in with the associated IdP. Identifiers in a domain format also enable the use of email-address matching with SAML providers.

" } } }, @@ -4236,7 +4236,7 @@ "members":{ "IdentityProvider":{ "shape":"IdentityProviderType", - "documentation":"

The newly created IdP object.

" + "documentation":"

The details of the new user pool IdP.

" } } }, @@ -4257,7 +4257,7 @@ }, "UseCognitoProvidedValues":{ "shape":"BooleanType", - "documentation":"

When true, applies the default branding style options. This option reverts to default style options that are managed by Amazon Cognito. You can modify them later in the branding designer.

When you specify true for this option, you must also omit values for Settings and Assets in the request.

" + "documentation":"

When true, applies the default branding style options. These default options are managed by Amazon Cognito. You can modify them later in the branding designer.

When you specify true for this option, you must also omit values for Settings and Assets in the request.

" }, "Settings":{ "shape":"Document", @@ -4288,7 +4288,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool where you want to create a resource server.

" }, "Identifier":{ "shape":"ResourceServerIdentifierType", @@ -4300,7 +4300,7 @@ }, "Scopes":{ "shape":"ResourceServerScopeListType", - "documentation":"

A list of scopes. Each scope is a key-value map with the keys name and description.

" + "documentation":"

A list of custom scopes. Each scope is a key-value map with the keys ScopeName and ScopeDescription. The name of a custom scope is a combination of ScopeName and the resource server Name in this request, for example MyResourceServerName/MyScopeName.

" } } }, @@ -4310,7 +4310,7 @@ "members":{ "ResourceServer":{ "shape":"ResourceServerType", - "documentation":"

The newly created resource server.

" + "documentation":"

The details of the new resource server.

" } } }, @@ -4324,15 +4324,15 @@ "members":{ "JobName":{ "shape":"UserImportJobNameType", - "documentation":"

The job name for the user import job.

" + "documentation":"

A friendly name for the user import job.

" }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that the users are being imported into.

" + "documentation":"

The ID of the user pool that you want to import users into.

" }, "CloudWatchLogsRoleArn":{ "shape":"ArnType", - "documentation":"

The role ARN for the Amazon CloudWatch Logs Logging role for the user import job.

" + "documentation":"

You must specify an IAM role that has permission to log import-job results to Amazon CloudWatch Logs. This parameter is the ARN of that role.

" } }, "documentation":"

Represents the request to create the user import job.

" @@ -4342,7 +4342,7 @@ "members":{ "UserImportJob":{ "shape":"UserImportJobType", - "documentation":"

The job object that represents the user import job.

" + "documentation":"

The details of the user import job.

" } }, "documentation":"

Represents the response from the server to the request to create the user import job.

" @@ -4356,15 +4356,15 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to create a user pool client.

" + "documentation":"

The ID of the user pool where you want to create an app client.

" }, "ClientName":{ "shape":"ClientNameType", - "documentation":"

The client name for the user pool client you would like to create.

" + "documentation":"

A friendly name for the app client that you want to create.

" }, "GenerateSecret":{ "shape":"GenerateSecret", - "documentation":"

Boolean to specify whether you want to generate a secret for the user pool client being created.

" + "documentation":"

When true, generates a client secret for the app client. Client secrets are used with server-side and machine-to-machine applications. For more information, see App client types.

" }, "RefreshTokenValidity":{ "shape":"RefreshTokenValidityType", @@ -4380,7 +4380,7 @@ }, "TokenValidityUnits":{ "shape":"TokenValidityUnitsType", - "documentation":"

The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours.

" + "documentation":"

The units that validity times are represented in. The default unit for refresh tokens is days, and the default for ID and access tokens is hours.

" }, "ReadAttributes":{ "shape":"ClientPermissionListType", @@ -4396,19 +4396,19 @@ }, "SupportedIdentityProviders":{ "shape":"SupportedIdentityProvidersListType", - "documentation":"

A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP.

This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule.

" + "documentation":"

A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP.

This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule.

" }, "CallbackURLs":{ "shape":"CallbackURLsListType", - "documentation":"

A list of allowed redirect (callback) URLs for the IdPs.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" + "documentation":"

A list of allowed redirect (callback) URLs for the IdPs.

A redirect URI must:

See OAuth 2.0 - Redirection Endpoint.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" }, "LogoutURLs":{ "shape":"LogoutURLsListType", - "documentation":"

A list of allowed logout URLs for the IdPs.

" + "documentation":"

A list of allowed logout URLs for managed login authentication. For more information, see Logout endpoint.

" }, "DefaultRedirectURI":{ "shape":"RedirectUrlType", - "documentation":"

The default redirect URI. In app clients with one assigned IdP, replaces redirect_uri in authentication requests. Must be in the CallbackURLs list.

A redirect URI must:

For more information, see Default redirect URI.

Amazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.

App callback URLs such as myapp://example are also supported.

" + "documentation":"

The default redirect URI. In app clients with one assigned IdP, replaces redirect_uri in authentication requests. Must be in the CallbackURLs list.

" }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", @@ -4416,7 +4416,7 @@ }, "AllowedOAuthScopes":{ "shape":"ScopeListType", - "documentation":"

The allowed OAuth scopes. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" + "documentation":"

The OAuth 2.0 scopes that you want to permit your app client to authorize. Scopes govern access control to user pool self-service API operations, user data from the userInfo endpoint, and third-party APIs. Possible values provided by OAuth are phone, email, openid, and profile. Possible values provided by Amazon Web Services are aws.cognito.signin.user.admin. Custom scopes created in Resource Servers are also supported.

" }, "AllowedOAuthFlowsUserPoolClient":{ "shape":"BooleanType", @@ -4424,7 +4424,7 @@ }, "AnalyticsConfiguration":{ "shape":"AnalyticsConfigurationType", - "documentation":"

The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.

In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools only support sending events to Amazon Pinpoint projects in Amazon Web Services Region us-east-1. In Regions where Amazon Pinpoint is available, user pools support sending events to Amazon Pinpoint projects within that same Region.

" + "documentation":"

The user pool analytics configuration for collecting metrics and sending them to your Amazon Pinpoint campaign.

In Amazon Web Services Regions where Amazon Pinpoint isn't available, user pools might not have access to analytics or might be configurable with campaigns in the US East (N. Virginia) Region. For more information, see Using Amazon Pinpoint analytics.

" }, "PreventUserExistenceErrors":{ "shape":"PreventUserExistenceErrorTypes", @@ -4450,7 +4450,7 @@ "members":{ "UserPoolClient":{ "shape":"UserPoolClientType", - "documentation":"

The user pool client that was just created.

" + "documentation":"

The details of the new app client.

" } }, "documentation":"

Represents the response from the server to create a user pool client.

" @@ -4464,7 +4464,7 @@ "members":{ "Domain":{ "shape":"DomainType", - "documentation":"

The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth.

" + "documentation":"

The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For prefix domains, this is the prefix alone, such as myprefix. A prefix value of myprefix for a user pool in the us-east-1 Region results in a domain of myprefix.auth.us-east-1.amazoncognito.com.

" }, "UserPoolId":{ "shape":"UserPoolIdType", @@ -4472,11 +4472,11 @@ }, "ManagedLoginVersion":{ "shape":"WrappedIntegerType", - "documentation":"

The version of managed login branding that you want to apply to your domain. A value of 1 indicates hosted UI (classic) branding and a version of 2 indicates managed login branding.

Managed login requires that your user pool be configured for any feature plan other than Lite.

" + "documentation":"

The version of managed login branding that you want to apply to your domain. A value of 1 indicates hosted UI (classic) and a version of 2 indicates managed login.

Managed login requires that your user pool be configured for any feature plan other than Lite.

" }, "CustomDomainConfig":{ "shape":"CustomDomainConfigType", - "documentation":"

The configuration for a custom domain that hosts the sign-up and sign-in webpages for your application.

Provide this parameter only if you want to use a custom domain for your user pool. Otherwise, you can exclude this parameter and use the Amazon Cognito hosted domain instead.

For more information about the hosted domain and custom domains, see Configuring a User Pool Domain.

" + "documentation":"

The configuration for a custom domain. Configures your domain with a Certificate Manager certificate in the us-east-1 Region.

Provide this parameter only if you want to use a custom domain for your user pool. Otherwise, you can exclude this parameter and use a prefix domain instead.

For more information about the hosted domain and custom domains, see Configuring a User Pool Domain.

" } } }, @@ -4485,7 +4485,7 @@ "members":{ "ManagedLoginVersion":{ "shape":"WrappedIntegerType", - "documentation":"

The version of managed login branding applied your domain. A value of 1 indicates hosted UI (classic) branding and a version of 2 indicates managed login branding.

" + "documentation":"

The version of managed login branding applied to your domain. A value of 1 indicates hosted UI (classic) and a version of 2 indicates managed login.

" }, "CloudFrontDomain":{ "shape":"DomainType", @@ -4499,11 +4499,11 @@ "members":{ "PoolName":{ "shape":"UserPoolNameType", - "documentation":"

A string used to name the user pool.

" + "documentation":"

A friendly name for your user pool.

" }, "Policies":{ "shape":"UserPoolPolicyType", - "documentation":"

The policies associated with the new user pool.

" + "documentation":"

The password policy and sign-in policy in the user pool. The password policy sets options like password complexity requirements and password history. The sign-in policy sets the options available to applications in choice-based authentication.

" }, "DeletionProtection":{ "shape":"DeletionProtectionType", @@ -4515,15 +4515,15 @@ }, "AutoVerifiedAttributes":{ "shape":"VerifiedAttributesListType", - "documentation":"

The attributes to be auto-verified. Possible values: email, phone_number.

" + "documentation":"

The attributes that you want your user pool to automatically verify. Possible values: email, phone_number. For more information, see Verifying contact information at sign-up.

" }, "AliasAttributes":{ "shape":"AliasAttributesListType", - "documentation":"

Attributes supported as an alias for this user pool. Possible values: phone_number, email, or preferred_username.

" + "documentation":"

Attributes supported as an alias for this user pool. Possible values: phone_number, email, or preferred_username. For more information about alias attributes, see Customizing sign-in attributes.

" }, "UsernameAttributes":{ "shape":"UsernameAttributesListType", - "documentation":"

Specifies whether a user can use an email address or phone number as a username when they sign up.

" + "documentation":"

Specifies whether a user can use an email address or phone number as a username when they sign up. For more information, see Customizing sign-in attributes.

" }, "SmsVerificationMessage":{ "shape":"SmsVerificationMessageType", @@ -4547,7 +4547,7 @@ }, "MfaConfiguration":{ "shape":"UserPoolMfaType", - "documentation":"

Specifies MFA configuration details.

" + "documentation":"

Sets multi-factor authentication (MFA) to be on, off, or optional. When ON, all users must set up MFA before they can sign in. When OPTIONAL, your application must make a client-side determination of whether a user wants to register an MFA device. For user pools with adaptive authentication with threat protection, choose OPTIONAL.

" }, "UserAttributeUpdateSettings":{ "shape":"UserAttributeUpdateSettingsType", @@ -4555,7 +4555,7 @@ }, "DeviceConfiguration":{ "shape":"DeviceConfigurationType", - "documentation":"

The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool.

When you provide a value for any DeviceConfiguration field, you activate the Amazon Cognito device-remembering feature.

" + "documentation":"

The device-remembering configuration for a user pool. Device remembering or device tracking is a \"Remember me on this device\" option for user pools that perform authentication with the device key of a trusted device in the back end, instead of a user-provided MFA code. For more information about device authentication, see Working with user devices in your user pool. A null value indicates that you have deactivated device remembering in your user pool.

When you provide a value for any DeviceConfiguration field, you activate the Amazon Cognito device-remembering feature.

" }, "EmailConfiguration":{ "shape":"EmailConfigurationType", @@ -4563,7 +4563,7 @@ }, "SmsConfiguration":{ "shape":"SmsConfigurationType", - "documentation":"

The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management (IAM) role in your Amazon Web Services account.

" + "documentation":"

The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your Amazon Web Services account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the Amazon Web Services Region that you want, the Amazon Cognito user pool uses an Identity and Access Management (IAM) role in your Amazon Web Services account. For more information, see SMS message settings.

" }, "UserPoolTags":{ "shape":"UserPoolTagsType", @@ -4571,11 +4571,11 @@ }, "AdminCreateUserConfig":{ "shape":"AdminCreateUserConfigType", - "documentation":"

The configuration for AdminCreateUser requests.

" + "documentation":"

The configuration for AdminCreateUser requests. Includes the template for the invitation message for new users, the duration of temporary passwords, and permitting self-service sign-up.

" }, "Schema":{ "shape":"SchemaAttributesListType", - "documentation":"

An array of schema attributes for the new user pool. These attributes can be standard or custom attributes.

" + "documentation":"

An array of attributes for the new user pool. You can add custom attributes and modify the properties of default attributes. The specifications in this parameter set the required attributes in your user pool. For more information, see Working with user attributes.

" }, "UserPoolAddOns":{ "shape":"UserPoolAddOnsType", @@ -4583,7 +4583,7 @@ }, "UsernameConfiguration":{ "shape":"UsernameConfigurationType", - "documentation":"

Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to False (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to False (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.

This configuration is immutable after you set it. For more information, see UsernameConfigurationType.

" + "documentation":"

Sets the case sensitivity option for sign-in usernames. When CaseSensitive is false (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, username, USERNAME, or UserName, or for email, email@example.com or EMaiL@eXamplE.Com. For most use cases, set case sensitivity to false as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.

When CaseSensitive is true (case sensitive), Amazon Cognito interprets USERNAME and UserName as distinct users.

This configuration is immutable after you set it.
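To make the options above concrete, here is a minimal boto3 sketch that combines alias attributes, an optional MFA mode, device remembering, and case-insensitive usernames when creating a pool. The pool name, region, and all values are placeholders, and only a small subset of the available CreateUserPool parameters is shown.

```python
import boto3

cognito_idp = boto3.client("cognito-idp", region_name="us-east-1")

response = cognito_idp.create_user_pool(
    PoolName="example-pool",  # placeholder name
    # Let users sign in with an email address or preferred username as an alias.
    AliasAttributes=["email", "preferred_username"],
    # OPTIONAL lets each user decide whether to register an MFA factor.
    MfaConfiguration="OPTIONAL",
    # Providing any DeviceConfiguration field activates device remembering.
    DeviceConfiguration={
        "ChallengeRequiredOnNewDevice": True,
        "DeviceOnlyRememberedOnUserPrompt": True,
    },
    # Case-insensitive usernames; immutable after the pool is created.
    UsernameConfiguration={"CaseSensitive": False},
)
print(response["UserPool"]["Id"])
```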

" }, "AccountRecoverySetting":{ "shape":"AccountRecoverySettingType", @@ -4601,7 +4601,7 @@ "members":{ "UserPool":{ "shape":"UserPoolType", - "documentation":"

A container for the user pool details.

" + "documentation":"

The details of the created user pool.

" } }, "documentation":"

Represents the response from the server for the request to create a user pool.

" @@ -4690,11 +4690,11 @@ "members":{ "GroupName":{ "shape":"GroupNameType", - "documentation":"

The name of the group.

" + "documentation":"

The name of the group that you want to delete.

" }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool where you want to delete the group.

" } } }, @@ -4707,11 +4707,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where you want to delete the identity provider.

" }, "ProviderName":{ "shape":"ProviderNameType", - "documentation":"

The IdP name.

" + "documentation":"

The name of the IdP that you want to delete.

" } } }, @@ -4741,11 +4741,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that hosts the resource server.

" + "documentation":"

The ID of the user pool where you want to delete the resource server.

" }, "Identifier":{ "shape":"ResourceServerIdentifierType", - "documentation":"

The identifier for the resource server.

" + "documentation":"

The identifier of the resource server that you want to delete.

" } } }, @@ -4758,7 +4758,7 @@ "members":{ "UserAttributeNames":{ "shape":"AttributeNameListType", - "documentation":"

An array of strings representing the user attribute names you want to delete.

For custom attributes, you must prependattach the custom: prefix to the front of the attribute name.

" + "documentation":"

An array of strings representing the user attribute names you want to delete.

For custom attributes, you must prepend the custom: prefix to the attribute name, for example custom:department.

" }, "AccessToken":{ "shape":"TokenModelType", @@ -4782,11 +4782,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to delete the client.

" + "documentation":"

The ID of the user pool where you want to delete the client.

" }, "ClientId":{ "shape":"ClientIdType", - "documentation":"

The app client ID of the app associated with the user pool.

" + "documentation":"

The ID of the user pool app client that you want to delete.

" } }, "documentation":"

Represents the request to delete a user pool client.

" @@ -4800,11 +4800,11 @@ "members":{ "Domain":{ "shape":"DomainType", - "documentation":"

The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth.

" + "documentation":"

The domain that you want to delete. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth.

" }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool where you want to delete the domain.

" } } }, @@ -4819,7 +4819,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool you want to delete.

" + "documentation":"

The ID of the user pool that you want to delete.

" } }, "documentation":"

Represents the request to delete a user pool.

" @@ -4844,11 +4844,11 @@ "members":{ "AccessToken":{ "shape":"TokenModelType", - "documentation":"

A valid access token that Amazon Cognito issued to the user whose passkey you want to delete.

" + "documentation":"

A valid access token that Amazon Cognito issued to the user whose passkey credential you want to delete.

" }, "CredentialId":{ "shape":"StringType", - "documentation":"

The unique identifier of the passkey that you want to delete. Look up registered devices with ListWebAuthnCredentials.

" + "documentation":"

The unique identifier of the passkey that you want to delete. Look up registered devices with ListWebAuthnCredentials.
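As a hedged sketch of how these two fields are typically obtained and used together with boto3: list the caller's registered passkeys, then delete one by its credential ID. The Credentials and CredentialId response keys are assumptions based on the ListWebAuthnCredentials description above, and the token and IDs are placeholders.

```python
import boto3

cognito_idp = boto3.client("cognito-idp")

access_token = "eyJ...signed-in-user-access-token"  # placeholder

# Look up the user's registered passkeys, then delete one by credential ID.
creds = cognito_idp.list_web_authn_credentials(AccessToken=access_token)
for cred in creds.get("Credentials", []):
    print(cred.get("CredentialId"))

cognito_idp.delete_web_authn_credential(
    AccessToken=access_token,
    CredentialId="example-credential-id",  # placeholder
)
```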

" } } }, @@ -4884,11 +4884,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool that has the IdP that you want to describe.

" }, "ProviderName":{ "shape":"ProviderNameType", - "documentation":"

The IdP name.

" + "documentation":"

The name of the IdP that you want to describe.

" } } }, @@ -4898,7 +4898,7 @@ "members":{ "IdentityProvider":{ "shape":"IdentityProviderType", - "documentation":"

The identity provider details.

" + "documentation":"

The details of the requested IdP.

" } } }, @@ -4971,7 +4971,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that hosts the resource server.

" + "documentation":"

The ID of the user pool that hosts the resource server.

" }, "Identifier":{ "shape":"ResourceServerIdentifierType", @@ -4985,7 +4985,7 @@ "members":{ "ResourceServer":{ "shape":"ResourceServerType", - "documentation":"

The resource server.

" + "documentation":"

The details of the requested resource server.

" } } }, @@ -4995,11 +4995,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID.

" + "documentation":"

The ID of the user pool with the risk configuration that you want to inspect. You can apply default risk configuration at the user pool level and further customize it from user pool defaults at the app-client level. Specify ClientId to inspect client-level configuration, or UserPoolId to inspect pool-level configuration.

" }, "ClientId":{ "shape":"ClientIdType", - "documentation":"

The app client ID.

" + "documentation":"

The ID of the app client with the risk configuration that you want to inspect. You can apply default risk configuration at the user pool level and further customize it from user pool defaults at the app-client level. Specify ClientId to inspect client-level configuration, or UserPoolId to inspect pool-level configuration.
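For example, a small boto3 sketch of inspecting pool-level defaults versus client-level overrides; the pool and client IDs are placeholders.

```python
import boto3

cognito_idp = boto3.client("cognito-idp")

# Pool-level (default) risk configuration.
pool_level = cognito_idp.describe_risk_configuration(
    UserPoolId="us-east-1_EXAMPLE",
)

# App-client-level configuration, if overrides were set for this client.
client_level = cognito_idp.describe_risk_configuration(
    UserPoolId="us-east-1_EXAMPLE",
    ClientId="1example23456789",
)

print(pool_level["RiskConfiguration"])
print(client_level["RiskConfiguration"])
```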

" } } }, @@ -5009,7 +5009,7 @@ "members":{ "RiskConfiguration":{ "shape":"RiskConfigurationType", - "documentation":"

The risk configuration.

" + "documentation":"

The details of the requested risk configuration.

" } } }, @@ -5022,11 +5022,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that the users are being imported into.

" + "documentation":"

The ID of the user pool that's associated with the import job.

" }, "JobId":{ "shape":"UserImportJobIdType", - "documentation":"

The job ID for the user import job.

" + "documentation":"

The ID of the user import job that you want to describe.

" } }, "documentation":"

Represents the request to describe the user import job.

" @@ -5036,7 +5036,7 @@ "members":{ "UserImportJob":{ "shape":"UserImportJobType", - "documentation":"

The job object that represents the user import job.

" + "documentation":"

The details of the user import job.

" } }, "documentation":"

Represents the response from the server to the request to describe the user import job.

" @@ -5050,11 +5050,11 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool you want to describe.

" + "documentation":"

The ID of the user pool that contains the app client you want to describe.

" }, "ClientId":{ "shape":"ClientIdType", - "documentation":"

The app client ID of the app associated with the user pool.

" + "documentation":"

The ID of the app client that you want to describe.

" } }, "documentation":"

Represents the request to describe a user pool client.

" @@ -5064,7 +5064,7 @@ "members":{ "UserPoolClient":{ "shape":"UserPoolClientType", - "documentation":"

The user pool client from a server response to describe the user pool client.

" + "documentation":"

The details of the requested app client.

" } }, "documentation":"

Represents the response from the server from a request to describe the user pool client.

" @@ -5075,7 +5075,7 @@ "members":{ "Domain":{ "shape":"DomainType", - "documentation":"

The domain string. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth.

" + "documentation":"

The domain that you want to describe. For custom domains, this is the fully-qualified domain name, such as auth.example.com. For Amazon Cognito prefix domains, this is the prefix alone, such as auth.

" } } }, @@ -5084,7 +5084,7 @@ "members":{ "DomainDescription":{ "shape":"DomainDescriptionType", - "documentation":"

A domain description object containing information about the domain.

" + "documentation":"

The details of the requested user pool domain.

" } } }, @@ -5094,7 +5094,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool you want to describe.

" + "documentation":"

The ID of the user pool you want to describe.

" } }, "documentation":"

Represents the request to describe the user pool.

" @@ -5104,7 +5104,7 @@ "members":{ "UserPool":{ "shape":"UserPoolType", - "documentation":"

The container of metadata returned by the server to describe the pool.

" + "documentation":"

The details of the requested user pool.

" } }, "documentation":"

Represents the response to describe the user pool.

" @@ -5307,11 +5307,11 @@ "members":{ "Message":{ "shape":"EmailMfaMessageType", - "documentation":"

The template for the email message that your user pool sends to users with an MFA code. The message must contain the {####} placeholder. In the message, Amazon Cognito replaces this placeholder with the code. If you don't provide this parameter, Amazon Cognito sends messages in the default format.

" + "documentation":"

The template for the email message that your user pool sends to users with a code for MFA and sign-in with an email OTP. The message must contain the {####} placeholder. In the message, Amazon Cognito replaces this placeholder with the code. If you don't provide this parameter, Amazon Cognito sends messages in the default format.

" }, "Subject":{ "shape":"EmailMfaSubjectType", - "documentation":"

The subject of the email message that your user pool sends to users with an MFA code.

" + "documentation":"

The subject of the email message that your user pool sends to users with a code for MFA and email OTP sign-in.

" } }, "documentation":"

Sets or shows user pool email message configuration for MFA. Includes the subject and body of the email message template for MFA messages. To activate this setting, advanced security features must be active in your user pool.

This data type is a request parameter of SetUserPoolMfaConfig and a response parameter of GetUserPoolMfaConfig.

" @@ -5594,11 +5594,11 @@ }, "SecretHash":{ "shape":"SecretHashType", - "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

" + "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values.
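The secret hash itself is a base64-encoded HMAC-SHA256 of the username concatenated with the app client ID, keyed with the app client secret. A minimal sketch with placeholder values:

```python
import base64
import hashlib
import hmac

def secret_hash(username: str, client_id: str, client_secret: str) -> str:
    # HMAC-SHA256 over username + client ID, keyed with the client secret.
    digest = hmac.new(
        client_secret.encode("utf-8"),
        (username + client_id).encode("utf-8"),
        hashlib.sha256,
    ).digest()
    return base64.b64encode(digest).decode()

print(secret_hash("jane", "1example23456789", "example-client-secret"))
```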

" }, "UserContextData":{ "shape":"UserContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "Username":{ "shape":"UsernameType", @@ -5610,7 +5610,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Represents the request to reset a user's password.
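A hedged boto3 sketch of this request, passing ClientMetadata through to the Lambda triggers listed above; the metadata key is arbitrary and only meaningful to your own trigger code, and all values are placeholders.

```python
import boto3

cognito_idp = boto3.client("cognito-idp")

cognito_idp.forgot_password(
    ClientId="1example23456789",   # placeholder app client ID
    Username="jane",               # placeholder username
    # SecretHash is required only when the app client has a secret.
    # SecretHash="...",
    # Forwarded to the pre sign-up, custom message, and user migration triggers.
    ClientMetadata={"source_page": "account-recovery"},
)
```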

" @@ -5632,7 +5632,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that the users are to be imported into.

" + "documentation":"

The ID of the user pool that the users are to be imported into.

" } }, "documentation":"

Represents the request to get the header information of the CSV file for the user import job.

" @@ -5642,7 +5642,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that the users are to be imported into.

" + "documentation":"

The ID of the user pool that the users are to be imported into.

" }, "CSVHeader":{ "shape":"ListOfStringTypes", @@ -5690,7 +5690,7 @@ }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" } } }, @@ -5776,7 +5776,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" }, "ClientId":{ "shape":"ClientIdType", @@ -5811,7 +5811,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your GetUserAttributeVerificationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your GetUserAttributeVerificationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Represents the request to get user attribute verification.

" @@ -6106,7 +6106,7 @@ "members":{ "AuthFlow":{ "shape":"AuthFlowType", - "documentation":"

The authentication flow that you want to initiate. The AuthParameters that you must submit are linked to the flow that you submit. For example:

Valid values include the following:

USER_AUTH

The entry point for sign-in with passwords, one-time passwords, biometric devices, and security keys.

USER_SRP_AUTH

Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow.

REFRESH_TOKEN_AUTH and REFRESH_TOKEN

Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token.

CUSTOM_AUTH

Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers.

USER_PASSWORD_AUTH

Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow.

ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth. ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and isn't valid for InitiateAuth.

" + "documentation":"

The authentication flow that you want to initiate. Each AuthFlow has linked AuthParameters that you must submit. The following are some example flows and their parameters.

All flows

USER_AUTH

The entry point for sign-in with passwords, one-time passwords, and WebAuthN authenticators.

USER_SRP_AUTH

Username-password authentication with the Secure Remote Password (SRP) protocol. For more information, see Use SRP password verification in custom authentication flow.

REFRESH_TOKEN_AUTH and REFRESH_TOKEN

Provide a valid refresh token and receive new ID and access tokens. For more information, see Using the refresh token.

CUSTOM_AUTH

Custom authentication with Lambda triggers. For more information, see Custom authentication challenge Lambda triggers.

USER_PASSWORD_AUTH

Username-password authentication with the password sent directly in the request. For more information, see Admin authentication flow.

ADMIN_USER_PASSWORD_AUTH is a flow type of AdminInitiateAuth and isn't valid for InitiateAuth. ADMIN_NO_SRP_AUTH is a legacy server-side username-password flow and isn't valid for InitiateAuth.
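As an illustrative sketch, a USER_PASSWORD_AUTH call with boto3 might look like the following. The SECRET_HASH entry applies only to app clients with a secret, and every value shown is a placeholder.

```python
import boto3

cognito_idp = boto3.client("cognito-idp")

response = cognito_idp.initiate_auth(
    ClientId="1example23456789",
    AuthFlow="USER_PASSWORD_AUTH",
    AuthParameters={
        "USERNAME": "jane",
        "PASSWORD": "correct-horse-battery-staple",
        # "SECRET_HASH": "...",  # only for app clients with a secret
    },
)

# The response carries either tokens or a challenge to answer
# with RespondToAuthChallenge.
print(response.get("AuthenticationResult") or response.get("ChallengeName"))
```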

" }, "AuthParameters":{ "shape":"AuthParametersType", @@ -6114,7 +6114,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the InitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your InitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input:

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for certain custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the InitiateAuth API action, Amazon Cognito invokes the Lambda functions that are specified for various triggers. The ClientMetadata value is passed as input to the functions for only the following triggers:

When Amazon Cognito invokes the functions for these triggers, it passes a JSON payload, which the function receives as input. This payload contains a validationData attribute, which provides the data that you assigned to the ClientMetadata parameter in your InitiateAuth request. In your function code in Lambda, you can process the validationData value to enhance your workflow for your specific needs.

When you use the InitiateAuth API action, Amazon Cognito also invokes the functions for the following triggers, but it doesn't provide the ClientMetadata value as input:

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" }, "ClientId":{ "shape":"ClientIdType", @@ -6126,7 +6126,7 @@ }, "UserContextData":{ "shape":"UserContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "Session":{ "shape":"SessionType", @@ -6376,7 +6376,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" }, "Limit":{ "shape":"QueryLimitType", @@ -6453,7 +6453,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" }, "MaxResults":{ "shape":"ListResourceServersLimitType", @@ -6507,7 +6507,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that the users are being imported into.

" + "documentation":"

The ID of the user pool that the users are being imported into.

" }, "MaxResults":{ "shape":"PoolQueryLimitType", @@ -6540,7 +6540,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to list user pool clients.

" + "documentation":"

The ID of the user pool where you want to list user pool clients.

" }, "MaxResults":{ "shape":"QueryLimit", @@ -6605,7 +6605,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" }, "GroupName":{ "shape":"GroupNameType", @@ -6640,7 +6640,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool on which the search should be performed.

" + "documentation":"

The ID of the user pool on which the search should be performed.

" }, "AttributesToGet":{ "shape":"SearchedAttributeNamesListType", @@ -6829,7 +6829,7 @@ }, "UseCognitoProvidedValues":{ "shape":"BooleanType", - "documentation":"

When true, applies the default branding style options. This option reverts to a \"blank\" style that you can modify later in the branding designer.

" + "documentation":"

When true, applies the default branding style options. This option reverts to default style options that are managed by Amazon Cognito. You can modify them later in the branding designer.

When you specify true for this option, you must also omit values for Settings and Assets in the request.

" }, "Settings":{ "shape":"Document", @@ -7260,11 +7260,11 @@ }, "SecretHash":{ "shape":"SecretHashType", - "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

" + "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values.

" }, "UserContextData":{ "shape":"UserContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "Username":{ "shape":"UsernameType", @@ -7276,7 +7276,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Represents the request to resend the confirmation code.

" @@ -7409,11 +7409,11 @@ }, "UserContextData":{ "shape":"UserContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: post authentication, pre token generation, define auth challenge, create auth challenge, and verify auth challenge. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: post authentication, pre token generation, define auth challenge, create auth challenge, and verify auth challenge. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

The request to respond to an authentication challenge.

" @@ -7726,7 +7726,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" }, "ClientId":{ "shape":"ClientIdType", @@ -7881,7 +7881,7 @@ }, "SecretHash":{ "shape":"SecretHashType", - "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message.

" + "documentation":"

A keyed-hash message authentication code (HMAC) calculated using the secret key of a user pool client and username plus the client ID in the message. For more information about SecretHash, see Computing secret hash values.

" }, "Username":{ "shape":"UsernameType", @@ -7905,11 +7905,11 @@ }, "UserContextData":{ "shape":"UserContextDataType", - "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

" + "documentation":"

Contextual data about your user session, such as the device fingerprint, IP address, or location. Amazon Cognito advanced security evaluates the risk of an authentication event based on the context that your app generates and passes to Amazon Cognito when it makes API requests.

For more information, see Collecting data for threat protection in applications.

" }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Represents the request to register a user.

" @@ -8038,7 +8038,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that the users are being imported into.

" + "documentation":"

The ID of the user pool that the users are being imported into.

" }, "JobId":{ "shape":"UserImportJobIdType", @@ -8093,7 +8093,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool that the users are being imported into.

" + "documentation":"

The ID of the user pool that the users are being imported into.

" }, "JobId":{ "shape":"UserImportJobIdType", @@ -8422,7 +8422,7 @@ }, "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" }, "Description":{ "shape":"DescriptionType", @@ -8530,7 +8530,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool.

" + "documentation":"

The ID of the user pool.

" }, "Identifier":{ "shape":"ResourceServerIdentifierType", @@ -8573,7 +8573,7 @@ }, "ClientMetadata":{ "shape":"ClientMetadataType", - "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action initiates.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, remember that Amazon Cognito won't do the following:

" + "documentation":"

A map of custom key-value pairs that you can provide as input for any custom workflows that this action initiates.

You create custom workflows by assigning Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing user pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

When you use the ClientMetadata parameter, note that Amazon Cognito won't do the following:

" } }, "documentation":"

Represents the request to update user attributes.

" @@ -8597,7 +8597,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool where you want to update the user pool client.

" + "documentation":"

The ID of the user pool where you want to update the user pool client.

" }, "ClientId":{ "shape":"ClientIdType", @@ -8637,7 +8637,7 @@ }, "SupportedIdentityProviders":{ "shape":"SupportedIdentityProvidersListType", - "documentation":"

A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP.

This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule.

" + "documentation":"

A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP.

This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule.
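For example, a minimal boto3 sketch that limits an app client to local Cognito sign-in plus one configured IdP; the pool ID, client ID, and IdP name are placeholders. Because update calls can reset fields that are omitted from the request, in practice you would usually read the current client configuration first.

```python
import boto3

cognito_idp = boto3.client("cognito-idp")

cognito_idp.update_user_pool_client(
    UserPoolId="us-east-1_EXAMPLE",
    ClientId="1example23456789",
    # COGNITO keeps local sign-in available in managed login;
    # "MySAMLIdP" stands in for an IdP name configured in this pool.
    SupportedIdentityProviders=["COGNITO", "MySAMLIdP"],
)
```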

" }, "CallbackURLs":{ "shape":"CallbackURLsListType", @@ -8742,7 +8742,7 @@ "members":{ "UserPoolId":{ "shape":"UserPoolIdType", - "documentation":"

The user pool ID for the user pool you want to update.

" + "documentation":"

The ID of the user pool you want to update.

" }, "Policies":{ "shape":"UserPoolPolicyType", @@ -9101,7 +9101,7 @@ }, "SupportedIdentityProviders":{ "shape":"SupportedIdentityProvidersListType", - "documentation":"

A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP.

This setting applies to providers that you can access with the hosted UI and OAuth 2.0 authorization server. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule.

" + "documentation":"

A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: COGNITO, Facebook, Google, SignInWithApple, and LoginWithAmazon. You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example MySAMLIdP or MyOIDCIdP.

This setting applies to providers that you can access with managed login. The removal of COGNITO from this list doesn't prevent authentication operations for local users with the user pools API in an Amazon Web Services SDK. The only way to prevent API-based authentication is to block access with a WAF rule.

" }, "CallbackURLs":{ "shape":"CallbackURLsListType", @@ -9639,7 +9639,7 @@ }, "UserVerification":{ "shape":"UserVerificationType", - "documentation":"

Sets or displays your user-pool treatment for MFA with a passkey. You can override other MFA options and require passkey MFA, or you can set it as preferred. When passkey MFA is preferred, the hosted UI encourages users to register a passkey at sign-in.

" + "documentation":"

When required, users can only register and sign in with passkeys that are capable of user verification. When preferred, your user pool doesn't require the use of authenticators with user verification but encourages it.

" } }, "documentation":"

Settings for multi-factor authentication (MFA) with passkey (WebAuthn) authenticators, such as biometric and security-key devices, in a user pool. Configures the following:

This data type is a request parameter of SetUserPoolMfaConfig and a response parameter of GetUserPoolMfaConfig.
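A hedged boto3 sketch of setting this preference through SetUserPoolMfaConfig, assuming the WebAuthnConfiguration member carries the relying-party ID and user-verification setting described here; the pool ID and domain are placeholders.

```python
import boto3

cognito_idp = boto3.client("cognito-idp")

cognito_idp.set_user_pool_mfa_config(
    UserPoolId="us-east-1_EXAMPLE",
    WebAuthnConfiguration={
        # Typically the domain that hosts sign-in for this pool (placeholder).
        "RelyingPartyId": "auth.example.com",
        # "preferred" encourages user-verifying authenticators;
        # "required" accepts only passkeys capable of user verification.
        "UserVerification": "preferred",
    },
)
```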

" @@ -9725,5 +9725,5 @@ "WrappedBooleanType":{"type":"boolean"}, "WrappedIntegerType":{"type":"integer"} }, - "documentation":"

With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To authenticate users from third-party identity providers (IdPs) in this API, you can link IdP users to native user profiles. Learn more about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference.

This API reference provides detailed information about API operations and object types in Amazon Cognito.

Along with resource management operations, the Amazon Cognito user pools API includes classes of operations and authorization models for client-side and server-side authentication of users. You can interact with operations in the Amazon Cognito user pools API as any of the following subjects.

  1. An administrator who wants to configure user pools, app clients, users, groups, or other user pool functions.

  2. A server-side app, like a web application, that wants to use its Amazon Web Services privileges to manage, authenticate, or authorize a user.

  3. A client-side app, like a mobile app, that wants to make unauthenticated requests to manage, authenticate, or authorize a user.

For more information, see Using the Amazon Cognito user pools API and user pool endpoints in the Amazon Cognito Developer Guide.

With your Amazon Web Services SDK, you can build the logic to support operational flows in every use case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started with the CognitoIdentityProvider client in other supported Amazon Web Services SDKs.

To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services SDKs.

" + "documentation":"

With the Amazon Cognito user pools API, you can configure user pools and authenticate users. To authenticate users from third-party identity providers (IdPs) in this API, you can link IdP users to native user profiles. Learn more about the authentication and authorization of federated users at Adding user pool sign-in through a third party and in the User pool federation endpoints and hosted UI reference.

This API reference provides detailed information about API operations and object types in Amazon Cognito.

Along with resource management operations, the Amazon Cognito user pools API includes classes of operations and authorization models for client-side and server-side authentication of users. You can interact with operations in the Amazon Cognito user pools API as any of the following subjects.

  1. An administrator who wants to configure user pools, app clients, users, groups, or other user pool functions.

  2. A server-side app, like a web application, that wants to use its Amazon Web Services privileges to manage, authenticate, or authorize a user.

  3. A client-side app, like a mobile app, that wants to make unauthenticated requests to manage, authenticate, or authorize a user.

For more information, see Using the Amazon Cognito user pools API and user pool endpoints in the Amazon Cognito Developer Guide.

With your Amazon Web Services SDK, you can build the logic to support operational flows in every use case for this API. You can also make direct REST API requests to Amazon Cognito user pools service endpoints. The following links can get you started with the CognitoIdentityProvider client in other supported Amazon Web Services SDKs.

To get started with an Amazon Web Services SDK, see Tools to Build on Amazon Web Services. For example actions and scenarios, see Code examples for Amazon Cognito Identity Provider using Amazon Web Services SDKs.

" } diff --git a/botocore/data/controlcatalog/2018-05-10/service-2.json b/botocore/data/controlcatalog/2018-05-10/service-2.json index 139fd5ad55..eaf2ab5760 100644 --- a/botocore/data/controlcatalog/2018-05-10/service-2.json +++ b/botocore/data/controlcatalog/2018-05-10/service-2.json @@ -384,7 +384,7 @@ "documentation":"

A string that describes a control's implementation type.

" } }, - "documentation":"

An object that describes the implementation type for a control.

Our ImplementationDetails Type format has three required segments:

For example, AWS::Config::ConfigRule or AWS::SecurityHub::SecurityControl resources have the format with three required segments.

Our ImplementationDetails Type format has an optional fourth segment, which is present for applicable implementation types. The format is as follows:

For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY or AWS::CloudFormation::Type::HOOK have the format with four segments.

Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values, and we do not use CloudFormation to implement these controls.

" + "documentation":"

An object that describes the implementation type for a control.

Our ImplementationDetails Type format has three required segments:

For example, AWS::Config::ConfigRule or AWS::SecurityHub::SecurityControl resources have the format with three required segments.

Our ImplementationDetails Type format has an optional fourth segment, which is present for applicable implementation types. The format is as follows:

For example, AWS::Organizations::Policy::SERVICE_CONTROL_POLICY or AWS::CloudFormation::Type::HOOK have the format with four segments.

Although the format is similar, the values for the Type field do not match any Amazon Web Services CloudFormation values.

" }, "ImplementationType":{ "type":"string", diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index e55f2a9295..86100c0cb3 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -873,6 +873,12 @@ "documentation":"

An optional parameter that indicates the number of attempts for the job. If not specified, this value defaults to the attempt of the latest job.

", "location":"querystring", "locationName":"attempt" + }, + "accessSystemProfileLogs":{ + "shape":"Boolean", + "documentation":"

Allows access to system profile logs for Lake Formation-enabled jobs. Default is false.
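A small boto3 sketch of requesting the dashboard URL with this new flag for a Lake Formation-enabled job run; the application and job run IDs are placeholders.

```python
import boto3

emr_serverless = boto3.client("emr-serverless")

response = emr_serverless.get_dashboard_for_job_run(
    applicationId="00example1234567",   # placeholder
    jobRunId="00examplejobrun890",      # placeholder
    # New querystring parameter from this model update; defaults to False.
    accessSystemProfileLogs=True,
)
print(response["url"])
```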

", + "location":"querystring", + "locationName":"accessSystemProfileLogs" } } }, @@ -987,7 +993,7 @@ "type":"string", "max":1024, "min":1, - "pattern":"([a-z0-9]+[a-z0-9-.]*)\\/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*)(?:\\:([a-zA-Z0-9_][a-zA-Z0-9-._]{0,299})|@(sha256:[0-9a-f]{64}))" + "pattern":"([0-9]{12})\\.dkr\\.ecr\\.([a-z0-9-]+).([a-z0-9._-]+)\\/((?:[a-z0-9]+(?:[-._][a-z0-9]+)*/)*[a-z0-9]+(?:[-._][a-z0-9]+)*)(?::([a-zA-Z0-9_]+[a-zA-Z0-9-._]*)|@(sha256:[0-9a-f]{64}))" }, "InitScriptPath":{ "type":"string", diff --git a/botocore/data/mgh/2017-05-31/endpoint-rule-set-1.json b/botocore/data/mgh/2017-05-31/endpoint-rule-set-1.json index 0881bf26b5..cf2fecfc19 100644 --- a/botocore/data/mgh/2017-05-31/endpoint-rule-set-1.json +++ b/botocore/data/mgh/2017-05-31/endpoint-rule-set-1.json @@ -32,38 +32,83 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { - "ref": "Region" + "ref": "Endpoint" } - ], - "assign": "PartitionResult" + ] } ], - "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "booleanEquals", "argv": [ { - "ref": "Endpoint" - } + "ref": "UseFIPS" + }, + true ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ { - "fn": "parseURL", + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", "argv": [ { - "ref": "Endpoint" + "ref": "Region" } ], - "assign": "url" + "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -75,158 +120,103 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mgh-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" }, { "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } - ] - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + ], + "type": "tree" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, - { - "fn": 
"getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + true ] } ], - "type": "tree", "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://mgh-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsFIPS" + true ] } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", + ], "rules": [ { "conditions": [], @@ -237,79 +227,88 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } - ] + ], + "type": "tree" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseDualStack" + }, + true ] } ], - "type": "tree", "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://mgh.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://mgh.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://mgh.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://mgh.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + ], + "type": "tree" } - ] + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/mgh/2017-05-31/paginators-1.json b/botocore/data/mgh/2017-05-31/paginators-1.json index 97efd0a5a9..db029bfdb1 100644 --- a/botocore/data/mgh/2017-05-31/paginators-1.json +++ b/botocore/data/mgh/2017-05-31/paginators-1.json @@ -29,6 +29,18 @@ "limit_key": "MaxResults", "output_token": 
"NextToken", "result_key": "ApplicationStateList" + }, + "ListMigrationTaskUpdates": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MigrationTaskUpdateList" + }, + "ListSourceResources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SourceResourceList" } } } diff --git a/botocore/data/mgh/2017-05-31/service-2.json b/botocore/data/mgh/2017-05-31/service-2.json index ba8a920735..8684b95f5f 100644 --- a/botocore/data/mgh/2017-05-31/service-2.json +++ b/botocore/data/mgh/2017-05-31/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"mgh", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Migration Hub", "serviceId":"Migration Hub", "signatureVersion":"v4", "targetPrefix":"AWSMigrationHub", - "uid":"AWSMigrationHub-2017-05-31" + "uid":"AWSMigrationHub-2017-05-31", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateCreatedArtifact":{ @@ -55,6 +57,26 @@ ], "documentation":"

Associates a discovered resource ID from Application Discovery Service with a migration task.

" }, + "AssociateSourceResource":{ + "name":"AssociateSourceResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateSourceResourceRequest"}, + "output":{"shape":"AssociateSourceResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Associates a source resource with a migration task. For example, the source resource can be a source server, an application, or a migration wave.
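As a quick illustration of the new operation, here is a minimal boto3 sketch; the client name ("mgh") and the request members (ProgressUpdateStream, MigrationTaskName, SourceResource) come from this model, while the stream, task, and ARN values are placeholders:

import boto3

# Placeholder stream/task names and resource ARN; call from your Migration Hub home region.
mgh = boto3.client("mgh", region_name="us-west-2")

mgh.associate_source_resource(
    ProgressUpdateStream="my-migration-tool",
    MigrationTaskName="migrate-db-01",
    SourceResource={
        "Name": "arn:aws:ec2:us-west-2:111122223333:instance/i-0abcd1234example",
        "Description": "Source server for the database migration",
        "StatusDetail": "Replication in progress",
    },
)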

" + }, "CreateProgressUpdateStream":{ "name":"CreateProgressUpdateStream", "http":{ @@ -177,6 +199,26 @@ ], "documentation":"

Disassociates an Application Discovery Service discovered resource from a migration task.

" }, + "DisassociateSourceResource":{ + "name":"DisassociateSourceResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateSourceResourceRequest"}, + "output":{"shape":"DisassociateSourceResourceResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"DryRunOperation"}, + {"shape":"UnauthorizedOperation"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the association between a source resource and a migration task.

" + }, "ImportMigrationTask":{ "name":"ImportMigrationTask", "http":{ @@ -254,6 +296,24 @@ ], "documentation":"

Lists discovered resources associated with the given MigrationTask.

" }, + "ListMigrationTaskUpdates":{ + "name":"ListMigrationTaskUpdates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMigrationTaskUpdatesRequest"}, + "output":{"shape":"ListMigrationTaskUpdatesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

This is a paginated API that returns all the migration-task states for the specified MigrationTaskName and ProgressUpdateStream.
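Because this patch also registers a ListMigrationTaskUpdates paginator (see the paginators-1.json change above), iterating over every update can be sketched as follows; the stream and task names are placeholders:

import boto3

mgh = boto3.client("mgh")
paginator = mgh.get_paginator("list_migration_task_updates")

for page in paginator.paginate(
    ProgressUpdateStream="my-migration-tool",   # placeholder
    MigrationTaskName="migrate-db-01",          # placeholder
):
    # MigrationTaskUpdateList is the result key declared in the new paginator entry.
    for update in page.get("MigrationTaskUpdateList", []):
        print(update["UpdateDateTime"], update["UpdateType"])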

" + }, "ListMigrationTasks":{ "name":"ListMigrationTasks", "http":{ @@ -292,6 +352,24 @@ ], "documentation":"

Lists progress update streams associated with the user account making this call.

" }, + "ListSourceResources":{ + "name":"ListSourceResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSourceResourcesRequest"}, + "output":{"shape":"ListSourceResourcesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists all the source resources that are associated with the specified MigrationTaskName and ProgressUpdateStream.
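A sketch of manual pagination using the NextToken and MaxResults members defined below (the new get_paginator("list_source_resources") entry works just as well); the stream and task names are placeholders:

import boto3

mgh = boto3.client("mgh")
kwargs = {
    "ProgressUpdateStream": "my-migration-tool",  # placeholder
    "MigrationTaskName": "migrate-db-01",         # placeholder
    "MaxResults": 10,
}
while True:
    page = mgh.list_source_resources(**kwargs)
    for resource in page.get("SourceResourceList", []):
        print(resource["Name"], resource.get("StatusDetail", ""))
    if "NextToken" not in page:
        break
    kwargs["NextToken"] = page["NextToken"]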

" + }, "NotifyApplicationState":{ "name":"NotifyApplicationState", "http":{ @@ -472,6 +550,37 @@ "members":{ } }, + "AssociateSourceResourceRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "SourceResource" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

A unique identifier that references the migration task. Do not include sensitive data in this field.

" + }, + "SourceResource":{ + "shape":"SourceResource", + "documentation":"

The source resource that you want to associate.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

This is an optional parameter that you can use to test whether the call will succeed. Set this parameter to true to verify that you have the permissions that are required to make the call, and that you have specified the other parameters in the call correctly.
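A hedged sketch of a dry-run permission check. It assumes that boto3 surfaces the DryRunOperation and UnauthorizedOperation error shapes listed above as client exceptions, and that a successful dry run is reported through a DryRunOperation error rather than a normal response (an assumption not spelled out in this model); no change is made either way:

import boto3

mgh = boto3.client("mgh")
try:
    mgh.associate_source_resource(
        ProgressUpdateStream="my-migration-tool",   # placeholder
        MigrationTaskName="migrate-db-01",          # placeholder
        SourceResource={"Name": "my-source-server"},
        DryRun=True,
    )
except mgh.exceptions.DryRunOperation:
    print("Authorized: the real call would have succeeded.")
except mgh.exceptions.UnauthorizedOperation:
    print("Not authorized to make this call.")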

" + } + } + }, + "AssociateSourceResourceResult":{ + "type":"structure", + "members":{ + } + }, "ConfigurationId":{ "type":"string", "max":1600, @@ -658,6 +767,37 @@ "members":{ } }, + "DisassociateSourceResourceRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName", + "SourceResourceName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

A unique identifier that references the migration task. Do not include sensitive data in this field.

" + }, + "SourceResourceName":{ + "shape":"SourceResourceName", + "documentation":"

The name that was specified for the source resource.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

This is an optional parameter that you can use to test whether the call will succeed. Set this parameter to true to verify that you have the permissions that are required to make the call, and that you have specified the other parameters in the call correctly.

" + } + } + }, + "DisassociateSourceResourceResult":{ + "type":"structure", + "members":{ + } + }, "DiscoveredResource":{ "type":"structure", "required":["ConfigurationId"], @@ -856,6 +996,44 @@ } } }, + "ListMigrationTaskUpdatesRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

A unique identifier that references the migration task. Do not include sensitive data in this field.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If NextToken was returned by a previous call, there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, specify the NextToken value that the previous call returned. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to include in the response. If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results.

" + } + } + }, + "ListMigrationTaskUpdatesResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

If the response includes a NextToken value, that means that there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, call this API again and specify this NextToken value in the request. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + }, + "MigrationTaskUpdateList":{ + "shape":"MigrationTaskUpdateList", + "documentation":"

The list of migration-task updates.

" + } + } + }, "ListMigrationTasksRequest":{ "type":"structure", "members":{ @@ -912,6 +1090,44 @@ } } }, + "ListSourceResourcesRequest":{ + "type":"structure", + "required":[ + "ProgressUpdateStream", + "MigrationTaskName" + ], + "members":{ + "ProgressUpdateStream":{ + "shape":"ProgressUpdateStream", + "documentation":"

The name of the progress-update stream, which is used for access control as well as a namespace for migration-task names that is implicitly linked to your AWS account. The progress-update stream must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account.

" + }, + "MigrationTaskName":{ + "shape":"MigrationTaskName", + "documentation":"

A unique identifier that references the migration task. Do not store confidential data in this field.

" + }, + "NextToken":{ + "shape":"Token", + "documentation":"

If NextToken was returned by a previous call, there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, specify the NextToken value that the previous call returned. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + }, + "MaxResults":{ + "shape":"MaxResultsSourceResources", + "documentation":"

The maximum number of results to include in the response. If more results exist than the value that you specify here for MaxResults, the response will include a token that you can use to retrieve the next set of results.

" + } + } + }, + "ListSourceResourcesResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"Token", + "documentation":"

If the response includes a NextToken value, that means that there are more results available. The value of NextToken is a unique pagination token for each page. To retrieve the next page of results, call this API again and specify this NextToken value in the request. Keep all other arguments unchanged. Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + }, + "SourceResourceList":{ + "shape":"SourceResourceList", + "documentation":"

The list of source resources.

" + } + } + }, "MaxResults":{ "type":"integer", "box":true, @@ -930,6 +1146,12 @@ "max":10, "min":1 }, + "MaxResultsSourceResources":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, "MigrationTask":{ "type":"structure", "members":{ @@ -996,6 +1218,25 @@ "type":"list", "member":{"shape":"MigrationTaskSummary"} }, + "MigrationTaskUpdate":{ + "type":"structure", + "members":{ + "UpdateDateTime":{ + "shape":"UpdateDateTime", + "documentation":"

The timestamp for the update.

" + }, + "UpdateType":{ + "shape":"UpdateType", + "documentation":"

The type of the update.

" + }, + "MigrationTaskState":{"shape":"Task"} + }, + "documentation":"

A migration-task progress update.

" + }, + "MigrationTaskUpdateList":{ + "type":"list", + "member":{"shape":"MigrationTaskUpdate"} + }, "NextUpdateSeconds":{ "type":"integer", "min":0 @@ -1205,6 +1446,40 @@ "exception":true, "fault":true }, + "SourceResource":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"SourceResourceName", + "documentation":"

This is the name that you want to use to identify the resource. If the resource is an AWS resource, we recommend that you set this parameter to the ARN of the resource.

" + }, + "Description":{ + "shape":"SourceResourceDescription", + "documentation":"

A free-form text description that you can use to record additional detail about the resource for clarity or later reference.

" + }, + "StatusDetail":{ + "shape":"StatusDetail", + "documentation":"

A free-form description of the status of the resource.

" + } + }, + "documentation":"

A source resource can be a source server, a migration wave, an application, or any other resource that you track.

" + }, + "SourceResourceDescription":{ + "type":"string", + "max":500, + "min":0, + "pattern":"^.{0,500}$" + }, + "SourceResourceList":{ + "type":"list", + "member":{"shape":"SourceResource"} + }, + "SourceResourceName":{ + "type":"string", + "max":1600, + "min":1 + }, "Status":{ "type":"string", "enum":[ @@ -1216,9 +1491,9 @@ }, "StatusDetail":{ "type":"string", - "max":500, + "max":2500, "min":0, - "pattern":"^.{0,500}$" + "pattern":"^.{0,2500}$" }, "Task":{ "type":"structure", @@ -1269,7 +1544,11 @@ "documentation":"

Exception raised to indicate a request was not authorized when the DryRun flag is set to \"true\".

", "exception":true }, - "UpdateDateTime":{"type":"timestamp"} + "UpdateDateTime":{"type":"timestamp"}, + "UpdateType":{ + "type":"string", + "enum":["MIGRATION_TASK_STATE_UPDATED"] + } }, "documentation":"

The AWS Migration Hub API methods help to obtain server and application migration status and integrate your resource-specific migration tool by providing a programmatic interface to Migration Hub.

Remember that you must set your AWS Migration Hub home region before you call any of these APIs, or a HomeRegionNotSetException error will be returned. Also, you must make the API calls while in your home region.
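A hedged sketch of honoring the home-region requirement before calling the APIs in this file. The migrationhub-config client and its get_home_region call are assumptions (they are not part of this patch), as is the exact HomeRegionNotSetException exception class on the client:

import boto3

# Assumption: the Migration Hub Config service exposes the home region via get_home_region().
home_region = boto3.client("migrationhub-config").get_home_region()["HomeRegion"]

mgh = boto3.client("mgh", region_name=home_region)
try:
    print(mgh.list_progress_update_streams())
except mgh.exceptions.HomeRegionNotSetException:
    print("Set an AWS Migration Hub home region before calling these APIs.")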

" } diff --git a/botocore/data/sesv2/2019-09-27/endpoint-rule-set-1.json b/botocore/data/sesv2/2019-09-27/endpoint-rule-set-1.json index 658dc3ff9e..373ce6fa02 100644 --- a/botocore/data/sesv2/2019-09-27/endpoint-rule-set-1.json +++ b/botocore/data/sesv2/2019-09-27/endpoint-rule-set-1.json @@ -26,9 +26,199 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "EndpointId": { + "required": false, + "documentation": "Operation parameter for EndpointId", + "type": "String" } }, "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "EndpointId" + } + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + }, + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isValidHostLabel", + "argv": [ + { + "ref": "EndpointId" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4a", + "signingName": "ses", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://{EndpointId}.endpoints.email.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4a", + "signingName": "ses", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://{EndpointId}.endpoints.email.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4a", + "signingName": "ses", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "EndpointId must be a valid host label", + "type": "error" + } + ], + "type": "tree" + }, { "conditions": [ { diff --git a/botocore/data/sesv2/2019-09-27/paginators-1.json b/botocore/data/sesv2/2019-09-27/paginators-1.json index ea142457a6..66721166cf 100644 --- a/botocore/data/sesv2/2019-09-27/paginators-1.json +++ b/botocore/data/sesv2/2019-09-27/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListMultiRegionEndpoints": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize", + "result_key": "MultiRegionEndpoints" + } + } } diff --git a/botocore/data/sesv2/2019-09-27/service-2.json b/botocore/data/sesv2/2019-09-27/service-2.json index 040f43de05..6018ffc67a 100644 --- a/botocore/data/sesv2/2019-09-27/service-2.json +++ 
b/botocore/data/sesv2/2019-09-27/service-2.json @@ -250,6 +250,22 @@ ], "documentation":"

Creates an import job for a data destination.

" }, + "CreateMultiRegionEndpoint":{ + "name":"CreateMultiRegionEndpoint", + "http":{ + "method":"POST", + "requestUri":"/v2/email/multi-region-endpoints" + }, + "input":{"shape":"CreateMultiRegionEndpointRequest"}, + "output":{"shape":"CreateMultiRegionEndpointResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Creates a multi-region endpoint (global-endpoint).

The primary region is the AWS-Region where the operation is executed. The secondary region must be provided in the request's parameters. From a data-flow standpoint there is no difference between the primary and secondary regions: sending traffic is split equally between the two. The primary region is the region where the resource is created and where it can be managed.
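A minimal sketch of creating an endpoint and waiting for it to become usable, using only names defined in this model; the endpoint name and the secondary Region are placeholders, and RoutesDetails carries exactly one route (the secondary Region), as the Details shape below notes:

import time
import boto3

# The Region used for the client becomes the primary Region of the endpoint.
sesv2 = boto3.client("sesv2", region_name="us-east-1")

resp = sesv2.create_multi_region_endpoint(
    EndpointName="my-global-endpoint",                     # placeholder
    Details={"RoutesDetails": [{"Region": "eu-west-1"}]},  # placeholder secondary Region
)
print(resp["Status"], resp["EndpointId"])

# Poll until the endpoint leaves CREATING (Status enum: CREATING, READY, FAILED, DELETING).
while sesv2.get_multi_region_endpoint(EndpointName="my-global-endpoint")["Status"] == "CREATING":
    time.sleep(30)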

" + }, "DeleteConfigurationSet":{ "name":"DeleteConfigurationSet", "http":{ @@ -389,6 +405,22 @@ ], "documentation":"

Deletes an email template.

You can execute this operation no more than once per second.

" }, + "DeleteMultiRegionEndpoint":{ + "name":"DeleteMultiRegionEndpoint", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/multi-region-endpoints/{EndpointName}" + }, + "input":{"shape":"DeleteMultiRegionEndpointRequest"}, + "output":{"shape":"DeleteMultiRegionEndpointResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Deletes a multi-region endpoint (global-endpoint).

Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where the operation is executed can be deleted.

" + }, "DeleteSuppressedDestination":{ "name":"DeleteSuppressedDestination", "http":{ @@ -703,6 +735,21 @@ ], "documentation":"

Provides information about a specific message, including the from address, the subject, the recipient address, email tags, as well as events associated with the message.

You can execute this operation no more than once per second.

" }, + "GetMultiRegionEndpoint":{ + "name":"GetMultiRegionEndpoint", + "http":{ + "method":"GET", + "requestUri":"/v2/email/multi-region-endpoints/{EndpointName}" + }, + "input":{"shape":"GetMultiRegionEndpointRequest"}, + "output":{"shape":"GetMultiRegionEndpointResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Displays the multi-region endpoint (global-endpoint) configuration.

Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where the operation is executed can be displayed.

" + }, "GetSuppressedDestination":{ "name":"GetSuppressedDestination", "http":{ @@ -875,6 +922,20 @@ ], "documentation":"

Lists all of the import jobs.

" }, + "ListMultiRegionEndpoints":{ + "name":"ListMultiRegionEndpoints", + "http":{ + "method":"GET", + "requestUri":"/v2/email/multi-region-endpoints" + }, + "input":{"shape":"ListMultiRegionEndpointsRequest"}, + "output":{"shape":"ListMultiRegionEndpointsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Lists the multi-region endpoints (global-endpoints).

Only multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where the operation is executed will be listed.
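Since this patch also adds a ListMultiRegionEndpoints paginator, listing the endpoints owned by the calling Region can be sketched as:

import boto3

sesv2 = boto3.client("sesv2")
paginator = sesv2.get_paginator("list_multi_region_endpoints")

for page in paginator.paginate(PaginationConfig={"PageSize": 100}):
    for endpoint in page.get("MultiRegionEndpoints", []):
        print(endpoint["EndpointName"], endpoint["EndpointId"], endpoint["Status"])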

" + }, "ListRecommendations":{ "name":"ListRecommendations", "http":{ @@ -2333,6 +2394,42 @@ }, "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" }, + "CreateMultiRegionEndpointRequest":{ + "type":"structure", + "required":[ + "EndpointName", + "Details" + ], + "members":{ + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

The name of the multi-region endpoint (global-endpoint).

" + }, + "Details":{ + "shape":"Details", + "documentation":"

Contains details of a multi-region endpoint (global-endpoint) being created.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of objects that define the tags (keys and values) to associate with the multi-region endpoint (global-endpoint).

" + } + }, + "documentation":"

Represents a request to create a multi-region endpoint (global-endpoint).

" + }, + "CreateMultiRegionEndpointResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "documentation":"

The status of the multi-region endpoint (global-endpoint) immediately after the create request.

" + }, + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

The ID of the multi-region endpoint (global-endpoint).

" + } + }, + "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" + }, "CustomRedirectDomain":{ "type":"string", "documentation":"

The domain to use for tracking open and click events.

" @@ -2666,6 +2763,29 @@ }, "documentation":"

If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

" }, + "DeleteMultiRegionEndpointRequest":{ + "type":"structure", + "required":["EndpointName"], + "members":{ + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

The name of the multi-region endpoint (global-endpoint) to be deleted.

", + "location":"uri", + "locationName":"EndpointName" + } + }, + "documentation":"

Represents a request to delete a multi-region endpoint (global-endpoint).

" + }, + "DeleteMultiRegionEndpointResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"Status", + "documentation":"

The status of the multi-region endpoint (global-endpoint) immediately after the delete request.

" + } + }, + "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" + }, "DeleteSuppressedDestinationRequest":{ "type":"structure", "required":["EmailAddress"], @@ -2789,6 +2909,17 @@ }, "documentation":"

An object that describes the recipients for an email.

Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.

" }, + "Details":{ + "type":"structure", + "required":["RoutesDetails"], + "members":{ + "RoutesDetails":{ + "shape":"RoutesDetails", + "documentation":"

A list of route configuration details. Must contain exactly one route configuration.

" + } + }, + "documentation":"

An object that contains configuration details of a multi-region endpoint (global-endpoint).

" + }, "DiagnosticCode":{"type":"string"}, "DimensionName":{ "type":"string", @@ -3162,6 +3293,17 @@ }, "Enabled":{"type":"boolean"}, "EnabledWrapper":{"type":"boolean"}, + "EndpointId":{ + "type":"string", + "documentation":"

The ID of the multi-region endpoint (global-endpoint).

" + }, + "EndpointName":{ + "type":"string", + "documentation":"

The name of the multi-region endpoint (global-endpoint).

", + "max":64, + "min":1, + "pattern":"^[\\w\\-_]+$" + }, "EngagementEventType":{ "type":"string", "documentation":"

The type of delivery events:

", @@ -4216,6 +4358,49 @@ }, "documentation":"

Information about a message.

" }, + "GetMultiRegionEndpointRequest":{ + "type":"structure", + "required":["EndpointName"], + "members":{ + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

The name of the multi-region endpoint (global-endpoint).

", + "location":"uri", + "locationName":"EndpointName" + } + }, + "documentation":"

Represents a request to display the multi-region endpoint (global-endpoint).

" + }, + "GetMultiRegionEndpointResponse":{ + "type":"structure", + "members":{ + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

The name of the multi-region endpoint (global-endpoint).

" + }, + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

The ID of the multi-region endpoint (global-endpoint).

" + }, + "Routes":{ + "shape":"Routes", + "documentation":"

Contains route information for the multi-region endpoint (global-endpoint).

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the multi-region endpoint (global-endpoint).

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time stamp of when the multi-region endpoint (global-endpoint) was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time stamp of when the multi-region endpoint (global-endpoint) was last updated.

" + } + }, + "documentation":"

An HTTP 200 response if the request succeeds, or an error message if the request fails.

" + }, "GetSuppressedDestinationRequest":{ "type":"structure", "required":["EmailAddress"], @@ -4942,6 +5127,38 @@ }, "documentation":"

An object used to specify a list or topic to which an email belongs, which will be used when a contact chooses to unsubscribe.

" }, + "ListMultiRegionEndpointsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextTokenV2", + "documentation":"

A token returned from a previous call to ListMultiRegionEndpoints to indicate the position in the list of multi-region endpoints (global-endpoints).

", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"PageSizeV2", + "documentation":"

The number of results to show in a single call to ListMultiRegionEndpoints. If the number of results is larger than the number you specified in this parameter, the response includes a NextToken element that you can use to retrieve the next page of results.

", + "location":"querystring", + "locationName":"PageSize" + } + }, + "documentation":"

Represents a request to list all the multi-region endpoints (global-endpoints) whose primary region is the AWS-Region where the operation is executed.

" + }, + "ListMultiRegionEndpointsResponse":{ + "type":"structure", + "members":{ + "MultiRegionEndpoints":{ + "shape":"MultiRegionEndpoints", + "documentation":"

An array that contains key multi-region endpoint (global-endpoint) properties.

" + }, + "NextToken":{ + "shape":"NextTokenV2", + "documentation":"

A token indicating that there are additional multi-region endpoints (global-endpoints) available to be listed. Pass this token to a subsequent ListMultiRegionEndpoints call to retrieve the next page.

" + } + }, + "documentation":"

The following elements are returned by the service.

" + }, "ListOfContactLists":{ "type":"list", "member":{"shape":"ContactList"} @@ -5434,7 +5651,47 @@ }, "documentation":"

An object that contains details about the data source for the metrics export.

" }, + "MultiRegionEndpoint":{ + "type":"structure", + "members":{ + "EndpointName":{ + "shape":"EndpointName", + "documentation":"

The name of the multi-region endpoint (global-endpoint).

" + }, + "Status":{ + "shape":"Status", + "documentation":"

The status of the multi-region endpoint (global-endpoint).

" + }, + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

The ID of the multi-region endpoint (global-endpoint).

" + }, + "Regions":{ + "shape":"Regions", + "documentation":"

Primary and secondary regions between which the multi-region endpoint splits sending traffic.

" + }, + "CreatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time stamp of when the multi-region endpoint (global-endpoint) was created.

" + }, + "LastUpdatedTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time stamp of when the multi-region endpoint (global-endpoint) was last updated.

" + } + }, + "documentation":"

An object that contains multi-region endpoint (global-endpoint) properties.

" + }, + "MultiRegionEndpoints":{ + "type":"list", + "member":{"shape":"MultiRegionEndpoint"} + }, "NextToken":{"type":"string"}, + "NextTokenV2":{ + "type":"string", + "max":5000, + "min":1, + "pattern":"^^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" + }, "NotFoundException":{ "type":"structure", "members":{ @@ -5462,6 +5719,11 @@ }, "documentation":"

An object that contains information about email that was sent from the selected domain.

" }, + "PageSizeV2":{ + "type":"integer", + "max":1000, + "min":1 + }, "Percentage":{ "type":"double", "documentation":"

An object that contains information about inbox placement percentages.

" @@ -6135,6 +6397,14 @@ "type":"list", "member":{"shape":"Recommendation"} }, + "Region":{ + "type":"string", + "documentation":"

The name of an AWS-Region.

" + }, + "Regions":{ + "type":"list", + "member":{"shape":"Region"} + }, "RenderedEmailTemplate":{ "type":"string", "documentation":"

The complete MIME message rendered by applying the data in the TemplateData parameter to the template specified in the TemplateName parameter.

" @@ -6204,6 +6474,38 @@ "DENIED" ] }, + "Route":{ + "type":"structure", + "required":["Region"], + "members":{ + "Region":{ + "shape":"Region", + "documentation":"

The name of an AWS-Region.

" + } + }, + "documentation":"

An object that contains an AWS-Region and routing status.

" + }, + "RouteDetails":{ + "type":"structure", + "required":["Region"], + "members":{ + "Region":{ + "shape":"Region", + "documentation":"

The name of an AWS-Region to be a secondary region for the multi-region endpoint (global-endpoint).

" + } + }, + "documentation":"

An object that contains route configuration, including the secondary region name.

" + }, + "Routes":{ + "type":"list", + "member":{"shape":"Route"}, + "documentation":"

A list of routes between which the traffic will be split when sending through the multi-region endpoint (global-endpoint).

" + }, + "RoutesDetails":{ + "type":"list", + "member":{"shape":"RouteDetails"}, + "documentation":"

A list of route configuration details. Must contain exactly one route configuration.

" + }, "S3Url":{ "type":"string", "documentation":"

An Amazon S3 URL in the format s3://<bucket_name>/<object> or a pre-signed URL.

", @@ -6282,6 +6584,11 @@ "ConfigurationSetName":{ "shape":"ConfigurationSetName", "documentation":"

The name of the configuration set to use when sending the email.

" + }, + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

The ID of the multi-region endpoint (global-endpoint).

", + "contextParam":{"name":"EndpointId"} } }, "documentation":"

Represents a request to send email messages to multiple destinations using Amazon SES. For more information, see the Amazon SES Developer Guide.

" @@ -6369,6 +6676,11 @@ "shape":"ConfigurationSetName", "documentation":"

The name of the configuration set to use when sending the email.

" }, + "EndpointId":{ + "shape":"EndpointId", + "documentation":"

The ID of the multi-region endpoint (global-endpoint).
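A sketch of sending through a multi-region endpoint by passing its ID; the addresses and endpoint ID are placeholders, and supplying EndpointId is what drives the {EndpointId}.endpoints.email.* resolution added to the endpoint rule set above:

import boto3

sesv2 = boto3.client("sesv2")

sesv2.send_email(
    FromEmailAddress="sender@example.com",                   # placeholder
    Destination={"ToAddresses": ["recipient@example.com"]},  # placeholder
    Content={
        "Simple": {
            "Subject": {"Data": "Hello from a multi-region endpoint"},
            "Body": {"Text": {"Data": "Routed through the global endpoint."}},
        }
    },
    EndpointId="abc123.456def",                              # placeholder endpoint ID
)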

", + "contextParam":{"name":"EndpointId"} + }, "ListManagementOptions":{ "shape":"ListManagementOptions", "documentation":"

An object used to specify a list or topic to which an email belongs, which will be used when a contact chooses to unsubscribe.

" @@ -6439,6 +6751,16 @@ }, "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

" }, + "Status":{ + "type":"string", + "documentation":"

The status of the multi-region endpoint (global-endpoint).

", + "enum":[ + "CREATING", + "READY", + "FAILED", + "DELETING" + ] + }, "Subject":{"type":"string"}, "SubscriptionStatus":{ "type":"string", diff --git a/botocore/data/timestream-influxdb/2023-01-27/service-2.json b/botocore/data/timestream-influxdb/2023-01-27/service-2.json index eb364429cc..c95a7644c7 100644 --- a/botocore/data/timestream-influxdb/2023-01-27/service-2.json +++ b/botocore/data/timestream-influxdb/2023-01-27/service-2.json @@ -163,6 +163,7 @@ }, "input":{"shape":"TagResourceRequest"}, "errors":[ + {"shape":"ServiceQuotaExceededException"}, {"shape":"ResourceNotFoundException"} ], "documentation":"

Tags are composed of Key/Value pairs. You can use tags to categorize and track your Timestream for InfluxDB resources.

", @@ -275,7 +276,7 @@ }, "password":{ "shape":"Password", - "documentation":"

The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in AWS SecretManager in your account.

" + "documentation":"

The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Web Services SecretManager in your account.

" }, "organization":{ "shape":"Organization", @@ -328,6 +329,10 @@ "port":{ "shape":"Port", "documentation":"

The port number on which InfluxDB accepts connections.

Valid Values: 1024-65535

Default: 8086

Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680

" + }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.
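A hedged sketch of requesting a dual-stack instance: networkType and its IPV4/DUAL values come from this patch, while the remaining parameters are assumed from the instance-creation request shape and carry placeholder values:

import boto3

influx = boto3.client("timestream-influxdb")

influx.create_db_instance(
    name="my-influx-instance",                     # placeholder
    password="REPLACE_WITH_A_STRONG_PASSWORD",     # placeholder; stored as a secret for you
    dbInstanceType="db.influx.medium",             # assumed instance type value
    vpcSubnetIds=["subnet-0123456789abcdef0"],     # placeholder
    vpcSecurityGroupIds=["sg-0123456789abcdef0"],  # placeholder
    allocatedStorage=40,                           # placeholder size in GiB
    networkType="DUAL",                            # IPV4 or DUAL (IPv4 + IPv6)
)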

" } } }, @@ -364,6 +369,10 @@ "shape":"Port", "documentation":"

The port number on which InfluxDB accepts connections. The default value is 8086.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + }, "dbInstanceType":{ "shape":"DbInstanceType", "documentation":"

The Timestream for InfluxDB instance type that InfluxDB runs on.

" @@ -410,7 +419,7 @@ }, "influxAuthParametersSecretArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, @@ -503,7 +512,7 @@ }, "name":{ "shape":"DbInstanceName", - "documentation":"

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands.

" + "documentation":"

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and Amazon Web Services CLI commands.

" }, "arn":{ "shape":"Arn", @@ -521,6 +530,10 @@ "shape":"Port", "documentation":"

The port number on which InfluxDB accepts connections.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + }, "dbInstanceType":{ "shape":"DbInstanceType", "documentation":"

The Timestream for InfluxDB instance type to run InfluxDB on.

" @@ -657,6 +670,10 @@ "shape":"Port", "documentation":"

The port number on which InfluxDB accepts connections.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + }, "dbInstanceType":{ "shape":"DbInstanceType", "documentation":"

The Timestream for InfluxDB instance type that InfluxDB runs on.

" @@ -703,7 +720,7 @@ }, "influxAuthParametersSecretArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, @@ -789,6 +806,10 @@ "shape":"Port", "documentation":"

The port number on which InfluxDB accepts connections.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + }, "dbInstanceType":{ "shape":"DbInstanceType", "documentation":"

The Timestream for InfluxDB instance type that InfluxDB runs on.

" @@ -835,7 +856,7 @@ }, "influxAuthParametersSecretArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, @@ -1236,6 +1257,13 @@ "max":100, "min":1 }, + "NetworkType":{ + "type":"string", + "enum":[ + "IPV4", + "DUAL" + ] + }, "NextToken":{ "type":"string", "min":1 @@ -1468,7 +1496,7 @@ }, "name":{ "shape":"DbInstanceName", - "documentation":"

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and AWS CLI commands.

" + "documentation":"

This customer-supplied name uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and Amazon Web Services CLI commands.

" }, "arn":{ "shape":"Arn", @@ -1486,6 +1514,10 @@ "shape":"Port", "documentation":"

The port number on which InfluxDB accepts connections.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + }, "dbInstanceType":{ "shape":"DbInstanceType", "documentation":"

The Timestream for InfluxDB instance type that InfluxDB runs on.

" @@ -1532,7 +1564,7 @@ }, "influxAuthParametersSecretArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" } } }, @@ -1590,5 +1622,5 @@ "min":1 } }, - "documentation":"

Amazon Timestream for InfluxDB is a managed time-series database engine that makes it easy for application developers and DevOps teams to run InfluxDB databases on AWS for near real-time time-series applications using open-source APIs. With Amazon Timestream for InfluxDB, it is easy to set up, operate, and scale time-series workloads that can answer queries with single-digit millisecond query response time.

" + "documentation":"

Amazon Timestream for InfluxDB is a managed time-series database engine that makes it easy for application developers and DevOps teams to run InfluxDB databases on Amazon Web Services for near real-time time-series applications using open-source APIs. With Amazon Timestream for InfluxDB, it is easy to set up, operate, and scale time-series workloads that can answer queries with single-digit millisecond query response time.

" } diff --git a/tests/functional/endpoint-rules/mgh/endpoint-tests-1.json b/tests/functional/endpoint-rules/mgh/endpoint-tests-1.json index c99eb4af56..3f01197299 100644 --- a/tests/functional/endpoint-rules/mgh/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/mgh/endpoint-tests-1.json @@ -1,42 +1,29 @@ { "testCases": [ { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://mgh-fips.eu-central-1.api.aws" - } - }, - "params": { - "Region": "eu-central-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh-fips.eu-central-1.amazonaws.com" + "url": "https://mgh.ap-northeast-1.amazonaws.com" } }, "params": { - "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh.eu-central-1.api.aws" + "url": "https://mgh.ap-southeast-2.amazonaws.com" } }, "params": { - "Region": "eu-central-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,47 +35,47 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh-fips.us-west-2.api.aws" + "url": "https://mgh.eu-west-1.amazonaws.com" } }, "params": { - "Region": "us-west-2", - "UseDualStack": true, - "UseFIPS": true + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh-fips.us-west-2.amazonaws.com" + "url": "https://mgh.eu-west-2.amazonaws.com" } }, "params": { - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": true + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh.us-west-2.api.aws" + "url": "https://mgh.us-east-1.amazonaws.com" } }, "params": { - "Region": "us-west-2", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,281 +87,273 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mgh-fips.eu-west-2.api.aws" + "url": "https://mgh-fips.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-2", - 
"UseDualStack": true, - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh-fips.eu-west-2.amazonaws.com" + "url": "https://mgh-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mgh.eu-west-2.api.aws" + "url": "https://mgh.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-2", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mgh.eu-west-2.amazonaws.com" + "url": "https://mgh-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh-fips.eu-west-1.api.aws" + "url": "https://mgh-fips.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "eu-west-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mgh-fips.eu-west-1.amazonaws.com" + "url": "https://mgh.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh.eu-west-1.api.aws" + "url": "https://mgh.cn-north-1.amazonaws.com.cn" } }, "params": { - "Region": "eu-west-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mgh.eu-west-1.amazonaws.com" + "url": "https://mgh-fips.us-gov-east-1.api.aws" } }, "params": { - "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": 
"https://mgh-fips.ap-northeast-1.api.aws" + "url": "https://mgh-fips.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "ap-northeast-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://mgh-fips.ap-northeast-1.amazonaws.com" + "url": "https://mgh.us-gov-east-1.api.aws" } }, "params": { - "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh.ap-northeast-1.api.aws" + "url": "https://mgh.us-gov-east-1.amazonaws.com" } }, "params": { - "Region": "ap-northeast-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://mgh.ap-northeast-1.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh-fips.ap-southeast-2.api.aws" + "url": "https://mgh-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://mgh-fips.ap-southeast-2.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh.ap-southeast-2.api.aws" + "url": "https://mgh.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "ap-southeast-2", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://mgh.ap-southeast-2.amazonaws.com" - } + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": 
{ - "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh-fips.us-east-1.api.aws" + "url": "https://mgh-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", "expect": { - "endpoint": { - "url": "https://mgh-fips.us-east-1.amazonaws.com" - } + "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://mgh.us-east-1.api.aws" + "url": "https://mgh.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", "expect": { "endpoint": { - "url": "https://mgh.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { "Region": "us-east-1", + "UseFIPS": false, "UseDualStack": false, - "UseFIPS": false + "Endpoint": "https://example.com" } }, { - "documentation": "For custom endpoint with fips disabled and dualstack disabled", + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", "expect": { "endpoint": { "url": "https://example.com" } }, "params": { - "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -385,8 +364,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -397,10 +376,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/tests/functional/endpoint-rules/sesv2/endpoint-tests-1.json b/tests/functional/endpoint-rules/sesv2/endpoint-tests-1.json index fa4feb98bd..2cba42fcdf 100644 --- a/tests/functional/endpoint-rules/sesv2/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/sesv2/endpoint-tests-1.json @@ -594,6 +594,163 @@ "expect": { "error": "Invalid Configuration: Missing Region" } + }, + { + "documentation": "Valid EndpointId with dualstack and FIPS disabled. 
i.e, IPv4 Only stack with no FIPS", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingName": "ses", + "name": "sigv4a", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "url": "https://abc123.456def.endpoints.email.amazonaws.com" + } + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "UseFIPS": false, + "Region": "us-east-1" + } + }, + { + "documentation": "Valid EndpointId with dualstack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingName": "ses", + "name": "sigv4a", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "url": "https://abc123.456def.endpoints.email.api.aws" + } + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": true, + "UseFIPS": false, + "Region": "us-west-2" + } + }, + { + "documentation": "Valid EndpointId with FIPS set, dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "UseFIPS": true, + "Region": "ap-northeast-1" + } + }, + { + "documentation": "Valid EndpointId with both dualstack and FIPS enabled", + "expect": { + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": true, + "UseFIPS": true, + "Region": "ap-northeast-2" + } + }, + { + "documentation": "Regular regional request, without EndpointId", + "expect": { + "endpoint": { + "url": "https://email.eu-west-1.amazonaws.com" + } + }, + "params": { + "UseDualStack": false, + "Region": "eu-west-1" + } + }, + { + "documentation": "Invalid EndpointId (Invalid chars / format)", + "expect": { + "error": "EndpointId must be a valid host label" + }, + "params": { + "EndpointId": "badactor.com?foo=bar", + "UseDualStack": false, + "Region": "eu-west-2" + } + }, + { + "documentation": "Invalid EndpointId (Empty)", + "expect": { + "error": "EndpointId must be a valid host label" + }, + "params": { + "EndpointId": "", + "UseDualStack": false, + "Region": "ap-south-1" + } + }, + { + "documentation": "Valid EndpointId with custom sdk endpoint", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "signingName": "ses", + "name": "sigv4a", + "signingRegionSet": [ + "*" + ] + } + ] + }, + "url": "https://example.com" + } + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Valid EndpointId with custom sdk endpoint with FIPS enabled", + "expect": { + "error": "Invalid Configuration: FIPS is not supported with multi-region endpoints" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": false, + "UseFIPS": true, + "Region": "us-east-1", + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Valid EndpointId with DualStack enabled and partition does not support DualStack", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "EndpointId": "abc123.456def", + "UseDualStack": true, + "Region": "us-isob-east-1" + } } ], "version": "1.0" From 345e6803c93be36c43ae0a942d9a1df2ae060a09 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 11 Dec 2024 19:41:38 +0000 Subject: [PATCH 02/20] Update endpoints model --- botocore/data/endpoints.json | 211 +++++++++-------------------------- 1 file changed, 54 insertions(+), 157 deletions(-) diff 
--git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 79bb797e8a..93ffb541d3 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -6281,12 +6281,18 @@ }, "ca-central-1" : { "variants" : [ { + "hostname" : "dlm-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { + "hostname" : "dlm-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.ca-west-1.api.aws", "tags" : [ "dualstack" ] } ] @@ -6365,24 +6371,36 @@ }, "us-east-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { + "hostname" : "dlm-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-east-2.api.aws", "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { + "hostname" : "dlm-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-west-2.api.aws", "tags" : [ "dualstack" ] } ] @@ -21157,34 +21175,8 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, - "ca-central-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.ca-central-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "ca-central-1-fips" : { - "credentialScope" : { - "region" : "ca-central-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.ca-central-1.amazonaws.com", - "protocols" : [ "https" ] - }, - "ca-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.ca-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "ca-west-1-fips" : { - "credentialScope" : { - "region" : "ca-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.ca-west-1.amazonaws.com", - "protocols" : [ "https" ] - }, + "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -21204,62 +21196,10 @@ "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, - "us-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-east-1-fips" : { - "credentialScope" : { - "region" : "us-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-east-1.amazonaws.com", - "protocols" : [ "https" ] - }, - "us-east-2" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-east-2.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-east-2-fips" : { - "credentialScope" : { - "region" : "us-east-2" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-east-2.amazonaws.com", - "protocols" : [ "https" ] - }, - "us-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-west-1-fips" : { - "credentialScope" : { - "region" : "us-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-west-1.amazonaws.com", - "protocols" : [ "https" ] - }, - "us-west-2" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-west-2.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-west-2-fips" : { - "credentialScope" : { - "region" : "us-west-2" - }, - 
"deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-west-2.amazonaws.com", - "protocols" : [ "https" ] - } + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } } }, "sts" : { @@ -22353,6 +22293,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -26638,6 +26579,9 @@ "endpoints" : { "us-gov-east-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -26651,6 +26595,9 @@ }, "us-gov-west-1" : { "variants" : [ { + "hostname" : "dlm-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { "hostname" : "dlm.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -30040,34 +29987,8 @@ } ] }, "endpoints" : { - "us-gov-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-gov-east-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-gov-east-1-fips" : { - "credentialScope" : { - "region" : "us-gov-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-gov-east-1.amazonaws.com", - "protocols" : [ "https" ] - }, - "us-gov-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-gov-west-1.amazonaws.com", - "tags" : [ "fips" ] - } ] - }, - "us-gov-west-1-fips" : { - "credentialScope" : { - "region" : "us-gov-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-gov-west-1.amazonaws.com", - "protocols" : [ "https" ] - } + "us-gov-east-1" : { }, + "us-gov-west-1" : { } } }, "sts" : { @@ -30780,8 +30701,18 @@ }, "dlm" : { "endpoints" : { - "us-iso-east-1" : { }, - "us-iso-west-1" : { } + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "dlm-fips.us-iso-east-1.api.aws.ic.gov", + "tags" : [ "dualstack", "fips" ] + } ] + }, + "us-iso-west-1" : { + "variants" : [ { + "hostname" : "dlm-fips.us-iso-west-1.api.aws.ic.gov", + "tags" : [ "dualstack", "fips" ] + } ] + } } }, "dms" : { @@ -31443,34 +31374,8 @@ } }, "endpoints" : { - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-east-1-fips" : { - "credentialScope" : { - "region" : "us-iso-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-iso-east-1.c2s.ic.gov", - "protocols" : [ "https" ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov", - "tags" : [ "fips" ] - } ] - }, - "us-iso-west-1-fips" : { - "credentialScope" : { - "region" : "us-iso-west-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-iso-west-1.c2s.ic.gov", - "protocols" : [ "https" ] - } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "sts" : { @@ -31709,7 +31614,12 @@ }, "dlm" : { "endpoints" : { - "us-isob-east-1" : { } + "us-isob-east-1" : { + "variants" : [ { + "hostname" : "dlm-fips.us-isob-east-1.api.aws.scloud", + "tags" : [ "dualstack", "fips" ] + } ] + } } }, "dms" : { @@ -32191,20 +32101,7 @@ "protocols" : [ "http", "https" ] }, "endpoints" : { - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "streams.dynamodb-fips.us-isob-east-1.sc2s.sgov.gov", - "tags" : [ "fips" ] - } ] - }, - "us-isob-east-1-fips" : { - "credentialScope" : { - "region" : "us-isob-east-1" - }, - "deprecated" : true, - "hostname" : "streams.dynamodb-fips.us-isob-east-1.sc2s.sgov.gov", - "protocols" : [ "https" ] - 
} + "us-isob-east-1" : { } } }, "sts" : { From 2123adbea27bb16e1df0f81809424bf561a4e3a9 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 11 Dec 2024 19:42:46 +0000 Subject: [PATCH 03/20] Bumping version to 1.35.79 --- .changes/1.35.79.json | 42 +++++++++++++++++++ .../api-change-artifact-68323.json | 5 --- .../api-change-cloudtrail-39887.json | 5 --- .../api-change-cognitoidp-8257.json | 5 --- .../api-change-controlcatalog-50136.json | 5 --- .../api-change-emrserverless-60875.json | 5 --- .../next-release/api-change-mgh-14397.json | 5 --- .../next-release/api-change-sesv2-43901.json | 5 --- .../api-change-timestreaminfluxdb-1068.json | 5 --- CHANGELOG.rst | 13 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 12 files changed, 57 insertions(+), 42 deletions(-) create mode 100644 .changes/1.35.79.json delete mode 100644 .changes/next-release/api-change-artifact-68323.json delete mode 100644 .changes/next-release/api-change-cloudtrail-39887.json delete mode 100644 .changes/next-release/api-change-cognitoidp-8257.json delete mode 100644 .changes/next-release/api-change-controlcatalog-50136.json delete mode 100644 .changes/next-release/api-change-emrserverless-60875.json delete mode 100644 .changes/next-release/api-change-mgh-14397.json delete mode 100644 .changes/next-release/api-change-sesv2-43901.json delete mode 100644 .changes/next-release/api-change-timestreaminfluxdb-1068.json diff --git a/.changes/1.35.79.json b/.changes/1.35.79.json new file mode 100644 index 0000000000..309912d527 --- /dev/null +++ b/.changes/1.35.79.json @@ -0,0 +1,42 @@ +[ + { + "category": "``artifact``", + "description": "Add support for listing active customer agreements for the calling AWS Account.", + "type": "api-change" + }, + { + "category": "``cloudtrail``", + "description": "Doc-only updates for CloudTrail.", + "type": "api-change" + }, + { + "category": "``cognito-idp``", + "description": "Updated descriptions for some API operations and parameters, corrected some errors in Cognito user pools", + "type": "api-change" + }, + { + "category": "``controlcatalog``", + "description": "Minor documentation updates to the content of ImplementationDetails object part of the Control Catalog GetControl API", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "This release adds support for accessing system profile logs in Lake Formation-enabled jobs.", + "type": "api-change" + }, + { + "category": "``mgh``", + "description": "API and documentation updates for AWS MigrationHub related to adding support for listing migration task updates and associating, disassociating and listing source resources", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "Introduces support for multi-region endpoint.", + "type": "api-change" + }, + { + "category": "``timestream-influxdb``", + "description": "Adds networkType parameter to CreateDbInstance API which allows IPv6 support to the InfluxDB endpoint", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-artifact-68323.json b/.changes/next-release/api-change-artifact-68323.json deleted file mode 100644 index c305773505..0000000000 --- a/.changes/next-release/api-change-artifact-68323.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``artifact``", - "description": "Add support for listing active customer agreements for the calling AWS Account." 
-} diff --git a/.changes/next-release/api-change-cloudtrail-39887.json b/.changes/next-release/api-change-cloudtrail-39887.json deleted file mode 100644 index 021601dbaf..0000000000 --- a/.changes/next-release/api-change-cloudtrail-39887.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cloudtrail``", - "description": "Doc-only updates for CloudTrail." -} diff --git a/.changes/next-release/api-change-cognitoidp-8257.json b/.changes/next-release/api-change-cognitoidp-8257.json deleted file mode 100644 index a50fd9e39d..0000000000 --- a/.changes/next-release/api-change-cognitoidp-8257.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cognito-idp``", - "description": "Updated descriptions for some API operations and parameters, corrected some errors in Cognito user pools" -} diff --git a/.changes/next-release/api-change-controlcatalog-50136.json b/.changes/next-release/api-change-controlcatalog-50136.json deleted file mode 100644 index dcf87dd4f2..0000000000 --- a/.changes/next-release/api-change-controlcatalog-50136.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``controlcatalog``", - "description": "Minor documentation updates to the content of ImplementationDetails object part of the Control Catalog GetControl API" -} diff --git a/.changes/next-release/api-change-emrserverless-60875.json b/.changes/next-release/api-change-emrserverless-60875.json deleted file mode 100644 index 30954ae85c..0000000000 --- a/.changes/next-release/api-change-emrserverless-60875.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``emr-serverless``", - "description": "This release adds support for accessing system profile logs in Lake Formation-enabled jobs." -} diff --git a/.changes/next-release/api-change-mgh-14397.json b/.changes/next-release/api-change-mgh-14397.json deleted file mode 100644 index 7d821c0250..0000000000 --- a/.changes/next-release/api-change-mgh-14397.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``mgh``", - "description": "API and documentation updates for AWS MigrationHub related to adding support for listing migration task updates and associating, disassociating and listing source resources" -} diff --git a/.changes/next-release/api-change-sesv2-43901.json b/.changes/next-release/api-change-sesv2-43901.json deleted file mode 100644 index b63d229ddb..0000000000 --- a/.changes/next-release/api-change-sesv2-43901.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``sesv2``", - "description": "Introduces support for multi-region endpoint." -} diff --git a/.changes/next-release/api-change-timestreaminfluxdb-1068.json b/.changes/next-release/api-change-timestreaminfluxdb-1068.json deleted file mode 100644 index 5cde1fc625..0000000000 --- a/.changes/next-release/api-change-timestreaminfluxdb-1068.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``timestream-influxdb``", - "description": "Adds networkType parameter to CreateDbInstance API which allows IPv6 support to the InfluxDB endpoint" -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c6ee9ba669..d0acd1cd8a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.35.79 +======= + +* api-change:``artifact``: Add support for listing active customer agreements for the calling AWS Account. +* api-change:``cloudtrail``: Doc-only updates for CloudTrail. 
+* api-change:``cognito-idp``: Updated descriptions for some API operations and parameters, corrected some errors in Cognito user pools +* api-change:``controlcatalog``: Minor documentation updates to the content of ImplementationDetails object part of the Control Catalog GetControl API +* api-change:``emr-serverless``: This release adds support for accessing system profile logs in Lake Formation-enabled jobs. +* api-change:``mgh``: API and documentation updates for AWS MigrationHub related to adding support for listing migration task updates and associating, disassociating and listing source resources +* api-change:``sesv2``: Introduces support for multi-region endpoint. +* api-change:``timestream-influxdb``: Adds networkType parameter to CreateDbInstance API which allows IPv6 support to the InfluxDB endpoint + + 1.35.78 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index d4da138ff5..238f428f26 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.78' +__version__ = '1.35.79' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index db4163b31b..827a4de42f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.78' +release = '1.35.79' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 007e93cdcb9cbb67176c1f7adedb6fb171e4f171 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 12 Dec 2024 19:24:29 +0000 Subject: [PATCH 04/20] Update to latest models --- .../api-change-connect-81969.json | 5 + .../next-release/api-change-dms-40416.json | 5 + .../next-release/api-change-glue-72059.json | 5 + .../api-change-guardduty-79289.json | 5 + .../api-change-route53domains-82022.json | 5 + .../data/connect/2017-08-08/paginators-1.json | 19 + .../data/connect/2017-08-08/service-2.json | 658 +++++++++++++++++- botocore/data/dms/2016-01-01/service-2.json | 138 +++- botocore/data/glue/2017-03-31/service-2.json | 53 +- .../data/guardduty/2017-11-28/service-2.json | 18 +- .../route53domains/2014-05-15/service-2.json | 12 +- 11 files changed, 857 insertions(+), 66 deletions(-) create mode 100644 .changes/next-release/api-change-connect-81969.json create mode 100644 .changes/next-release/api-change-dms-40416.json create mode 100644 .changes/next-release/api-change-glue-72059.json create mode 100644 .changes/next-release/api-change-guardduty-79289.json create mode 100644 .changes/next-release/api-change-route53domains-82022.json diff --git a/.changes/next-release/api-change-connect-81969.json b/.changes/next-release/api-change-connect-81969.json new file mode 100644 index 0000000000..d8c3f7c5ab --- /dev/null +++ b/.changes/next-release/api-change-connect-81969.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``connect``", + "description": "Configure holidays and other overrides to hours of operation in advance. During contact handling, Amazon Connect automatically checks for overrides and provides customers with an appropriate flow path. After an override period passes call center automatically reverts to standard hours of operation." 
+} diff --git a/.changes/next-release/api-change-dms-40416.json b/.changes/next-release/api-change-dms-40416.json new file mode 100644 index 0000000000..6a8e0c77ed --- /dev/null +++ b/.changes/next-release/api-change-dms-40416.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``dms``", + "description": "Add parameters to support for kerberos authentication. Add parameter for disabling the Unicode source filter with PostgreSQL settings. Add parameter to use large integer value with Kinesis/Kafka settings." +} diff --git a/.changes/next-release/api-change-glue-72059.json b/.changes/next-release/api-change-glue-72059.json new file mode 100644 index 0000000000..82886e248c --- /dev/null +++ b/.changes/next-release/api-change-glue-72059.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``glue``", + "description": "To support customer-managed encryption in Data Quality to allow customers encrypt data with their own KMS key, we will add a DataQualityEncryption field to the SecurityConfiguration API where customers can provide their KMS keys." +} diff --git a/.changes/next-release/api-change-guardduty-79289.json b/.changes/next-release/api-change-guardduty-79289.json new file mode 100644 index 0000000000..3519d1fbcd --- /dev/null +++ b/.changes/next-release/api-change-guardduty-79289.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``guardduty``", + "description": "Improved descriptions for certain APIs." +} diff --git a/.changes/next-release/api-change-route53domains-82022.json b/.changes/next-release/api-change-route53domains-82022.json new file mode 100644 index 0000000000..8e3e2bcc63 --- /dev/null +++ b/.changes/next-release/api-change-route53domains-82022.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``route53domains``", + "description": "This release includes the following API updates: added the enumeration type RESTORE_DOMAIN to the OperationType; constrained the Price attribute to non-negative values; updated the LangCode to allow 2 or 3 alphabetical characters." +} diff --git a/botocore/data/connect/2017-08-08/paginators-1.json b/botocore/data/connect/2017-08-08/paginators-1.json index db94a88008..2980cc21bb 100644 --- a/botocore/data/connect/2017-08-08/paginators-1.json +++ b/botocore/data/connect/2017-08-08/paginators-1.json @@ -416,6 +416,25 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "ContactFlowVersionSummaryList" + }, + "ListHoursOfOperationOverrides": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "LastModifiedRegion", + "LastModifiedTime" + ], + "output_token": "NextToken", + "result_key": "HoursOfOperationOverrideList" + }, + "SearchHoursOfOperationOverrides": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "HoursOfOperationOverrides" } } } diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 9853dda733..04c3bf33e8 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -561,6 +561,25 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Creates hours of operation.

" }, + "CreateHoursOfOperationOverride":{ + "name":"CreateHoursOfOperationOverride", + "http":{ + "method":"PUT", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides" + }, + "input":{"shape":"CreateHoursOfOperationOverrideRequest"}, + "output":{"shape":"CreateHoursOfOperationOverrideResponse"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates an hours of operation override in an Amazon Connect hours of operation resource.
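As a rough illustration only, assuming boto3's usual snake_case method mapping for this new operation and using placeholder identifiers, a call might look like the following sketch; the parameter names mirror the CreateHoursOfOperationOverrideRequest shape defined later in this model update:

    import boto3

    connect = boto3.client("connect")

    # Placeholder instance and hours-of-operation IDs; the Config entries use
    # the Day/StartTime/EndTime fields of HoursOfOperationOverrideConfig.
    response = connect.create_hours_of_operation_override(
        InstanceId="12345678-1234-1234-1234-123456789012",
        HoursOfOperationId="87654321-4321-4321-4321-210987654321",
        Name="New Year closure",
        Description="Closed for the public holiday",
        Config=[
            {
                "Day": "WEDNESDAY",
                "StartTime": {"Hours": 0, "Minutes": 0},
                "EndTime": {"Hours": 0, "Minutes": 0},
            }
        ],
        EffectiveFrom="2025-01-01",
        EffectiveTill="2025-01-01",
    )
    print(response["HoursOfOperationOverrideId"])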

" + }, "CreateInstance":{ "name":"CreateInstance", "http":{ @@ -701,7 +720,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Creates a new queue for the specified Amazon Connect instance.

  • If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException.

  • Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API.

  • If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region.

" + "documentation":"

Creates a new queue for the specified Amazon Connect instance.

  • If the phone number is claimed to a traffic distribution group that was created in the same Region as the Amazon Connect instance where you are calling this API, then you can use a full phone number ARN or a UUID for OutboundCallerIdNumberId. However, if the phone number is claimed to a traffic distribution group that is in one Region, and you are calling this API from an instance in another Amazon Web Services Region that is associated with the traffic distribution group, you must provide a full phone number ARN. If a UUID is provided in this scenario, you will receive a ResourceNotFoundException.

  • Only use the phone number ARN format that doesn't contain instance in the path, for example, arn:aws:connect:us-east-1:1234567890:phone-number/uuid. This is the same ARN format that is returned when you call the ListPhoneNumbersV2 API.

  • If you plan to use IAM policies to allow/deny access to this API for phone number resources claimed to a traffic distribution group, see Allow or Deny queue API actions for phone numbers in a replica Region.

" }, "CreateQuickConnect":{ "name":"CreateQuickConnect", @@ -1073,6 +1092,22 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Deletes an hours of operation.

" }, + "DeleteHoursOfOperationOverride":{ + "name":"DeleteHoursOfOperationOverride", + "http":{ + "method":"DELETE", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + }, + "input":{"shape":"DeleteHoursOfOperationOverrideRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Deletes an hours of operation override in an Amazon Connect hours of operation resource.

" + }, "DeleteInstance":{ "name":"DeleteInstance", "http":{ @@ -1529,6 +1564,23 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Describes the hours of operation.

" }, + "DescribeHoursOfOperationOverride":{ + "name":"DescribeHoursOfOperationOverride", + "http":{ + "method":"GET", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + }, + "input":{"shape":"DescribeHoursOfOperationOverrideRequest"}, + "output":{"shape":"DescribeHoursOfOperationOverrideResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Describes the hours of operation override.

" + }, "DescribeInstance":{ "name":"DescribeInstance", "http":{ @@ -2113,6 +2165,23 @@ ], "documentation":"

Gets the real-time active user data from the specified Amazon Connect instance.

" }, + "GetEffectiveHoursOfOperations":{ + "name":"GetEffectiveHoursOfOperations", + "http":{ + "method":"GET", + "requestUri":"/effective-hours-of-operations/{InstanceId}/{HoursOfOperationId}" + }, + "input":{"shape":"GetEffectiveHoursOfOperationsRequest"}, + "output":{"shape":"GetEffectiveHoursOfOperationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Gets the hours of operation with the effective override applied.
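A minimal sketch, assuming the generated snake_case client method and placeholder IDs, of reading a week of effective hours; FromDate and ToDate use the YYYY-MM-DD format required by GetEffectiveHoursOfOperationsRequest:

    import boto3

    connect = boto3.client("connect")

    # Placeholder identifiers; the response carries EffectiveHoursOfOperationList
    # entries with a Date and the OperationalHours for that date.
    resp = connect.get_effective_hours_of_operations(
        InstanceId="12345678-1234-1234-1234-123456789012",
        HoursOfOperationId="87654321-4321-4321-4321-210987654321",
        FromDate="2025-01-01",
        ToDate="2025-01-07",
    )
    for day in resp.get("EffectiveHoursOfOperationList", []):
        print(day.get("Date"), day.get("OperationalHours"))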

" + }, "GetFederationToken":{ "name":"GetFederationToken", "http":{ @@ -2505,6 +2574,23 @@ ], "documentation":"

List the flow association based on the filters.

" }, + "ListHoursOfOperationOverrides":{ + "name":"ListHoursOfOperationOverrides", + "http":{ + "method":"GET", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides" + }, + "input":{"shape":"ListHoursOfOperationOverridesRequest"}, + "output":{"shape":"ListHoursOfOperationOverridesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Lists the hours of operation overrides.
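Because this patch also registers a ListHoursOfOperationOverrides paginator, iterating over all overrides might look roughly like this sketch (snake_case method assumed, placeholder IDs):

    import boto3

    connect = boto3.client("connect")
    paginator = connect.get_paginator("list_hours_of_operation_overrides")

    # Pages are keyed by HoursOfOperationOverrideList, as declared in the
    # paginator definition added earlier in this patch; IDs are placeholders.
    for page in paginator.paginate(
        InstanceId="12345678-1234-1234-1234-123456789012",
        HoursOfOperationId="87654321-4321-4321-4321-210987654321",
    ):
        for override in page.get("HoursOfOperationOverrideList", []):
            print(override["HoursOfOperationOverrideId"], override.get("Name"))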

" + }, "ListHoursOfOperations":{ "name":"ListHoursOfOperations", "http":{ @@ -3277,6 +3363,23 @@ ], "documentation":"

Searches email addresses in an instance, with optional filtering.

" }, + "SearchHoursOfOperationOverrides":{ + "name":"SearchHoursOfOperationOverrides", + "http":{ + "method":"POST", + "requestUri":"/search-hours-of-operation-overrides" + }, + "input":{"shape":"SearchHoursOfOperationOverridesRequest"}, + "output":{"shape":"SearchHoursOfOperationOverridesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Searches the hours of operation overrides.

" + }, "SearchHoursOfOperations":{ "name":"SearchHoursOfOperations", "http":{ @@ -4151,6 +4254,24 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates the hours of operation.

" }, + "UpdateHoursOfOperationOverride":{ + "name":"UpdateHoursOfOperationOverride", + "http":{ + "method":"POST", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + }, + "input":{"shape":"UpdateHoursOfOperationOverrideRequest"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, + {"shape":"ConditionalOperationFailedException"} + ], + "documentation":"

Updates the hours of operation override.

" + }, "UpdateInstanceAttribute":{ "name":"UpdateInstanceAttribute", "http":{ @@ -6560,6 +6681,14 @@ "type":"list", "member":{"shape":"CommonAttributeAndCondition"} }, + "CommonHumanReadableDescription":{ + "type":"string", + "pattern":"^[\\P{C}\\r\\n\\t]{1,250}$" + }, + "CommonHumanReadableName":{ + "type":"string", + "pattern":"^[\\P{C}\\r\\n\\t]{1,127}$" + }, "CommonNameLength127":{ "type":"string", "max":127, @@ -6633,7 +6762,7 @@ "members":{ "Message":{"shape":"Message"} }, - "documentation":"

A conditional check failed.

", + "documentation":"

Request processing failed because a dependent condition failed.

", "error":{"httpStatusCode":409}, "exception":true }, @@ -6831,7 +6960,7 @@ }, "ParticipantRole":{ "shape":"ParticipantRole", - "documentation":"

The role of the participant in the chat conversation.

" + "documentation":"

The role of the participant in the chat conversation.

Only CUSTOMER is currently supported. Any value other than CUSTOMER will result in an exception (4xx error).

" }, "IncludeRawMessage":{ "shape":"IncludeRawMessage", @@ -7029,7 +7158,15 @@ "shape":"ContactFlowModuleSearchConditionList", "documentation":"

A list of conditions which would be applied together with an AND condition.

" }, - "StringCondition":{"shape":"StringCondition"} + "StringCondition":{"shape":"StringCondition"}, + "StateCondition":{ + "shape":"ContactFlowModuleState", + "documentation":"

The state of the flow.

" + }, + "StatusCondition":{ + "shape":"ContactFlowModuleStatus", + "documentation":"

The status of the flow.

" + } }, "documentation":"

The search criteria to be used to return flow modules.

" }, @@ -7881,6 +8018,60 @@ } } }, + "CreateHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "Name", + "Config", + "EffectiveFrom", + "EffectiveTill" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "Name":{ + "shape":"CommonHumanReadableName", + "documentation":"

The name of the hours of operation override.

" + }, + "Description":{ + "shape":"CommonHumanReadableDescription", + "documentation":"

The description of the hours of operation override.

" + }, + "Config":{ + "shape":"HoursOfOperationOverrideConfigList", + "documentation":"

Configuration information for the hours of operation override: day, start time, and end time.

" + }, + "EffectiveFrom":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date from when the hours of operation override would be effective.

" + }, + "EffectiveTill":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date until when the hours of operation override would be effective.

" + } + } + }, + "CreateHoursOfOperationOverrideResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "documentation":"

The identifier for the hours of operation override.

" + } + } + }, "CreateHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -9130,6 +9321,34 @@ "type":"list", "member":{"shape":"DataSetId"} }, + "DateComparisonType":{ + "type":"string", + "enum":[ + "GREATER_THAN", + "LESS_THAN", + "GREATER_THAN_OR_EQUAL_TO", + "LESS_THAN_OR_EQUAL_TO", + "EQUAL_TO" + ] + }, + "DateCondition":{ + "type":"structure", + "members":{ + "FieldName":{ + "shape":"String", + "documentation":"

An object to specify the hours of operation override date field.

" + }, + "Value":{ + "shape":"DateYearMonthDayFormat", + "documentation":"

An object to specify the hours of operation override date value.

" + }, + "ComparisonType":{ + "shape":"DateComparisonType", + "documentation":"

An object to specify the hours of operation override date condition comparisonType.

" + } + }, + "documentation":"

An object to specify the hours of operation override date condition.

" + }, "DateReference":{ "type":"structure", "members":{ @@ -9144,6 +9363,10 @@ }, "documentation":"

Information about a reference when the referenceType is DATE. Otherwise, null.

" }, + "DateYearMonthDayFormat":{ + "type":"string", + "pattern":"^\\d{4}-\\d{2}-\\d{2}$" + }, "DeactivateEvaluationFormRequest":{ "type":"structure", "required":[ @@ -9391,6 +9614,34 @@ } } }, + "DeleteHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "HoursOfOperationOverrideId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "documentation":"

The identifier for the hours of operation override.

", + "location":"uri", + "locationName":"HoursOfOperationOverrideId" + } + } + }, "DeleteHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -10124,6 +10375,43 @@ } } }, + "DescribeHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "HoursOfOperationOverrideId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "documentation":"

The identifier for the hours of operation override.

", + "location":"uri", + "locationName":"HoursOfOperationOverrideId" + } + } + }, + "DescribeHoursOfOperationOverrideResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationOverride":{ + "shape":"HoursOfOperationOverride", + "documentation":"

Information about the hours of operations override.

" + } + } + }, "DescribeHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -11177,6 +11465,24 @@ "exception":true }, "DurationInSeconds":{"type":"integer"}, + "EffectiveHoursOfOperationList":{ + "type":"list", + "member":{"shape":"EffectiveHoursOfOperations"} + }, + "EffectiveHoursOfOperations":{ + "type":"structure", + "members":{ + "Date":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date that the hours of operation or override applies to.

" + }, + "OperationalHours":{ + "shape":"OperationalHours", + "documentation":"

Information about the hours of operations with the effective override applied.

" + } + }, + "documentation":"

Information about the hours of operations with the effective override applied.

" + }, "Email":{ "type":"string", "sensitive":true @@ -12852,6 +13158,54 @@ } } }, + "GetEffectiveHoursOfOperationsRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "FromDate", + "ToDate" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "FromDate":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date from which the hours of operation are listed.

", + "location":"querystring", + "locationName":"fromDate" + }, + "ToDate":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date until which the hours of operation are listed.

", + "location":"querystring", + "locationName":"toDate" + } + } + }, + "GetEffectiveHoursOfOperationsResponse":{ + "type":"structure", + "members":{ + "EffectiveHoursOfOperationList":{ + "shape":"EffectiveHoursOfOperationList", + "documentation":"

Information about the effective hours of operations.

" + }, + "TimeZone":{ + "shape":"TimeZone", + "documentation":"

The time zone for the hours of operation.

" + } + } + }, "GetFederationTokenRequest":{ "type":"structure", "required":["InstanceId"], @@ -13733,6 +14087,104 @@ "member":{"shape":"HoursOfOperation"} }, "HoursOfOperationName":{"type":"string"}, + "HoursOfOperationOverride":{ + "type":"structure", + "members":{ + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "documentation":"

The identifier for the hours of operation override.

" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

" + }, + "HoursOfOperationArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) for the hours of operation.

" + }, + "Name":{ + "shape":"CommonHumanReadableName", + "documentation":"

The name of the hours of operation override.

" + }, + "Description":{ + "shape":"CommonHumanReadableDescription", + "documentation":"

The description of the hours of operation override.

" + }, + "Config":{ + "shape":"HoursOfOperationOverrideConfigList", + "documentation":"

Configuration information for the hours of operation override: day, start time, and end time.

" + }, + "EffectiveFrom":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date from which the hours of operation override would be effective.

" + }, + "EffectiveTill":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date till which the hours of operation override would be effective.

" + } + }, + "documentation":"

Information about the hours of operations override.

" + }, + "HoursOfOperationOverrideConfig":{ + "type":"structure", + "members":{ + "Day":{ + "shape":"OverrideDays", + "documentation":"

The day that the hours of operation override applies to.

" + }, + "StartTime":{ + "shape":"OverrideTimeSlice", + "documentation":"

The start time when your contact center opens if overrides are applied.

" + }, + "EndTime":{ + "shape":"OverrideTimeSlice", + "documentation":"

The end time that your contact center closes if overrides are applied.

" + } + }, + "documentation":"

Information about the hours of operation override config: day, start time, and end time.

" + }, + "HoursOfOperationOverrideConfigList":{ + "type":"list", + "member":{"shape":"HoursOfOperationOverrideConfig"}, + "max":100, + "min":0 + }, + "HoursOfOperationOverrideId":{ + "type":"string", + "max":36, + "min":1 + }, + "HoursOfOperationOverrideList":{ + "type":"list", + "member":{"shape":"HoursOfOperationOverride"} + }, + "HoursOfOperationOverrideSearchConditionList":{ + "type":"list", + "member":{"shape":"HoursOfOperationOverrideSearchCriteria"} + }, + "HoursOfOperationOverrideSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{ + "shape":"HoursOfOperationOverrideSearchConditionList", + "documentation":"

A list of conditions which would be applied together with an OR condition.

" + }, + "AndConditions":{ + "shape":"HoursOfOperationOverrideSearchConditionList", + "documentation":"

A list of conditions which would be applied together with an AND condition.

" + }, + "StringCondition":{"shape":"StringCondition"}, + "DateCondition":{ + "shape":"DateCondition", + "documentation":"

A leaf node condition which can be used to specify a date condition.

" + } + }, + "documentation":"

The search criteria to be used to return hours of operations overrides.

" + }, + "HoursOfOperationOverrideYearMonthDayDateFormat":{ + "type":"string", + "pattern":"^\\d{4}-\\d{2}-\\d{2}$" + }, "HoursOfOperationSearchConditionList":{ "type":"list", "member":{"shape":"HoursOfOperationSearchCriteria"} @@ -15161,6 +15613,61 @@ } } }, + "ListHoursOfOperationOverridesRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

The maximum number of results to return per page. The default MaxResult size is 100. Valid Range: Minimum value of 1. Maximum value of 1000.

", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListHoursOfOperationOverridesResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + }, + "HoursOfOperationOverrideList":{ + "shape":"HoursOfOperationOverrideList", + "documentation":"

Information about the hours of operation override.

" + }, + "LastModifiedRegion":{ + "shape":"RegionName", + "documentation":"

The AWS Region where this resource was last modified.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when this resource was last modified.

" + } + } + }, "ListHoursOfOperationsRequest":{ "type":"structure", "required":["InstanceId"], @@ -17096,6 +17603,24 @@ "max":128, "min":0 }, + "OperationalHour":{ + "type":"structure", + "members":{ + "Start":{ + "shape":"OverrideTimeSlice", + "documentation":"

The start time that your contact center opens.

" + }, + "End":{ + "shape":"OverrideTimeSlice", + "documentation":"

The end time that your contact center closes.

" + } + }, + "documentation":"

Information about the hours of operations with the effective override applied.

" + }, + "OperationalHours":{ + "type":"list", + "member":{"shape":"OperationalHour"} + }, "Origin":{ "type":"string", "max":267 @@ -17229,6 +17754,38 @@ "error":{"httpStatusCode":404}, "exception":true }, + "OverrideDays":{ + "type":"string", + "enum":[ + "SUNDAY", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY" + ] + }, + "OverrideTimeSlice":{ + "type":"structure", + "required":[ + "Hours", + "Minutes" + ], + "members":{ + "Hours":{ + "shape":"Hours24Format", + "documentation":"

The hours.

", + "box":true + }, + "Minutes":{ + "shape":"MinutesLimit60", + "documentation":"

The minutes.

", + "box":true + } + }, + "documentation":"

The start time or end time for an hours of operation override.

" + }, "PEM":{ "type":"string", "max":1024, @@ -20342,6 +20899,47 @@ } } }, + "SearchHoursOfOperationOverridesRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

" + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. Length Constraints: Minimum length of 1. Maximum length of 2500.

" + }, + "MaxResults":{ + "shape":"MaxResult100", + "documentation":"

The maximum number of results to return per page. Valid Range: Minimum value of 1. Maximum value of 100.

", + "box":true + }, + "SearchFilter":{"shape":"HoursOfOperationSearchFilter"}, + "SearchCriteria":{ + "shape":"HoursOfOperationOverrideSearchCriteria", + "documentation":"

The search criteria to be used to return hours of operations overrides.
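For illustration, a date-based search using the DateCondition shape defined above might look like the sketch below; the FieldName value is an assumption chosen for the example, the ComparisonType comes from the DateComparisonType enum in this model, and the instance ID is a placeholder:

    import boto3

    connect = boto3.client("connect")

    # "EffectiveFrom" is an assumed field name used only for illustration;
    # the instance ID below is a placeholder.
    resp = connect.search_hours_of_operation_overrides(
        InstanceId="12345678-1234-1234-1234-123456789012",
        SearchCriteria={
            "DateCondition": {
                "FieldName": "EffectiveFrom",
                "Value": "2025-01-01",
                "ComparisonType": "GREATER_THAN_OR_EQUAL_TO",
            }
        },
    )
    print(resp.get("ApproximateTotalCount"))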

" + } + } + }, + "SearchHoursOfOperationOverridesResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationOverrides":{ + "shape":"HoursOfOperationOverrideList", + "documentation":"

Information about the hours of operations overrides.

" + }, + "NextToken":{ + "shape":"NextToken2500", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. Length Constraints: Minimum length of 1. Maximum length of 2500.

" + }, + "ApproximateTotalCount":{ + "shape":"ApproximateTotalCount", + "documentation":"

The total number of hours of operation overrides that matched your search query.

" + } + } + }, "SearchHoursOfOperationsRequest":{ "type":"structure", "required":["InstanceId"], @@ -23845,6 +24443,54 @@ "max":250, "min":0 }, + "UpdateHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "HoursOfOperationOverrideId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance.

", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "documentation":"

The identifier for the hours of operation.

", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "documentation":"

The identifier for the hours of operation override.

", + "location":"uri", + "locationName":"HoursOfOperationOverrideId" + }, + "Name":{ + "shape":"CommonHumanReadableName", + "documentation":"

The name of the hours of operation override.

" + }, + "Description":{ + "shape":"CommonHumanReadableDescription", + "documentation":"

The description of the hours of operation override.

" + }, + "Config":{ + "shape":"HoursOfOperationOverrideConfigList", + "documentation":"

Configuration information for the hours of operation override: day, start time, and end time.

" + }, + "EffectiveFrom":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date from when the hours of operation override would be effective.

" + }, + "EffectiveTill":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "documentation":"

The date till when the hours of operation override would be effective.

" + } + } + }, "UpdateHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -25225,11 +25871,11 @@ "members":{ "FirstName":{ "shape":"AgentFirstName", - "documentation":"

The first name. This is required if you are using Amazon Connect or SAML for identity management.

" + "documentation":"

The first name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) are not accepted.

" }, "LastName":{ "shape":"AgentLastName", - "documentation":"

The last name. This is required if you are using Amazon Connect or SAML for identity management.

" + "documentation":"

The last name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) are not accepted.

" }, "Email":{ "shape":"Email", diff --git a/botocore/data/dms/2016-01-01/service-2.json b/botocore/data/dms/2016-01-01/service-2.json index fd5bcba50d..cb44f5db0b 100644 --- a/botocore/data/dms/2016-01-01/service-2.json +++ b/botocore/data/dms/2016-01-01/service-2.json @@ -364,7 +364,8 @@ "output":{"shape":"DeleteEventSubscriptionResponse"}, "errors":[ {"shape":"ResourceNotFoundFault"}, - {"shape":"InvalidResourceStateFault"} + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} ], "documentation":"

Deletes a DMS event subscription.

" }, @@ -469,7 +470,8 @@ "output":{"shape":"DeleteReplicationSubnetGroupResponse"}, "errors":[ {"shape":"InvalidResourceStateFault"}, - {"shape":"ResourceNotFoundFault"} + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} ], "documentation":"

Deletes a subnet group.

" }, @@ -1063,7 +1065,8 @@ "output":{"shape":"DescribeTableStatisticsResponse"}, "errors":[ {"shape":"ResourceNotFoundFault"}, - {"shape":"InvalidResourceStateFault"} + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} ], "documentation":"

Returns table statistics on the database migration task, including table name, rows inserted, rows updated, and rows deleted.

Note that the \"last updated\" column the DMS console only indicates the time that DMS last updated the table statistics record for a table. It does not indicate the time of the last update to the table.

" }, @@ -1188,7 +1191,8 @@ {"shape":"KMSDisabledFault"}, {"shape":"KMSInvalidStateFault"}, {"shape":"KMSNotFoundFault"}, - {"shape":"KMSThrottlingFault"} + {"shape":"KMSThrottlingFault"}, + {"shape":"AccessDeniedFault"} ], "documentation":"

Modifies an existing DMS event notification subscription.

" }, @@ -2694,7 +2698,7 @@ "documentation":"

The amount of storage (in gigabytes) to be initially allocated for the replication instance.

" }, "ReplicationInstanceClass":{ - "shape":"String", + "shape":"ReplicationInstanceClass", "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Choosing the right DMS replication instance; and, Selecting the best size for a replication instance.

" }, "VpcSecurityGroupIds":{ @@ -2748,6 +2752,10 @@ "NetworkType":{ "shape":"String", "documentation":"

The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet supported.

" + }, + "KerberosAuthenticationSettings":{ + "shape":"KerberosAuthenticationSettings", + "documentation":"

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication when creating a replication instance.

" } }, "documentation":"

" @@ -3745,7 +3753,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

Filters applied to the data providers described in the form of key-value pairs.

Valid filter names: data-provider-identifier

" + "documentation":"

Filters applied to the data providers described in the form of key-value pairs.

Valid filter names and values: data-provider-identifier, data provider arn or name

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4193,7 +4201,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

Filters applied to the instance profiles described in the form of key-value pairs.

" + "documentation":"

Filters applied to the instance profiles described in the form of key-value pairs.

Valid filter names and values: instance-profile-identifier, instance profile arn or name

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -4398,7 +4406,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

Filters applied to the migration projects described in the form of key-value pairs.

" + "documentation":"

Filters applied to the migration projects described in the form of key-value pairs.

Valid filter names and values:

  • instance-profile-identifier, instance profile arn or name

  • data-provider-identifier, data provider arn or name

  • migration-project-identifier, migration project arn or name

" }, "MaxRecords":{ "shape":"IntegerOptional", @@ -6107,6 +6115,10 @@ "SslEndpointIdentificationAlgorithm":{ "shape":"KafkaSslEndpointIdentificationAlgorithm", "documentation":"

Sets hostname verification for the certificate. This setting is supported in DMS version 3.5.1 and later.

" + }, + "UseLargeIntegerValue":{ + "shape":"BooleanOptional", + "documentation":"

Specifies using the large integer value with Kafka.

" } }, "documentation":"

Provides information that describes an Apache Kafka endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.
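
A minimal boto3 sketch of turning on the new UseLargeIntegerValue flag for an existing Kafka target endpoint, assuming an SDK built from a model that includes this update; the endpoint ARN is a placeholder:

```python
import boto3

dms = boto3.client("dms")
dms.modify_endpoint(
    EndpointArn="arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE",
    # UseLargeIntegerValue is new in this model update.
    KafkaSettings={"UseLargeIntegerValue": True},
)
```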

" @@ -6118,6 +6130,24 @@ "https" ] }, + "KerberosAuthenticationSettings":{ + "type":"structure", + "members":{ + "KeyCacheSecretId":{ + "shape":"String", + "documentation":"

Specifies the secret ID of the key cache for the replication instance.

" + }, + "KeyCacheSecretIamArn":{ + "shape":"String", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the IAM role that grants Amazon Web Services DMS access to the secret containing the key cache file for the replication instance.

" + }, + "Krb5FileContents":{ + "shape":"String", + "documentation":"

Specifies the contents of the krb5 configuration file required for Kerberos authentication of the replication instance.

" + } + }, + "documentation":"

Specifies the Kerberos authentication settings for use with DMS.
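
A minimal boto3 sketch of passing these settings when creating a replication instance, assuming an SDK built from a model that includes this update; the identifiers, ARN, and file contents are placeholders:

```python
import boto3

dms = boto3.client("dms")
dms.create_replication_instance(
    ReplicationInstanceIdentifier="myrepinstance",
    ReplicationInstanceClass="dms.c4.large",
    KerberosAuthenticationSettings={
        "KeyCacheSecretId": "my-key-cache-secret",  # placeholder secret ID
        "KeyCacheSecretIamArn": "arn:aws:iam::123456789012:role/dms-kerberos-access",
        "Krb5FileContents": "[libdefaults]\ndefault_realm = EXAMPLE.COM\n",  # placeholder contents
    },
)
```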

" + }, "KeyList":{ "type":"list", "member":{"shape":"String"} @@ -6164,6 +6194,10 @@ "NoHexPrefix":{ "shape":"BooleanOptional", "documentation":"

Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to an Amazon Kinesis target. Use the NoHexPrefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

" + }, + "UseLargeIntegerValue":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether to use the large integer value with Kinesis.

" } }, "documentation":"

Provides information that describes an Amazon Kinesis Data Stream endpoint. This information includes the output format of records applied to the endpoint and details of transaction and control table data information.

" @@ -6339,6 +6373,10 @@ "ForceLobLookup":{ "shape":"BooleanOptional", "documentation":"

Forces LOB lookup on inline LOB.

" + }, + "AuthenticationMethod":{ + "shape":"SqlServerAuthenticationMethod", + "documentation":"

Specifies the authentication method (password or Kerberos) to use with Microsoft SQL Server.

" } }, "documentation":"

Provides information that defines a Microsoft SQL Server endpoint.

" @@ -6893,7 +6931,7 @@ "documentation":"

Indicates whether the changes should be applied immediately or during the next maintenance window.

" }, "ReplicationInstanceClass":{ - "shape":"String", + "shape":"ReplicationInstanceClass", "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "VpcSecurityGroupIds":{ @@ -6927,6 +6965,10 @@ "NetworkType":{ "shape":"String", "documentation":"

The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet supported.

" + }, + "KerberosAuthenticationSettings":{ + "shape":"KerberosAuthenticationSettings", + "documentation":"

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication when modifying a replication instance.

" } }, "documentation":"

" @@ -7295,6 +7337,13 @@ "one" ] }, + "OracleAuthenticationMethod":{ + "type":"string", + "enum":[ + "password", + "kerberos" + ] + }, "OracleDataProviderSettings":{ "type":"structure", "members":{ @@ -7402,7 +7451,7 @@ }, "ArchivedLogsOnly":{ "shape":"BooleanOptional", - "documentation":"

When this field is set to Y, DMS only accesses the archived redo logs. If the archived redo logs are stored on Automatic Storage Management (ASM) only, the DMS user account needs to be granted ASM privileges.

" + "documentation":"

When this field is set to True, DMS only accesses the archived redo logs. If the archived redo logs are stored on Automatic Storage Management (ASM) only, the DMS user account needs to be granted ASM privileges.

" }, "AsmPassword":{ "shape":"SecretString", @@ -7478,15 +7527,15 @@ }, "UseBFile":{ "shape":"BooleanOptional", - "documentation":"

Set this attribute to Y to capture change data using the Binary Reader utility. Set UseLogminerReader to N to set this attribute to Y. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC.

" + "documentation":"

Set this attribute to True to capture change data using the Binary Reader utility. To set this attribute to True, also set UseLogminerReader to False. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC.

" }, "UseDirectPathFullLoad":{ "shape":"BooleanOptional", - "documentation":"

Set this attribute to Y to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load.

" + "documentation":"

Set this attribute to True to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load.

" }, "UseLogminerReader":{ "shape":"BooleanOptional", - "documentation":"

Set this attribute to Y to capture change data using the Oracle LogMiner utility (the default). Set this attribute to N if you want to access the redo logs as a binary file. When you set UseLogminerReader to N, also set UseBfile to Y. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide.

" + "documentation":"

Set this attribute to True to capture change data using the Oracle LogMiner utility (the default). Set this attribute to False if you want to access the redo logs as a binary file. When you set UseLogminerReader to False, also set UseBfile to True. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide.

" }, "SecretsManagerAccessRoleArn":{ "shape":"String", @@ -7514,7 +7563,11 @@ }, "OpenTransactionWindow":{ "shape":"IntegerOptional", - "documentation":"

The timeframe in minutes to check for open transactions for a CDC-only task.

You can specify an integer value between 0 (the default) and 240 (the maximum).

This parameter is only valid in DMS version 3.5.0 and later. DMS supports a window of up to 9.5 hours including the value for OpenTransactionWindow.

" + "documentation":"

The timeframe in minutes to check for open transactions for a CDC-only task.

You can specify an integer value between 0 (the default) and 240 (the maximum).

This parameter is only valid in DMS version 3.5.0 and later.

" + }, + "AuthenticationMethod":{ + "shape":"OracleAuthenticationMethod", + "documentation":"

Specifies the authentication method (password or Kerberos) to use with Oracle.

" } }, "documentation":"

Provides information that defines an Oracle endpoint.
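
A minimal boto3 sketch of an Oracle source endpoint that selects the new Kerberos authentication method, assuming an SDK built from a model that includes this update; server, database, and identifier values are placeholders:

```python
import boto3

dms = boto3.client("dms")
dms.create_endpoint(
    EndpointIdentifier="oracle-source",
    EndpointType="source",
    EngineName="oracle",
    OracleSettings={
        "ServerName": "oracle.example.com",
        "Port": 1521,
        "DatabaseName": "ORCL",
        "AuthenticationMethod": "kerberos",  # or "password"
    },
)
```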

" @@ -7527,7 +7580,7 @@ "documentation":"

The version of the replication engine.

" }, "ReplicationInstanceClass":{ - "shape":"String", + "shape":"ReplicationInstanceClass", "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "StorageType":{ @@ -7634,11 +7687,11 @@ }, "CaptureDdls":{ "shape":"BooleanOptional", - "documentation":"

To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.

If this value is set to N, you don't have to create tables or triggers on the source database.

" + "documentation":"

To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.

The default value is true.

If this value is set to false, you don't have to create tables or triggers on the source database.

" }, "MaxFileSize":{ "shape":"IntegerOptional", - "documentation":"

Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.

Example: maxFileSize=512

" + "documentation":"

Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.

The default value is 32,768 KB (32 MB).

Example: maxFileSize=512

" }, "DatabaseName":{ "shape":"String", @@ -7646,7 +7699,7 @@ }, "DdlArtifactsSchema":{ "shape":"String", - "documentation":"

The schema in which the operational DDL database artifacts are created.

Example: ddlArtifactsSchema=xyzddlschema;

" + "documentation":"

The schema in which the operational DDL database artifacts are created.

The default value is public.

Example: ddlArtifactsSchema=xyzddlschema;

" }, "ExecuteTimeout":{ "shape":"IntegerOptional", @@ -7654,19 +7707,19 @@ }, "FailTasksOnLobTruncation":{ "shape":"BooleanOptional", - "documentation":"

When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize.

If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.

" + "documentation":"

When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize.

The default value is false.

If the task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.

" }, "HeartbeatEnable":{ "shape":"BooleanOptional", - "documentation":"

The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.

" + "documentation":"

The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.

The default value is false.

" }, "HeartbeatSchema":{ "shape":"String", - "documentation":"

Sets the schema in which the heartbeat artifacts are created.

" + "documentation":"

Sets the schema in which the heartbeat artifacts are created.

The default value is public.

" }, "HeartbeatFrequency":{ "shape":"IntegerOptional", - "documentation":"

Sets the WAL heartbeat frequency (in minutes).

" + "documentation":"

Sets the WAL heartbeat frequency (in minutes).

The default value is 5 minutes.

" }, "Password":{ "shape":"SecretString", @@ -7690,7 +7743,7 @@ }, "PluginName":{ "shape":"PluginNameValue", - "documentation":"

Specifies the plugin to use to create a replication slot.

" + "documentation":"

Specifies the plugin to use to create a replication slot.

The default value is pglogical.

" }, "SecretsManagerAccessRoleArn":{ "shape":"String", @@ -7706,15 +7759,15 @@ }, "MapBooleanAsBoolean":{ "shape":"BooleanOptional", - "documentation":"

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5). You must set this setting on both the source and target endpoints for it to take effect.

" + "documentation":"

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5). You must set this setting on both the source and target endpoints for it to take effect.

The default value is false.

" }, "MapJsonbAsClob":{ "shape":"BooleanOptional", - "documentation":"

When true, DMS migrates JSONB values as CLOB.

" + "documentation":"

When true, DMS migrates JSONB values as CLOB.

The default value is false.

" }, "MapLongVarcharAs":{ "shape":"LongVarcharMappingType", - "documentation":"

When true, DMS migrates LONG values as VARCHAR.

" + "documentation":"

Specifies the data type to which LONG values are mapped.

The default value is wstring.

" }, "DatabaseMode":{ "shape":"DatabaseMode", @@ -7723,6 +7776,10 @@ "BabelfishDatabaseName":{ "shape":"String", "documentation":"

The Babelfish for Aurora PostgreSQL database name for the endpoint.

" + }, + "DisableUnicodeSourceFilter":{ + "shape":"BooleanOptional", + "documentation":"

Disables the Unicode source filter with PostgreSQL for values passed into the selection rule filter on source endpoint column values. By default, DMS performs source filter comparisons using a Unicode string, which can cause lookups to ignore the indexes in the text columns and slow down migrations.

Unicode support should only be disabled when the selection rule filter is on a text column in the source database that is indexed.

" } }, "documentation":"

Provides information that defines a PostgreSQL endpoint.
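
A minimal boto3 sketch of a PostgreSQL source endpoint exercising several of the settings above, including the new DisableUnicodeSourceFilter flag, assuming an SDK built from a model that includes this update; connection details are omitted and all values are placeholders:

```python
import boto3

dms = boto3.client("dms")
dms.create_endpoint(
    EndpointIdentifier="pg-source",
    EndpointType="source",
    EngineName="postgres",
    PostgreSQLSettings={
        "HeartbeatEnable": True,      # keep restart_lsn moving on idle sources
        "HeartbeatFrequency": 5,      # minutes (the documented default)
        "DdlArtifactsSchema": "public",
        "PluginName": "pglogical",
        "DisableUnicodeSourceFilter": True,
    },
)
```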

" @@ -8372,7 +8429,7 @@ }, "StartReplicationType":{ "shape":"String", - "documentation":"

The replication type.

" + "documentation":"

The type of replication to start.

" }, "CdcStartTime":{ "shape":"TStamp", @@ -8478,7 +8535,7 @@ "documentation":"

The replication instance identifier is a required parameter. This parameter is stored as a lowercase string.

Constraints:

  • Must contain 1-63 alphanumeric characters or hyphens.

  • First character must be a letter.

  • Cannot end with a hyphen or contain two consecutive hyphens.

Example: myrepinstance

" }, "ReplicationInstanceClass":{ - "shape":"String", + "shape":"ReplicationInstanceClass", "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class. It is a required parameter, although a default value is pre-selected in the DMS console.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "ReplicationInstanceStatus":{ @@ -8574,10 +8631,18 @@ "NetworkType":{ "shape":"String", "documentation":"

The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet supported.

" + }, + "KerberosAuthenticationSettings":{ + "shape":"KerberosAuthenticationSettings", + "documentation":"

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication of the replication instance.

" } }, "documentation":"

Provides information that defines a replication instance.

" }, + "ReplicationInstanceClass":{ + "type":"string", + "max":30 + }, "ReplicationInstanceIpv6AddressList":{ "type":"list", "member":{"shape":"String"} @@ -8624,7 +8689,7 @@ "type":"structure", "members":{ "ReplicationInstanceClass":{ - "shape":"String", + "shape":"ReplicationInstanceClass", "documentation":"

The compute and memory capacity of the replication instance as defined for the specified replication instance class.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" }, "AllocatedStorage":{ @@ -8786,7 +8851,7 @@ }, "StopReason":{ "shape":"String", - "documentation":"

The reason the replication task was stopped. This response parameter can return one of the following values:

  • \"Stop Reason NORMAL\"

  • \"Stop Reason RECOVERABLE_ERROR\"

  • \"Stop Reason FATAL_ERROR\"

  • \"Stop Reason FULL_LOAD_ONLY_FINISHED\"

  • \"Stop Reason STOPPED_AFTER_FULL_LOAD\" – Full load completed, with cached changes not applied

  • \"Stop Reason STOPPED_AFTER_CACHED_EVENTS\" – Full load completed, with cached changes applied

  • \"Stop Reason EXPRESS_LICENSE_LIMITS_REACHED\"

  • \"Stop Reason STOPPED_AFTER_DDL_APPLY\" – User-defined stop task after DDL applied

  • \"Stop Reason STOPPED_DUE_TO_LOW_MEMORY\"

  • \"Stop Reason STOPPED_DUE_TO_LOW_DISK\"

  • \"Stop Reason STOPPED_AT_SERVER_TIME\" – User-defined server time for stopping task

  • \"Stop Reason STOPPED_AT_COMMIT_TIME\" – User-defined commit time for stopping task

  • \"Stop Reason RECONFIGURATION_RESTART\"

  • \"Stop Reason RECYCLE_TASK\"

" + "documentation":"

The reason the replication task was stopped. This response parameter can return one of the following values:

  • \"Stop Reason NORMAL\" – The task completed successfully with no additional information returned.

  • \"Stop Reason RECOVERABLE_ERROR\"

  • \"Stop Reason FATAL_ERROR\"

  • \"Stop Reason FULL_LOAD_ONLY_FINISHED\" – The task completed the full load phase. DMS applied cached changes if you set StopTaskCachedChangesApplied to true.

  • \"Stop Reason STOPPED_AFTER_FULL_LOAD\" – Full load completed, with cached changes not applied

  • \"Stop Reason STOPPED_AFTER_CACHED_EVENTS\" – Full load completed, with cached changes applied

  • \"Stop Reason EXPRESS_LICENSE_LIMITS_REACHED\"

  • \"Stop Reason STOPPED_AFTER_DDL_APPLY\" – User-defined stop task after DDL applied

  • \"Stop Reason STOPPED_DUE_TO_LOW_MEMORY\"

  • \"Stop Reason STOPPED_DUE_TO_LOW_DISK\"

  • \"Stop Reason STOPPED_AT_SERVER_TIME\" – User-defined server time for stopping task

  • \"Stop Reason STOPPED_AT_COMMIT_TIME\" – User-defined commit time for stopping task

  • \"Stop Reason RECONFIGURATION_RESTART\"

  • \"Stop Reason RECYCLE_TASK\"

" }, "ReplicationTaskCreationDate":{ "shape":"TStamp", @@ -8855,7 +8920,7 @@ "documentation":"

The task assessment results in JSON format.

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.

" }, "S3ObjectUrl":{ - "shape":"String", + "shape":"SecretString", "documentation":"

The URL of the S3 object containing the task assessment results.

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.

" } }, @@ -8878,7 +8943,7 @@ }, "Status":{ "shape":"String", - "documentation":"

Assessment run status.

This status can have one of the following values:

  • \"cancelling\" – The assessment run was canceled by the CancelReplicationTaskAssessmentRun operation.

  • \"deleting\" – The assessment run was deleted by the DeleteReplicationTaskAssessmentRun operation.

  • \"failed\" – At least one individual assessment completed with a failed status.

  • \"error-provisioning\" – An internal error occurred while resources were provisioned (during provisioning status).

  • \"error-executing\" – An internal error occurred while individual assessments ran (during running status).

  • \"invalid state\" – The assessment run is in an unknown state.

  • \"passed\" – All individual assessments have completed, and none has a failed status.

  • \"provisioning\" – Resources required to run individual assessments are being provisioned.

  • \"running\" – Individual assessments are being run.

  • \"starting\" – The assessment run is starting, but resources are not yet being provisioned for individual assessments.

" + "documentation":"

Assessment run status.

This status can have one of the following values:

  • \"cancelling\" – The assessment run was canceled by the CancelReplicationTaskAssessmentRun operation.

  • \"deleting\" – The assessment run was deleted by the DeleteReplicationTaskAssessmentRun operation.

  • \"failed\" – At least one individual assessment completed with a failed status.

  • \"error-provisioning\" – An internal error occurred while resources were provisioned (during provisioning status).

  • \"error-executing\" – An internal error occurred while individual assessments ran (during running status).

  • \"invalid state\" – The assessment run is in an unknown state.

  • \"passed\" – All individual assessments have completed, and none has a failed status.

  • \"provisioning\" – Resources required to run individual assessments are being provisioned.

  • \"running\" – Individual assessments are being run.

  • \"starting\" – The assessment run is starting, but resources are not yet being provisioned for individual assessments.

  • \"warning\" – At least one individual assessment completed with a warning status.

" }, "ReplicationTaskAssessmentRunCreationDate":{ "shape":"TStamp", @@ -9499,6 +9564,13 @@ "type":"string", "enum":["replication-instance"] }, + "SqlServerAuthenticationMethod":{ + "type":"string", + "enum":[ + "password", + "kerberos" + ] + }, "SslSecurityProtocolValue":{ "type":"string", "enum":[ @@ -9755,7 +9827,7 @@ }, "StartReplicationType":{ "shape":"String", - "documentation":"

The replication type.

" + "documentation":"

The replication type.

When the replication type is full-load or full-load-and-cdc, the only valid value for the first run of the replication is start-replication. This option will start the replication.

You can also use ReloadTables to reload specific tables that failed during replication instead of restarting the replication.

The resume-processing option isn't applicable for a full-load replication, because you can't resume partially loaded tables during the full load phase.

For a full-load-and-cdc replication, DMS migrates table data, and then applies data changes that occur on the source. To load all the tables again and start capturing source changes, use reload-target. Otherwise, use resume-processing to replicate the changes from the last stop position.
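
A minimal boto3 sketch of the first run described above, assuming a DMS Serverless replication config already exists; the config ARN is a placeholder:

```python
import boto3

dms = boto3.client("dms")
dms.start_replication(
    ReplicationConfigArn="arn:aws:dms:us-east-1:123456789012:replication-config:EXAMPLE",
    # The first run must use "start-replication"; later runs of a
    # full-load-and-cdc replication can use "resume-processing" or "reload-target".
    StartReplicationType="start-replication",
)
```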

" }, "CdcStartTime":{ "shape":"TStamp", diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index 57c7879bd5..5deb48b199 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -857,7 +857,7 @@ {"shape":"ResourceNumberLimitExceededException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Creates a new trigger.

" + "documentation":"

Creates a new trigger.

Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
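
A minimal boto3 sketch of this guidance: the trigger passes only a secret name as a job argument, and the job is expected to fetch the value from Secrets Manager at run time (all names are placeholders):

```python
import boto3

glue = boto3.client("glue")
glue.create_trigger(
    Name="nightly-trigger",
    Type="SCHEDULED",
    Schedule="cron(0 2 * * ? *)",
    Actions=[{
        "JobName": "my-etl-job",
        # A reference to the secret, not the secret value itself.
        "Arguments": {"--db_secret_name": "prod/etl/db-credentials"},
    }],
    StartOnCreation=False,
)
```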

" }, "CreateUsageProfile":{ "name":"CreateUsageProfile", @@ -2103,7 +2103,7 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run.

" + "documentation":"

Retrieves the metadata for a given job run. Job run history is accessible for 365 days for your workflow and job run.

" }, "GetJobRuns":{ "name":"GetJobRuns", @@ -2119,7 +2119,7 @@ {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"} ], - "documentation":"

Retrieves metadata for all runs of a given job definition.

" + "documentation":"

Retrieves metadata for all runs of a given job definition.

GetJobRuns returns the job runs in reverse chronological order, with the newest runs returned first.
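
A minimal boto3 sketch of paging through the runs of one job; the job name is a placeholder:

```python
import boto3

glue = boto3.client("glue")
paginator = glue.get_paginator("get_job_runs")
for page in paginator.paginate(JobName="my-etl-job"):
    for run in page["JobRuns"]:  # newest runs first
        print(run["Id"], run["JobRunState"])
```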

" }, "GetJobs":{ "name":"GetJobs", @@ -4246,7 +4246,7 @@ {"shape":"OperationTimeoutException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Updates a trigger definition.

" + "documentation":"

Updates a trigger definition.

Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

" }, "UpdateUsageProfile":{ "name":"UpdateUsageProfile", @@ -9184,7 +9184,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. A usage sketch follows this list.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
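
A minimal boto3 sketch of selecting a worker type and count when creating a Spark job; the role ARN, script location, and job name are placeholders:

```python
import boto3

glue = boto3.client("glue")
glue.create_job(
    Name="my-etl-job",
    Role="arn:aws:iam::123456789012:role/GlueJobRole",
    Command={"Name": "glueetl", "ScriptLocation": "s3://my-bucket/scripts/etl.py"},
    GlueVersion="4.0",
    WorkerType="G.1X",      # see the list above for the available sizes
    NumberOfWorkers=10,
)
```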

" }, "CodeGenConfigurationNodes":{ "shape":"CodeGenConfigurationNodes", @@ -9612,7 +9612,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

" }, "SecurityConfiguration":{ "shape":"NameString", @@ -9850,7 +9850,7 @@ }, "DefaultRunProperties":{ "shape":"WorkflowRunProperties", - "documentation":"

A collection of properties to be used as part of each execution of the workflow.

" + "documentation":"

A collection of properties to be used as part of each execution of the workflow.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

" }, "Tags":{ "shape":"TagsMap", @@ -10294,6 +10294,27 @@ "max":2000, "min":0 }, + "DataQualityEncryption":{ + "type":"structure", + "members":{ + "DataQualityEncryptionMode":{ + "shape":"DataQualityEncryptionMode", + "documentation":"

The encryption mode to use for encrypting Data Quality assets. These assets include data quality rulesets, results, statistics, anomaly detection models and observations.

Valid values are SSE-KMS for encryption using a customer-managed KMS key, or DISABLED.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

" + } + }, + "documentation":"

Specifies how Data Quality assets in your account should be encrypted.
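
A minimal boto3 sketch of including the new DataQualityEncryption block in a security configuration, assuming an SDK built from a model that includes this update; the KMS key ARN is a placeholder:

```python
import boto3

glue = boto3.client("glue")
glue.create_security_configuration(
    Name="dq-encrypted",
    EncryptionConfiguration={
        "DataQualityEncryption": {
            "DataQualityEncryptionMode": "SSE-KMS",
            "KmsKeyArn": "arn:aws:kms:us-east-1:123456789012:key/EXAMPLE",
        }
    },
)
```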

" + }, + "DataQualityEncryptionMode":{ + "type":"string", + "enum":[ + "DISABLED", + "SSE-KMS" + ] + }, "DataQualityEvaluationRunAdditionalRunOptions":{ "type":"structure", "members":{ @@ -12476,6 +12497,10 @@ "JobBookmarksEncryption":{ "shape":"JobBookmarksEncryption", "documentation":"

The encryption configuration for job bookmarks.

" + }, + "DataQualityEncryption":{ + "shape":"DataQualityEncryption", + "documentation":"

The encryption configuration for Glue Data Quality assets.

" } }, "documentation":"

Specifies an encryption configuration.

" @@ -17235,7 +17260,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -17459,7 +17484,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -17586,7 +17611,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -20821,7 +20846,7 @@ }, "RunProperties":{ "shape":"WorkflowRunProperties", - "documentation":"

The properties to put for the specified run.

" + "documentation":"

The properties to put for the specified run.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.
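
A minimal boto3 sketch of this guidance: store a pointer to a secret in the run properties rather than the secret value itself (names and the run ID are placeholders):

```python
import boto3

glue = boto3.client("glue")
glue.put_workflow_run_properties(
    Name="my-workflow",
    RunId="wr_0123456789abcdef",
    RunProperties={"db_secret_name": "prod/etl/db-credentials"},
)
```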

" } } }, @@ -23705,7 +23730,7 @@ }, "WorkerType":{ "shape":"WorkerType", - "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.

" + "documentation":"

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

  • For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries; it offers a scalable and cost-effective way to run most jobs.

  • For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).

  • For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type.

  • For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.

  • For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

" }, "NumberOfWorkers":{ "shape":"NullableInteger", @@ -23800,7 +23825,7 @@ }, "RunProperties":{ "shape":"WorkflowRunProperties", - "documentation":"

The workflow run properties for the new workflow run.

" + "documentation":"

The workflow run properties for the new workflow run.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

" } } }, @@ -26613,7 +26638,7 @@ }, "DefaultRunProperties":{ "shape":"WorkflowRunProperties", - "documentation":"

A collection of properties to be used as part of each execution of the workflow.

" + "documentation":"

A collection of properties to be used as part of each execution of the workflow.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

" }, "MaxConcurrentRuns":{ "shape":"NullableInteger", diff --git a/botocore/data/guardduty/2017-11-28/service-2.json b/botocore/data/guardduty/2017-11-28/service-2.json index f1ef12b6ea..315df5a419 100644 --- a/botocore/data/guardduty/2017-11-28/service-2.json +++ b/botocore/data/guardduty/2017-11-28/service-2.json @@ -2420,7 +2420,7 @@ }, "FindingCriteria":{ "shape":"FindingCriteria", - "documentation":"

Represents the criteria to be used in the filter for querying findings.

You can only use the following attributes to query findings:

  • accountId

  • id

  • region

  • severity

    To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition:

    • Low: [\"1\", \"2\", \"3\"]

    • Medium: [\"4\", \"5\", \"6\"]

    • High: [\"7\", \"8\", \"9\"]

    For more information, see Severity levels for GuardDuty findings.

  • type

  • updatedAt

    Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.

  • resource.accessKeyDetails.accessKeyId

  • resource.accessKeyDetails.principalId

  • resource.accessKeyDetails.userName

  • resource.accessKeyDetails.userType

  • resource.instanceDetails.iamInstanceProfile.id

  • resource.instanceDetails.imageId

  • resource.instanceDetails.instanceId

  • resource.instanceDetails.tags.key

  • resource.instanceDetails.tags.value

  • resource.instanceDetails.networkInterfaces.ipv6Addresses

  • resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

  • resource.instanceDetails.networkInterfaces.publicDnsName

  • resource.instanceDetails.networkInterfaces.publicIp

  • resource.instanceDetails.networkInterfaces.securityGroups.groupId

  • resource.instanceDetails.networkInterfaces.securityGroups.groupName

  • resource.instanceDetails.networkInterfaces.subnetId

  • resource.instanceDetails.networkInterfaces.vpcId

  • resource.instanceDetails.outpostArn

  • resource.resourceType

  • resource.s3BucketDetails.publicAccess.effectivePermissions

  • resource.s3BucketDetails.name

  • resource.s3BucketDetails.tags.key

  • resource.s3BucketDetails.tags.value

  • resource.s3BucketDetails.type

  • service.action.actionType

  • service.action.awsApiCallAction.api

  • service.action.awsApiCallAction.callerType

  • service.action.awsApiCallAction.errorCode

  • service.action.awsApiCallAction.remoteIpDetails.city.cityName

  • service.action.awsApiCallAction.remoteIpDetails.country.countryName

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV6

  • service.action.awsApiCallAction.remoteIpDetails.organization.asn

  • service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

  • service.action.awsApiCallAction.serviceName

  • service.action.dnsRequestAction.domain

  • service.action.dnsRequestAction.domainWithSuffix

  • service.action.networkConnectionAction.blocked

  • service.action.networkConnectionAction.connectionDirection

  • service.action.networkConnectionAction.localPortDetails.port

  • service.action.networkConnectionAction.protocol

  • service.action.networkConnectionAction.remoteIpDetails.city.cityName

  • service.action.networkConnectionAction.remoteIpDetails.country.countryName

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV6

  • service.action.networkConnectionAction.remoteIpDetails.organization.asn

  • service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

  • service.action.networkConnectionAction.remotePortDetails.port

  • service.action.awsApiCallAction.remoteAccountDetails.affiliated

  • service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6

  • service.action.kubernetesApiCallAction.namespace

  • service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn

  • service.action.kubernetesApiCallAction.requestUri

  • service.action.kubernetesApiCallAction.statusCode

  • service.action.networkConnectionAction.localIpDetails.ipAddressV4

  • service.action.networkConnectionAction.localIpDetails.ipAddressV6

  • service.action.networkConnectionAction.protocol

  • service.action.awsApiCallAction.serviceName

  • service.action.awsApiCallAction.remoteAccountDetails.accountId

  • service.additionalInfo.threatListName

  • service.resourceRole

  • resource.eksClusterDetails.name

  • resource.kubernetesDetails.kubernetesWorkloadDetails.name

  • resource.kubernetesDetails.kubernetesWorkloadDetails.namespace

  • resource.kubernetesDetails.kubernetesUserDetails.username

  • resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image

  • resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix

  • service.ebsVolumeScanDetails.scanId

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash

  • resource.ecsClusterDetails.name

  • resource.ecsClusterDetails.taskDetails.containers.image

  • resource.ecsClusterDetails.taskDetails.definitionArn

  • resource.containerDetails.image

  • resource.rdsDbInstanceDetails.dbInstanceIdentifier

  • resource.rdsDbInstanceDetails.dbClusterIdentifier

  • resource.rdsDbInstanceDetails.engine

  • resource.rdsDbUserDetails.user

  • resource.rdsDbInstanceDetails.tags.key

  • resource.rdsDbInstanceDetails.tags.value

  • service.runtimeDetails.process.executableSha256

  • service.runtimeDetails.process.name

  • resource.lambdaDetails.functionName

  • resource.lambdaDetails.functionArn

  • resource.lambdaDetails.tags.key

  • resource.lambdaDetails.tags.value

", + "documentation":"

Represents the criteria to be used in the filter for querying findings.

You can only use the following attributes to query findings:

  • accountId

  • id

  • region

  • severity

    To filter on the basis of severity, the API and CLI use the following input list for the FindingCriteria condition:

    • Low: [\"1\", \"2\", \"3\"]

    • Medium: [\"4\", \"5\", \"6\"]

    • High: [\"7\", \"8\"]

    • Critical: [\"9\", \"10\"]

    For more information, see Findings severity levels in the Amazon GuardDuty User Guide.

  • type

  • updatedAt

    Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds.

  • resource.accessKeyDetails.accessKeyId

  • resource.accessKeyDetails.principalId

  • resource.accessKeyDetails.userName

  • resource.accessKeyDetails.userType

  • resource.instanceDetails.iamInstanceProfile.id

  • resource.instanceDetails.imageId

  • resource.instanceDetails.instanceId

  • resource.instanceDetails.tags.key

  • resource.instanceDetails.tags.value

  • resource.instanceDetails.networkInterfaces.ipv6Addresses

  • resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress

  • resource.instanceDetails.networkInterfaces.publicDnsName

  • resource.instanceDetails.networkInterfaces.publicIp

  • resource.instanceDetails.networkInterfaces.securityGroups.groupId

  • resource.instanceDetails.networkInterfaces.securityGroups.groupName

  • resource.instanceDetails.networkInterfaces.subnetId

  • resource.instanceDetails.networkInterfaces.vpcId

  • resource.instanceDetails.outpostArn

  • resource.resourceType

  • resource.s3BucketDetails.publicAccess.effectivePermissions

  • resource.s3BucketDetails.name

  • resource.s3BucketDetails.tags.key

  • resource.s3BucketDetails.tags.value

  • resource.s3BucketDetails.type

  • service.action.actionType

  • service.action.awsApiCallAction.api

  • service.action.awsApiCallAction.callerType

  • service.action.awsApiCallAction.errorCode

  • service.action.awsApiCallAction.remoteIpDetails.city.cityName

  • service.action.awsApiCallAction.remoteIpDetails.country.countryName

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.awsApiCallAction.remoteIpDetails.ipAddressV6

  • service.action.awsApiCallAction.remoteIpDetails.organization.asn

  • service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg

  • service.action.awsApiCallAction.serviceName

  • service.action.dnsRequestAction.domain

  • service.action.dnsRequestAction.domainWithSuffix

  • service.action.networkConnectionAction.blocked

  • service.action.networkConnectionAction.connectionDirection

  • service.action.networkConnectionAction.localPortDetails.port

  • service.action.networkConnectionAction.protocol

  • service.action.networkConnectionAction.remoteIpDetails.city.cityName

  • service.action.networkConnectionAction.remoteIpDetails.country.countryName

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV4

  • service.action.networkConnectionAction.remoteIpDetails.ipAddressV6

  • service.action.networkConnectionAction.remoteIpDetails.organization.asn

  • service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg

  • service.action.networkConnectionAction.remotePortDetails.port

  • service.action.awsApiCallAction.remoteAccountDetails.affiliated

  • service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV4

  • service.action.kubernetesApiCallAction.remoteIpDetails.ipAddressV6

  • service.action.kubernetesApiCallAction.namespace

  • service.action.kubernetesApiCallAction.remoteIpDetails.organization.asn

  • service.action.kubernetesApiCallAction.requestUri

  • service.action.kubernetesApiCallAction.statusCode

  • service.action.networkConnectionAction.localIpDetails.ipAddressV4

  • service.action.networkConnectionAction.localIpDetails.ipAddressV6

  • service.action.networkConnectionAction.protocol

  • service.action.awsApiCallAction.serviceName

  • service.action.awsApiCallAction.remoteAccountDetails.accountId

  • service.additionalInfo.threatListName

  • service.resourceRole

  • resource.eksClusterDetails.name

  • resource.kubernetesDetails.kubernetesWorkloadDetails.name

  • resource.kubernetesDetails.kubernetesWorkloadDetails.namespace

  • resource.kubernetesDetails.kubernetesUserDetails.username

  • resource.kubernetesDetails.kubernetesWorkloadDetails.containers.image

  • resource.kubernetesDetails.kubernetesWorkloadDetails.containers.imagePrefix

  • service.ebsVolumeScanDetails.scanId

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.name

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.severity

  • service.ebsVolumeScanDetails.scanDetections.threatDetectedByName.threatNames.filePaths.hash

  • resource.ecsClusterDetails.name

  • resource.ecsClusterDetails.taskDetails.containers.image

  • resource.ecsClusterDetails.taskDetails.definitionArn

  • resource.containerDetails.image

  • resource.rdsDbInstanceDetails.dbInstanceIdentifier

  • resource.rdsDbInstanceDetails.dbClusterIdentifier

  • resource.rdsDbInstanceDetails.engine

  • resource.rdsDbUserDetails.user

  • resource.rdsDbInstanceDetails.tags.key

  • resource.rdsDbInstanceDetails.tags.value

  • service.runtimeDetails.process.executableSha256

  • service.runtimeDetails.process.name

  • resource.lambdaDetails.functionName

  • resource.lambdaDetails.functionArn

  • resource.lambdaDetails.tags.key

  • resource.lambdaDetails.tags.value
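
For orientation, a hedged boto3 sketch that queries findings on two of the attributes listed above, using the revised severity lists (High: 7–8, Critical: 9–10). The detector ID is a placeholder; in practice it can be retrieved with ListDetectors.

    import boto3

    guardduty = boto3.client("guardduty")

    response = guardduty.list_findings(
        DetectorId="12abc34d567e8fa901bc2d34e56789f0",      # placeholder
        FindingCriteria={
            "Criterion": {
                "severity": {"Eq": ["7", "8", "9", "10"]},  # High and Critical
                "service.resourceRole": {"Eq": ["TARGET"]},
            }
        },
    )
    print(response["FindingIds"])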

", "locationName":"findingCriteria" }, "ClientToken":{ @@ -3186,7 +3186,7 @@ "members":{ "Scans":{ "shape":"Scans", - "documentation":"

Contains information about malware scans.

", + "documentation":"

Contains information about malware scans associated with GuardDuty Malware Protection for EC2.

", "locationName":"scans" }, "NextToken":{ @@ -7113,7 +7113,7 @@ "members":{ "Name":{ "shape":"OrgFeatureAdditionalConfiguration", - "documentation":"

The name of the additional configuration that will be configured for the organization.

", + "documentation":"

The name of the additional configuration that will be configured for the organization. These values are applicable only to the Runtime Monitoring protection plan.

", "locationName":"name" }, "AutoEnable":{ @@ -7122,14 +7122,14 @@ "locationName":"autoEnable" } }, - "documentation":"

A list of additional configurations which will be configured for the organization.

" + "documentation":"

A list of additional configurations which will be configured for the organization.

Additional configuration applies only to the GuardDuty Runtime Monitoring protection plan.

" }, "OrganizationAdditionalConfigurationResult":{ "type":"structure", "members":{ "Name":{ "shape":"OrgFeatureAdditionalConfiguration", - "documentation":"

The name of the additional configuration that is configured for the member accounts within the organization.

", + "documentation":"

The name of the additional configuration that is configured for the member accounts within the organization. These values are applicable only to the Runtime Monitoring protection plan.

", "locationName":"name" }, "AutoEnable":{ @@ -8520,7 +8520,7 @@ "members":{ "DetectorId":{ "shape":"DetectorId", - "documentation":"

The unique ID of the detector that the request is associated with.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", + "documentation":"

The unique ID of the detector that is associated with the request.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", "locationName":"detectorId" }, "AdminDetectorId":{ @@ -8540,7 +8540,7 @@ }, "FailureReason":{ "shape":"NonEmptyString", - "documentation":"

Represents the reason for FAILED scan status.

", + "documentation":"

Represents the reason for FAILED scan status.

", "locationName":"failureReason" }, "ScanStartTime":{ @@ -8594,7 +8594,7 @@ "locationName":"scanType" } }, - "documentation":"

Contains information about a malware scan.

" + "documentation":"

Contains information about malware scans associated with GuardDuty Malware Protection for EC2.

" }, "ScanCondition":{ "type":"structure", @@ -9863,7 +9863,7 @@ }, "AutoEnable":{ "shape":"Boolean", - "documentation":"

Represents whether or not to automatically enable member accounts in the organization.

Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve the similar results. You must provide a value for either autoEnableOrganizationMembers or autoEnable.

", + "documentation":"

Represents whether to automatically enable member accounts in the organization. This applies only to new member accounts, not to existing member accounts. When a new account joins the organization, the chosen features will be enabled for them by default.

Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve similar results. You must provide a value for either autoEnableOrganizationMembers or autoEnable.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use AutoEnableOrganizationMembers instead", "locationName":"autoEnable" diff --git a/botocore/data/route53domains/2014-05-15/service-2.json b/botocore/data/route53domains/2014-05-15/service-2.json index ea75d18949..9dc2611607 100644 --- a/botocore/data/route53domains/2014-05-15/service-2.json +++ b/botocore/data/route53domains/2014-05-15/service-2.json @@ -1782,7 +1782,7 @@ }, "LangCode":{ "type":"string", - "max":3 + "pattern":"|[A-Za-z]{2,3}" }, "ListDomainsAttributeName":{ "type":"string", @@ -2049,13 +2049,14 @@ "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", - "TRANSFER_ON_RENEW" + "TRANSFER_ON_RENEW", + "RESTORE_DOMAIN" ] }, "OperationTypeList":{ "type":"list", "member":{"shape":"OperationType"}, - "max":20 + "max":21 }, "Operator":{ "type":"string", @@ -2077,7 +2078,10 @@ "type":"string", "sensitive":true }, - "Price":{"type":"double"}, + "Price":{ + "type":"double", + "min":0.0 + }, "PriceWithCurrency":{ "type":"structure", "required":[ From e36e0e9542d67d74fbf07800e8394254939b5f34 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 12 Dec 2024 19:24:30 +0000 Subject: [PATCH 05/20] Update endpoints model --- botocore/data/endpoints.json | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 93ffb541d3..80f78bc40c 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -14403,6 +14403,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "network-firewall-fips.ca-central-1.amazonaws.com", @@ -31875,6 +31876,18 @@ "us-isob-east-1" : { } } }, + "organizations" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "organizations.us-isob-east-1.sc2s.sgov.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-b-global" + }, "outposts" : { "endpoints" : { "us-isob-east-1" : { } From 696e322b2e2a1d13c01bde13a39e9fe02219910f Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 12 Dec 2024 19:25:24 +0000 Subject: [PATCH 06/20] Bumping version to 1.35.80 --- .changes/1.35.80.json | 27 +++++++++++++++++++ .../api-change-connect-81969.json | 5 ---- .../next-release/api-change-dms-40416.json | 5 ---- .../next-release/api-change-glue-72059.json | 5 ---- .../api-change-guardduty-79289.json | 5 ---- .../api-change-route53domains-82022.json | 5 ---- CHANGELOG.rst | 10 +++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 9 files changed, 39 insertions(+), 27 deletions(-) create mode 100644 .changes/1.35.80.json delete mode 100644 .changes/next-release/api-change-connect-81969.json delete mode 100644 .changes/next-release/api-change-dms-40416.json delete mode 100644 .changes/next-release/api-change-glue-72059.json delete mode 100644 .changes/next-release/api-change-guardduty-79289.json delete mode 100644 .changes/next-release/api-change-route53domains-82022.json diff --git a/.changes/1.35.80.json b/.changes/1.35.80.json new file mode 100644 index 0000000000..39bc4239bb --- /dev/null +++ b/.changes/1.35.80.json @@ -0,0 +1,27 @@ +[ + { + "category": "``connect``", + "description": "Configure holidays and other overrides to hours of operation in advance. During contact handling, Amazon Connect automatically checks for overrides and provides customers with an appropriate flow path. 
After an override period passes call center automatically reverts to standard hours of operation.", + "type": "api-change" + }, + { + "category": "``dms``", + "description": "Add parameters to support for kerberos authentication. Add parameter for disabling the Unicode source filter with PostgreSQL settings. Add parameter to use large integer value with Kinesis/Kafka settings.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "To support customer-managed encryption in Data Quality to allow customers encrypt data with their own KMS key, we will add a DataQualityEncryption field to the SecurityConfiguration API where customers can provide their KMS keys.", + "type": "api-change" + }, + { + "category": "``guardduty``", + "description": "Improved descriptions for certain APIs.", + "type": "api-change" + }, + { + "category": "``route53domains``", + "description": "This release includes the following API updates: added the enumeration type RESTORE_DOMAIN to the OperationType; constrained the Price attribute to non-negative values; updated the LangCode to allow 2 or 3 alphabetical characters.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-connect-81969.json b/.changes/next-release/api-change-connect-81969.json deleted file mode 100644 index d8c3f7c5ab..0000000000 --- a/.changes/next-release/api-change-connect-81969.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``connect``", - "description": "Configure holidays and other overrides to hours of operation in advance. During contact handling, Amazon Connect automatically checks for overrides and provides customers with an appropriate flow path. After an override period passes call center automatically reverts to standard hours of operation." -} diff --git a/.changes/next-release/api-change-dms-40416.json b/.changes/next-release/api-change-dms-40416.json deleted file mode 100644 index 6a8e0c77ed..0000000000 --- a/.changes/next-release/api-change-dms-40416.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``dms``", - "description": "Add parameters to support for kerberos authentication. Add parameter for disabling the Unicode source filter with PostgreSQL settings. Add parameter to use large integer value with Kinesis/Kafka settings." -} diff --git a/.changes/next-release/api-change-glue-72059.json b/.changes/next-release/api-change-glue-72059.json deleted file mode 100644 index 82886e248c..0000000000 --- a/.changes/next-release/api-change-glue-72059.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``glue``", - "description": "To support customer-managed encryption in Data Quality to allow customers encrypt data with their own KMS key, we will add a DataQualityEncryption field to the SecurityConfiguration API where customers can provide their KMS keys." -} diff --git a/.changes/next-release/api-change-guardduty-79289.json b/.changes/next-release/api-change-guardduty-79289.json deleted file mode 100644 index 3519d1fbcd..0000000000 --- a/.changes/next-release/api-change-guardduty-79289.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``guardduty``", - "description": "Improved descriptions for certain APIs." 
-} diff --git a/.changes/next-release/api-change-route53domains-82022.json b/.changes/next-release/api-change-route53domains-82022.json deleted file mode 100644 index 8e3e2bcc63..0000000000 --- a/.changes/next-release/api-change-route53domains-82022.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``route53domains``", - "description": "This release includes the following API updates: added the enumeration type RESTORE_DOMAIN to the OperationType; constrained the Price attribute to non-negative values; updated the LangCode to allow 2 or 3 alphabetical characters." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d0acd1cd8a..4f09c39c72 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,16 @@ CHANGELOG ========= +1.35.80 +======= + +* api-change:``connect``: Configure holidays and other overrides to hours of operation in advance. During contact handling, Amazon Connect automatically checks for overrides and provides customers with an appropriate flow path. After an override period passes call center automatically reverts to standard hours of operation. +* api-change:``dms``: Add parameters to support for kerberos authentication. Add parameter for disabling the Unicode source filter with PostgreSQL settings. Add parameter to use large integer value with Kinesis/Kafka settings. +* api-change:``glue``: To support customer-managed encryption in Data Quality to allow customers encrypt data with their own KMS key, we will add a DataQualityEncryption field to the SecurityConfiguration API where customers can provide their KMS keys. +* api-change:``guardduty``: Improved descriptions for certain APIs. +* api-change:``route53domains``: This release includes the following API updates: added the enumeration type RESTORE_DOMAIN to the OperationType; constrained the Price attribute to non-negative values; updated the LangCode to allow 2 or 3 alphabetical characters. + + 1.35.79 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 238f428f26..8e494bebf3 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.79' +__version__ = '1.35.80' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 827a4de42f..172c1b4848 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.79' +release = '1.35.80' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
From 994ac686f83b3cac42e879f8558dfe94dab9a023 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Fri, 13 Dec 2024 19:17:47 +0000 Subject: [PATCH 07/20] Update to latest models --- .../api-change-cloudhsmv2-90981.json | 5 + .../next-release/api-change-ec2-66466.json | 5 + .../next-release/api-change-eks-42910.json | 5 + .../next-release/api-change-logs-91140.json | 5 + .../api-change-mediaconnect-92309.json | 5 + .../api-change-networkmanager-71337.json | 5 + .../api-change-servicediscovery-99181.json | 5 + .../data/cloudhsmv2/2017-04-28/service-2.json | 34 ++++ botocore/data/ec2/2016-11-15/service-2.json | 22 ++- botocore/data/eks/2017-11-01/service-2.json | 35 +++- botocore/data/logs/2014-03-28/service-2.json | 4 +- .../mediaconnect/2018-11-14/service-2.json | 23 +++ .../networkmanager/2019-07-05/service-2.json | 2 +- .../2017-03-14/service-2.json | 164 +++++++++++++++++- 14 files changed, 301 insertions(+), 18 deletions(-) create mode 100644 .changes/next-release/api-change-cloudhsmv2-90981.json create mode 100644 .changes/next-release/api-change-ec2-66466.json create mode 100644 .changes/next-release/api-change-eks-42910.json create mode 100644 .changes/next-release/api-change-logs-91140.json create mode 100644 .changes/next-release/api-change-mediaconnect-92309.json create mode 100644 .changes/next-release/api-change-networkmanager-71337.json create mode 100644 .changes/next-release/api-change-servicediscovery-99181.json diff --git a/.changes/next-release/api-change-cloudhsmv2-90981.json b/.changes/next-release/api-change-cloudhsmv2-90981.json new file mode 100644 index 0000000000..d19277592b --- /dev/null +++ b/.changes/next-release/api-change-cloudhsmv2-90981.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cloudhsmv2``", + "description": "Add support for Dual-Stack hsm2m.medium clusters. The customers will now be able to create hsm2m.medium clusters having both IPv4 and IPv6 connection capabilities by specifying a new param called NetworkType=DUALSTACK during cluster creation." +} diff --git a/.changes/next-release/api-change-ec2-66466.json b/.changes/next-release/api-change-ec2-66466.json new file mode 100644 index 0000000000..710fa89944 --- /dev/null +++ b/.changes/next-release/api-change-ec2-66466.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ec2``", + "description": "This release adds GroupId to the response for DeleteSecurityGroup." 
+} diff --git a/.changes/next-release/api-change-eks-42910.json b/.changes/next-release/api-change-eks-42910.json new file mode 100644 index 0000000000..f9395522eb --- /dev/null +++ b/.changes/next-release/api-change-eks-42910.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``eks``", + "description": "Add NodeRepairConfig in CreateNodegroupRequest and UpdateNodegroupConfigRequest" +} diff --git a/.changes/next-release/api-change-logs-91140.json b/.changes/next-release/api-change-logs-91140.json new file mode 100644 index 0000000000..09818c3e92 --- /dev/null +++ b/.changes/next-release/api-change-logs-91140.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``logs``", + "description": "Limit PutIntegration IntegrationName and ListIntegrations IntegrationNamePrefix parameters to 50 characters" +} diff --git a/.changes/next-release/api-change-mediaconnect-92309.json b/.changes/next-release/api-change-mediaconnect-92309.json new file mode 100644 index 0000000000..7211c2b6ec --- /dev/null +++ b/.changes/next-release/api-change-mediaconnect-92309.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``mediaconnect``", + "description": "AWS Elemental MediaConnect Gateway now supports Source Specific Multicast (SSM) for ingress bridges. This enables you to specify a source IP address in addition to a multicast IP when creating or updating an ingress bridge source." +} diff --git a/.changes/next-release/api-change-networkmanager-71337.json b/.changes/next-release/api-change-networkmanager-71337.json new file mode 100644 index 0000000000..7a8098b396 --- /dev/null +++ b/.changes/next-release/api-change-networkmanager-71337.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``networkmanager``", + "description": "There was a sentence fragment in UpdateDirectConnectGatewayAttachment that was causing customer confusion as to whether it's an incomplete sentence or if it was a typo. Removed the fragment." +} diff --git a/.changes/next-release/api-change-servicediscovery-99181.json b/.changes/next-release/api-change-servicediscovery-99181.json new file mode 100644 index 0000000000..ba956013b8 --- /dev/null +++ b/.changes/next-release/api-change-servicediscovery-99181.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``servicediscovery``", + "description": "AWS Cloud Map now supports service-level attributes, allowing you to associate custom metadata directly with services. These attributes can be retrieved, updated, and deleted using the new GetServiceAttributes, UpdateServiceAttributes, and DeleteServiceAttributes API calls." +} diff --git a/botocore/data/cloudhsmv2/2017-04-28/service-2.json b/botocore/data/cloudhsmv2/2017-04-28/service-2.json index 18bbecd93b..55387b1d43 100644 --- a/botocore/data/cloudhsmv2/2017-04-28/service-2.json +++ b/botocore/data/cloudhsmv2/2017-04-28/service-2.json @@ -305,6 +305,7 @@ {"shape":"CloudHsmAccessDeniedException"}, {"shape":"CloudHsmInternalFailureException"}, {"shape":"CloudHsmInvalidRequestException"}, + {"shape":"CloudHsmResourceLimitExceededException"}, {"shape":"CloudHsmResourceNotFoundException"}, {"shape":"CloudHsmServiceException"}, {"shape":"CloudHsmTagException"} @@ -509,6 +510,14 @@ "documentation":"

The request was rejected because it is not a valid request.

", "exception":true }, + "CloudHsmResourceLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

The request was rejected because it exceeds a CloudHSM limit.

", + "exception":true + }, "CloudHsmResourceNotFoundException":{ "type":"structure", "members":{ @@ -588,6 +597,10 @@ "shape":"VpcId", "documentation":"

The identifier (ID) of the virtual private cloud (VPC) that contains the cluster.

" }, + "NetworkType":{ + "shape":"NetworkType", + "documentation":"

The cluster's NetworkType can be set to either IPV4 (which is the default) or DUALSTACK. When set to IPV4, communication between your application and the Hardware Security Modules (HSMs) is restricted to the IPv4 protocol only. In contrast, the DUALSTACK network type enables communication over both the IPv4 and IPv6 protocols. To use the DUALSTACK option, you'll need to configure your Virtual Private Cloud (VPC) and subnets to support both IPv4 and IPv6. This involves adding IPv6 Classless Inter-Domain Routing (CIDR) blocks to the existing IPv4 CIDR blocks in your subnets. The choice between IPV4 and DUALSTACK network types determines the flexibility of the network addressing setup for your cluster. The DUALSTACK option provides more flexibility by allowing both IPv4 and IPv6 communication.

" + }, "Certificates":{ "shape":"Certificates", "documentation":"

Contains one or more certificates or a certificate signing request (CSR).

" @@ -623,6 +636,8 @@ "INITIALIZED", "ACTIVE", "UPDATE_IN_PROGRESS", + "MODIFY_IN_PROGRESS", + "ROLLBACK_IN_PROGRESS", "DELETE_IN_PROGRESS", "DELETED", "DEGRADED" @@ -690,6 +705,10 @@ "shape":"SubnetIds", "documentation":"

The identifiers (IDs) of the subnets where you are creating the cluster. You must specify at least one subnet. If you specify multiple subnets, they must meet the following criteria:

  • All subnets must be in the same virtual private cloud (VPC).

  • You can specify only one subnet per Availability Zone.

" }, + "NetworkType":{ + "shape":"NetworkType", + "documentation":"

The NetworkType to create a cluster with. The allowed values are IPV4 and DUALSTACK.
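
A short sketch of how the new parameter might be supplied from boto3 once this model is available; the subnet IDs are placeholders and the subnets are assumed to carry both IPv4 and IPv6 CIDR blocks.

    import boto3

    cloudhsm = boto3.client("cloudhsmv2")

    cloudhsm.create_cluster(
        HsmType="hsm2m.medium",
        NetworkType="DUALSTACK",   # omit or use "IPV4" for the default behavior
        SubnetIds=[
            "subnet-0123456789abcdef0",   # placeholder subnet IDs
            "subnet-0fedcba9876543210",
        ],
    )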

" + }, "TagList":{ "shape":"TagList", "documentation":"

Tags to apply to the CloudHSM cluster during creation.

" @@ -985,6 +1004,10 @@ "shape":"IpAddress", "documentation":"

The IP address of the HSM's elastic network interface (ENI).

" }, + "EniIpV6":{ + "shape":"IpV6Address", + "documentation":"

The IPv6 address (if any) of the HSM's elastic network interface (ENI).

" + }, "HsmId":{ "shape":"HsmId", "documentation":"

The HSM's identifier (ID).

" @@ -1062,6 +1085,10 @@ "type":"string", "pattern":"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" }, + "IpV6Address":{ + "type":"string", + "max":100 + }, "ListTagsRequest":{ "type":"structure", "required":["ResourceId"], @@ -1145,6 +1172,13 @@ "Cluster":{"shape":"Cluster"} } }, + "NetworkType":{ + "type":"string", + "enum":[ + "IPV4", + "DUALSTACK" + ] + }, "NextToken":{ "type":"string", "max":256, diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index ed8d7ccf21..c3467f07ab 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -1855,6 +1855,7 @@ "requestUri":"/" }, "input":{"shape":"DeleteSecurityGroupRequest"}, + "output":{"shape":"DeleteSecurityGroupResult"}, "documentation":"

Deletes a security group.

If you attempt to delete a security group that is associated with an instance or network interface, is referenced by another security group in the same VPC, or has a VPC association, the operation fails with DependencyViolation.

" }, "DeleteSnapshot":{ @@ -6388,7 +6389,7 @@ }, "input":{"shape":"StartDeclarativePoliciesReportRequest"}, "output":{"shape":"StartDeclarativePoliciesReportResult"}, - "documentation":"

Generates an account status report. The report is generated asynchronously, and can take several hours to complete.

The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization).

The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values):

s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv

Prerequisites for generating a report

  • The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization.

  • An S3 bucket must be available before generating the report (you can create a new one or use an existing one), and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy under .

  • Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide.

  • Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error.

For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.

" + "documentation":"

Generates an account status report. The report is generated asynchronously, and can take several hours to complete.

The report provides the current status of all attributes supported by declarative policies for the accounts within the specified scope. The scope is determined by the specified TargetId, which can represent an individual account, or all the accounts that fall under the specified organizational unit (OU) or root (the entire Amazon Web Services Organization).

The report is saved to your specified S3 bucket, using the following path structure (with the italicized placeholders representing your specific values):

s3://amzn-s3-demo-bucket/your-optional-s3-prefix/ec2_targetId_reportId_yyyyMMddThhmmZ.csv

Prerequisites for generating a report

  • The StartDeclarativePoliciesReport API can only be called by the management account or delegated administrators for the organization.

  • An S3 bucket must be available before generating the report (you can create a new one or use an existing one), it must be in the same Region where the report generation request is made, and it must have an appropriate bucket policy. For a sample S3 policy, see Sample Amazon S3 policy.

  • Trusted access must be enabled for the service for which the declarative policy will enforce a baseline configuration. If you use the Amazon Web Services Organizations console, this is done automatically when you enable declarative policies. The API uses the following service principal to identify the EC2 service: ec2.amazonaws.com. For more information on how to enable trusted access with the Amazon Web Services CLI and Amazon Web Services SDKs, see Using Organizations with other Amazon Web Services services in the Amazon Web Services Organizations User Guide.

  • Only one report per organization can be generated at a time. Attempting to generate a report while another is in progress will result in an error.

For more information, including the required IAM permissions to run this API, see Generating the account status report for declarative policies in the Amazon Web Services Organizations User Guide.
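
Assuming the prerequisites above are met, a minimal boto3 sketch of starting the report; the bucket name and organizational unit ID are placeholders.

    import boto3

    ec2 = boto3.client("ec2")

    # The bucket must already exist in this Region with the required bucket policy.
    report = ec2.start_declarative_policies_report(
        S3Bucket="amzn-s3-demo-bucket",
        S3Prefix="declarative-policies",
        TargetId="ou-ab12-cdef3456",   # placeholder OU; an account or root ID also works
    )
    print(report["ReportId"])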

" }, "StartInstances":{ "name":"StartInstances", @@ -19144,6 +19145,21 @@ } } }, + "DeleteSecurityGroupResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "documentation":"

Returns true if the request succeeds; otherwise, returns an error.

", + "locationName":"return" + }, + "GroupId":{ + "shape":"SecurityGroupId", + "documentation":"

The ID of the deleted security group.
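
A brief sketch of the updated response shape; the security group ID is a placeholder.

    import boto3

    ec2 = boto3.client("ec2")

    response = ec2.delete_security_group(GroupId="sg-0123456789abcdef0")  # placeholder

    # With this update the response also carries the deleted group's ID.
    print(response.get("Return"), response.get("GroupId"))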

", + "locationName":"groupId" + } + } + }, "DeleteSnapshotRequest":{ "type":"structure", "required":["SnapshotId"], @@ -31409,7 +31425,7 @@ "locationName":"Value" } }, - "documentation":"

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

If you specify multiple filters, the filters are joined with an AND, and the request returns only results that match all of the specified filters.

" + "documentation":"

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

If you specify multiple filters, the filters are joined with an AND, and the request returns only results that match all of the specified filters.

For more information, see List and filter using the CLI and API in the Amazon EC2 User Guide.

" }, "FilterList":{ "type":"list", @@ -58278,7 +58294,7 @@ }, "S3Bucket":{ "shape":"String", - "documentation":"

The name of the S3 bucket where the report will be saved.

" + "documentation":"

The name of the S3 bucket where the report will be saved. The bucket must be in the same Region where the report generation request is made.

" }, "S3Prefix":{ "shape":"String", diff --git a/botocore/data/eks/2017-11-01/service-2.json b/botocore/data/eks/2017-11-01/service-2.json index 1a80210d64..c389546bce 100644 --- a/botocore/data/eks/2017-11-01/service-2.json +++ b/botocore/data/eks/2017-11-01/service-2.json @@ -2279,6 +2279,10 @@ "shape":"NodegroupUpdateConfig", "documentation":"

The node group update configuration.

" }, + "nodeRepairConfig":{ + "shape":"NodeRepairConfig", + "documentation":"

The node auto repair configuration for the node group.

" + }, "capacityType":{ "shape":"CapacityTypes", "documentation":"

The capacity type for your node group.

" @@ -4341,6 +4345,16 @@ }, "documentation":"

Information about an Amazon EKS add-on from the Amazon Web Services Marketplace.

" }, + "NodeRepairConfig":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"BoxedBoolean", + "documentation":"

Specifies whether to enable node auto repair for the node group. Node auto repair is disabled by default.

" + } + }, + "documentation":"

The node auto repair configuration for the node group.
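
A minimal sketch of enabling node auto repair on an existing managed node group once this model is available; the cluster and node group names are placeholders.

    import boto3

    eks = boto3.client("eks")

    eks.update_nodegroup_config(
        clusterName="example-cluster",        # placeholder
        nodegroupName="example-nodegroup",    # placeholder
        nodeRepairConfig={"enabled": True},
    )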

" + }, "Nodegroup":{ "type":"structure", "members":{ @@ -4428,6 +4442,10 @@ "shape":"NodegroupUpdateConfig", "documentation":"

The node group update configuration.

" }, + "nodeRepairConfig":{ + "shape":"NodeRepairConfig", + "documentation":"

The node auto repair configuration for the node group.

" + }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", "documentation":"

If a launch template was used to create the node group, then this is the launch template that was used.

" @@ -4863,11 +4881,11 @@ "members":{ "remoteNodeNetworks":{ "shape":"RemoteNodeNetworkList", - "documentation":"

The list of network CIDRs that can contain hybrid nodes.

" + "documentation":"

The list of network CIDRs that can contain hybrid nodes.

These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

It must satisfy the following requirements:

  • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

  • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

  • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

  • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

  • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

  • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

" }, "remotePodNetworks":{ "shape":"RemotePodNetworkList", - "documentation":"

The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

" + "documentation":"

The list of network CIDRs that can contain pods that run Kubernetes webhooks on hybrid nodes.

These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

It must satisfy the following requirements:

  • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

  • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

" } }, "documentation":"

The configuration in the cluster for EKS Hybrid Nodes. You can't change or update this configuration after the cluster is created.

" @@ -4891,10 +4909,10 @@ "members":{ "cidrs":{ "shape":"StringList", - "documentation":"

A network CIDR that can contain hybrid nodes.

" + "documentation":"

A network CIDR that can contain hybrid nodes.

These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

It must satisfy the following requirements:

  • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

  • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

  • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

  • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

  • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

  • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

" } }, - "documentation":"

A network CIDR that can contain hybrid nodes.

" + "documentation":"

A network CIDR that can contain hybrid nodes.

These CIDR blocks define the expected IP address range of the hybrid nodes that join the cluster. These blocks are typically determined by your network administrator.

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

It must satisfy the following requirements:

  • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

  • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

  • Each block must have a route to the VPC that uses the VPC CIDR blocks, not public IPs or Elastic IPs. There are many options including Transit Gateway, Site-to-Site VPN, or Direct Connect.

  • Each host must allow outbound connection to the EKS cluster control plane on TCP ports 443 and 10250.

  • Each host must allow inbound connection from the EKS cluster control plane on TCP port 10250 for logs, exec and port-forward operations.

  • Each host must allow TCP and UDP network connectivity to and from other hosts that are running CoreDNS on UDP port 53 for service and pod DNS names.

" }, "RemoteNodeNetworkList":{ "type":"list", @@ -4906,10 +4924,10 @@ "members":{ "cidrs":{ "shape":"StringList", - "documentation":"

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

" + "documentation":"

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

It must satisfy the following requirements:

  • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

  • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

" } }, - "documentation":"

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

" + "documentation":"

A network CIDR that can contain pods that run Kubernetes webhooks on hybrid nodes.

These CIDR blocks are determined by configuring your Container Network Interface (CNI) plugin. We recommend the Calico CNI or Cilium CNI. Note that the Amazon VPC CNI plugin for Kubernetes isn't available for on-premises and edge locations.

Enter one or more IPv4 CIDR blocks in decimal dotted-quad notation (for example, 10.2.0.0/16).

It must satisfy the following requirements:

  • Each block must be within an IPv4 RFC-1918 network range. Minimum allowed size is /24, maximum allowed size is /8. Publicly-routable addresses aren't supported.

  • Each block cannot overlap with the range of the VPC CIDR blocks for your EKS resources, or the block of the Kubernetes service IP range.

" }, "RemotePodNetworkList":{ "type":"list", @@ -5520,6 +5538,10 @@ "shape":"NodegroupUpdateConfig", "documentation":"

The node group update configuration.

" }, + "nodeRepairConfig":{ + "shape":"NodeRepairConfig", + "documentation":"

The node auto repair configuration for the node group.

" + }, "clientRequestToken":{ "shape":"String", "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", @@ -5621,6 +5643,7 @@ "ResolveConflicts", "MaxUnavailable", "MaxUnavailablePercentage", + "NodeRepairEnabled", "ConfigurationValues", "SecurityGroups", "Subnets", diff --git a/botocore/data/logs/2014-03-28/service-2.json b/botocore/data/logs/2014-03-28/service-2.json index 6da54712c8..b31f879823 100644 --- a/botocore/data/logs/2014-03-28/service-2.json +++ b/botocore/data/logs/2014-03-28/service-2.json @@ -3990,13 +3990,13 @@ }, "IntegrationName":{ "type":"string", - "max":256, + "max":50, "min":1, "pattern":"[\\.\\-_/#A-Za-z0-9]+" }, "IntegrationNamePrefix":{ "type":"string", - "max":256, + "max":50, "min":1, "pattern":"[\\.\\-_/#A-Za-z0-9]+" }, diff --git a/botocore/data/mediaconnect/2018-11-14/service-2.json b/botocore/data/mediaconnect/2018-11-14/service-2.json index 8305e3575a..b8953a842e 100644 --- a/botocore/data/mediaconnect/2018-11-14/service-2.json +++ b/botocore/data/mediaconnect/2018-11-14/service-2.json @@ -2258,6 +2258,10 @@ "locationName": "multicastIp", "documentation": "The network source multicast IP." }, + "MulticastSourceSettings": { + "shape": "MulticastSourceSettings", + "locationName": "multicastSourceSettings" + }, "Name": { "shape": "__string", "locationName": "name", @@ -2930,6 +2934,10 @@ "locationName": "multicastIp", "documentation": "The network source multicast IP." }, + "MulticastSourceSettings": { + "shape": "MulticastSourceSettings", + "locationName": "multicastSourceSettings" + }, "Name": { "shape": "__string", "locationName": "name", @@ -5171,6 +5179,17 @@ }, "documentation": "The settings for source monitoring." }, + "MulticastSourceSettings": { + "type": "structure", + "members": { + "MulticastSourceIp": { + "shape": "__string", + "locationName": "multicastSourceIp", + "documentation": "The IP address of the source for source-specific multicast (SSM)." + } + }, + "documentation": "The settings related to the multicast source." + }, "NetworkInterfaceType": { "type": "string", "enum": [ @@ -6488,6 +6507,10 @@ "locationName": "multicastIp", "documentation": "The network source multicast IP." }, + "MulticastSourceSettings": { + "shape": "MulticastSourceSettings", + "locationName": "multicastSourceSettings" + }, "NetworkName": { "shape": "__string", "locationName": "networkName", diff --git a/botocore/data/networkmanager/2019-07-05/service-2.json b/botocore/data/networkmanager/2019-07-05/service-2.json index 1def86ce27..e64ee0b937 100644 --- a/botocore/data/networkmanager/2019-07-05/service-2.json +++ b/botocore/data/networkmanager/2019-07-05/service-2.json @@ -7261,7 +7261,7 @@ }, "EdgeLocations":{ "shape":"ExternalRegionCodeList", - "documentation":"

One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments. Do

" + "documentation":"

One or more edge locations to update for the Direct Connect gateway attachment. The updated array of edge locations overwrites the previous array of locations. EdgeLocations is only used for Direct Connect gateway attachments.

" } } }, diff --git a/botocore/data/servicediscovery/2017-03-14/service-2.json b/botocore/data/servicediscovery/2017-03-14/service-2.json index 77f38f5622..852fdfb05b 100644 --- a/botocore/data/servicediscovery/2017-03-14/service-2.json +++ b/botocore/data/servicediscovery/2017-03-14/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"servicediscovery", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"ServiceDiscovery", "serviceFullName":"AWS Cloud Map", "serviceId":"ServiceDiscovery", "signatureVersion":"v4", "targetPrefix":"Route53AutoNaming_v20170314", - "uid":"servicediscovery-2017-03-14" + "uid":"servicediscovery-2017-03-14", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateHttpNamespace":{ @@ -110,7 +112,21 @@ {"shape":"ServiceNotFound"}, {"shape":"ResourceInUse"} ], - "documentation":"

Deletes a specified service. If the service still contains one or more registered instances, the request fails.

" + "documentation":"

Deletes a specified service and all associated service attributes. If the service still contains one or more registered instances, the request fails.

" + }, + "DeleteServiceAttributes":{ + "name":"DeleteServiceAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteServiceAttributesRequest"}, + "output":{"shape":"DeleteServiceAttributesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"ServiceNotFound"} + ], + "documentation":"

Deletes specific attributes associated with a service.

" }, "DeregisterInstance":{ "name":"DeregisterInstance", @@ -235,6 +251,20 @@ ], "documentation":"

Gets the settings for a specified service.

" }, + "GetServiceAttributes":{ + "name":"GetServiceAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetServiceAttributesRequest"}, + "output":{"shape":"GetServiceAttributesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"ServiceNotFound"} + ], + "documentation":"

Returns the attributes associated with a specified service.

" + }, "ListInstances":{ "name":"ListInstances", "http":{ @@ -425,6 +455,21 @@ {"shape":"ServiceNotFound"} ], "documentation":"

Submits a request to perform the following operations:

  • Update the TTL setting for existing DnsRecords configurations

  • Add, update, or delete HealthCheckConfig for a specified service

    You can't add, update, or delete a HealthCheckCustomConfig configuration.

For public and private DNS namespaces, note the following:

  • If you omit any existing DnsRecords or HealthCheckConfig configurations from an UpdateService request, the configurations are deleted from the service.

  • If you omit an existing HealthCheckCustomConfig configuration from an UpdateService request, the configuration isn't deleted from the service.

When you update settings for a service, Cloud Map also updates the corresponding settings in all the records and health checks that were created by using the specified service.

" + }, + "UpdateServiceAttributes":{ + "name":"UpdateServiceAttributes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateServiceAttributesRequest"}, + "output":{"shape":"UpdateServiceAttributesResponse"}, + "errors":[ + {"shape":"InvalidInput"}, + {"shape":"ServiceNotFound"}, + {"shape":"ServiceAttributesLimitExceededException"} + ], + "documentation":"

Submits a request to update a specified service to add service-level attributes.
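
A sketch of how the new service-attribute operations would surface in boto3 once this model ships; the service ID and attribute values are placeholders.

    import boto3

    cloudmap = boto3.client("servicediscovery")

    service_id = "srv-abcd1234efgh5678"   # placeholder

    # Add or update attributes on the service.
    cloudmap.update_service_attributes(
        ServiceId=service_id,
        Attributes={"Owner": "platform-team", "Environment": "staging"},
    )

    # Read them back.
    attrs = cloudmap.get_service_attributes(ServiceId=service_id)
    print(attrs["ServiceAttributes"]["Attributes"])

    # Delete a specific attribute by key.
    cloudmap.delete_service_attributes(
        ServiceId=service_id,
        Attributes=["Environment"],
    )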

" } }, "shapes":{ @@ -650,6 +695,28 @@ } } }, + "DeleteServiceAttributesRequest":{ + "type":"structure", + "required":[ + "ServiceId", + "Attributes" + ], + "members":{ + "ServiceId":{ + "shape":"ResourceId", + "documentation":"

The ID of the service from which the attributes will be deleted.

" + }, + "Attributes":{ + "shape":"ServiceAttributeKeyList", + "documentation":"

A list of keys corresponding to each attribute that you want to delete.

" + } + } + }, + "DeleteServiceAttributesResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteServiceRequest":{ "type":"structure", "required":["Id"], @@ -784,10 +851,10 @@ }, "DnsRecords":{ "shape":"DnsRecordList", - "documentation":"

An array that contains one DnsRecord object for each Route 53 DNS record that you want Cloud Map to create when you register an instance.

" + "documentation":"

An array that contains one DnsRecord object for each Route 53 DNS record that you want Cloud Map to create when you register an instance.

The record type of a service specified in a DnsRecord object can't be updated. To change a record type, you need to delete the service and recreate it with a new DnsConfig.

" } }, - "documentation":"

A complex type that contains information about the Amazon Route 53 DNS records that you want Cloud Map to create when you register an instance.

The record types of a service can only be changed by deleting the service and recreating it with a new Dnsconfig.

" + "documentation":"

A complex type that contains information about the Amazon Route 53 DNS records that you want Cloud Map to create when you register an instance.

" }, "DnsConfigChange":{ "type":"structure", @@ -971,6 +1038,25 @@ } } }, + "GetServiceAttributesRequest":{ + "type":"structure", + "required":["ServiceId"], + "members":{ + "ServiceId":{ + "shape":"ResourceId", + "documentation":"

The ID of the service that you want to get attributes for.

" + } + } + }, + "GetServiceAttributesResponse":{ + "type":"structure", + "members":{ + "ServiceAttributes":{ + "shape":"ServiceAttributes", + "documentation":"

A complex type that contains the service ARN and a list of attribute key-value pairs associated with the service.

" + } + } + }, "GetServiceRequest":{ "type":"structure", "required":["Id"], @@ -1405,7 +1491,8 @@ }, "NamespaceName":{ "type":"string", - "max":1024 + "max":1024, + "pattern":"^[!-~]{1,1024}$" }, "NamespaceNameHttp":{ "type":"string", @@ -1946,6 +2033,49 @@ "documentation":"

The service can't be created because a service with the same name already exists.

", "exception":true }, + "ServiceAttributeKey":{ + "type":"string", + "max":255 + }, + "ServiceAttributeKeyList":{ + "type":"list", + "member":{"shape":"ServiceAttributeKey"}, + "max":30, + "min":1 + }, + "ServiceAttributeValue":{ + "type":"string", + "max":1024 + }, + "ServiceAttributes":{ + "type":"structure", + "members":{ + "ServiceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the service that the attributes are associated with.

" + }, + "Attributes":{ + "shape":"ServiceAttributesMap", + "documentation":"

A string map that contains the following information for the service that you specify in ServiceArn:

  • The attributes that apply to the service.

  • For each attribute, the applicable value.

You can specify a total of 30 attributes.

" + } + }, + "documentation":"

A complex type that contains information about attributes associated with a specific service.

" + }, + "ServiceAttributesLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The attribute can't be added to the service because you've exceeded the quota for the number of attributes you can add to a service.

", + "exception":true + }, + "ServiceAttributesMap":{ + "type":"map", + "key":{"shape":"ServiceAttributeKey"}, + "value":{"shape":"ServiceAttributeValue"}, + "max":30, + "min":1 + }, "ServiceChange":{ "type":"structure", "members":{ @@ -2280,6 +2410,28 @@ } } }, + "UpdateServiceAttributesRequest":{ + "type":"structure", + "required":[ + "ServiceId", + "Attributes" + ], + "members":{ + "ServiceId":{ + "shape":"ResourceId", + "documentation":"

The ID of the service that you want to update.

" + }, + "Attributes":{ + "shape":"ServiceAttributesMap", + "documentation":"

A string map that contains attribute key-value pairs.

" + } + } + }, + "UpdateServiceAttributesResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateServiceRequest":{ "type":"structure", "required":[ @@ -2293,7 +2445,7 @@ }, "Service":{ "shape":"ServiceChange", - "documentation":"

A complex type that contains the new settings for the service.

" + "documentation":"

A complex type that contains the new settings for the service. You can specify a maximum of 30 attributes (key-value pairs).

" } } }, From c4dd25dbbb309332adaaf91f23fc40c194c64744 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Fri, 13 Dec 2024 19:17:48 +0000 Subject: [PATCH 08/20] Update endpoints model --- botocore/data/endpoints.json | 66 +++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 80f78bc40c..05a28b33ff 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -22079,6 +22079,28 @@ } } }, + "trustedadvisor" : { + "endpoints" : { + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "trustedadvisor-fips.us-east-1.api.aws" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "trustedadvisor-fips.us-east-2.api.aws" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "trustedadvisor-fips.us-west-2.api.aws" + } + } + }, "verifiedpermissions" : { "endpoints" : { "af-south-1" : { }, @@ -26580,9 +26602,6 @@ "endpoints" : { "us-gov-east-1" : { "variants" : [ { - "hostname" : "dlm-fips.us-gov-east-1.api.aws", - "tags" : [ "dualstack", "fips" ] - }, { "hostname" : "dlm.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -26596,9 +26615,6 @@ }, "us-gov-west-1" : { "variants" : [ { - "hostname" : "dlm-fips.us-gov-west-1.api.aws", - "tags" : [ "dualstack", "fips" ] - }, { "hostname" : "dlm.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] } ] @@ -30639,6 +30655,12 @@ } } }, + "codebuild" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "codedeploy" : { "endpoints" : { "us-iso-east-1" : { }, @@ -30702,18 +30724,8 @@ }, "dlm" : { "endpoints" : { - "us-iso-east-1" : { - "variants" : [ { - "hostname" : "dlm-fips.us-iso-east-1.api.aws.ic.gov", - "tags" : [ "dualstack", "fips" ] - } ] - }, - "us-iso-west-1" : { - "variants" : [ { - "hostname" : "dlm-fips.us-iso-west-1.api.aws.ic.gov", - "tags" : [ "dualstack", "fips" ] - } ] - } + "us-iso-east-1" : { }, + "us-iso-west-1" : { } } }, "dms" : { @@ -31328,6 +31340,12 @@ } } }, + "scheduler" : { + "endpoints" : { + "us-iso-east-1" : { }, + "us-iso-west-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "us-iso-east-1" : { }, @@ -31615,12 +31633,7 @@ }, "dlm" : { "endpoints" : { - "us-isob-east-1" : { - "variants" : [ { - "hostname" : "dlm-fips.us-isob-east-1.api.aws.scloud", - "tags" : [ "dualstack", "fips" ] - } ] - } + "us-isob-east-1" : { } } }, "dms" : { @@ -32045,6 +32058,11 @@ } } }, + "scheduler" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "secretsmanager" : { "endpoints" : { "us-isob-east-1" : { } From 53e39986929170799e8fbb2c60f60a2f0482e0f6 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Fri, 13 Dec 2024 19:18:57 +0000 Subject: [PATCH 09/20] Bumping version to 1.35.81 --- .changes/1.35.81.json | 37 +++++++++++++++++++ .../api-change-cloudhsmv2-90981.json | 5 --- .../next-release/api-change-ec2-66466.json | 5 --- .../next-release/api-change-eks-42910.json | 5 --- .../next-release/api-change-logs-91140.json | 5 --- .../api-change-mediaconnect-92309.json | 5 --- .../api-change-networkmanager-71337.json | 5 --- .../api-change-servicediscovery-99181.json | 5 --- CHANGELOG.rst | 12 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 11 files changed, 51 insertions(+), 37 deletions(-) create mode 100644 .changes/1.35.81.json delete mode 100644 .changes/next-release/api-change-cloudhsmv2-90981.json delete mode 100644 
.changes/next-release/api-change-ec2-66466.json delete mode 100644 .changes/next-release/api-change-eks-42910.json delete mode 100644 .changes/next-release/api-change-logs-91140.json delete mode 100644 .changes/next-release/api-change-mediaconnect-92309.json delete mode 100644 .changes/next-release/api-change-networkmanager-71337.json delete mode 100644 .changes/next-release/api-change-servicediscovery-99181.json diff --git a/.changes/1.35.81.json b/.changes/1.35.81.json new file mode 100644 index 0000000000..1e068976db --- /dev/null +++ b/.changes/1.35.81.json @@ -0,0 +1,37 @@ +[ + { + "category": "``cloudhsmv2``", + "description": "Add support for Dual-Stack hsm2m.medium clusters. The customers will now be able to create hsm2m.medium clusters having both IPv4 and IPv6 connection capabilities by specifying a new param called NetworkType=DUALSTACK during cluster creation.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release adds GroupId to the response for DeleteSecurityGroup.", + "type": "api-change" + }, + { + "category": "``eks``", + "description": "Add NodeRepairConfig in CreateNodegroupRequest and UpdateNodegroupConfigRequest", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Limit PutIntegration IntegrationName and ListIntegrations IntegrationNamePrefix parameters to 50 characters", + "type": "api-change" + }, + { + "category": "``mediaconnect``", + "description": "AWS Elemental MediaConnect Gateway now supports Source Specific Multicast (SSM) for ingress bridges. This enables you to specify a source IP address in addition to a multicast IP when creating or updating an ingress bridge source.", + "type": "api-change" + }, + { + "category": "``networkmanager``", + "description": "There was a sentence fragment in UpdateDirectConnectGatewayAttachment that was causing customer confusion as to whether it's an incomplete sentence or if it was a typo. Removed the fragment.", + "type": "api-change" + }, + { + "category": "``servicediscovery``", + "description": "AWS Cloud Map now supports service-level attributes, allowing you to associate custom metadata directly with services. These attributes can be retrieved, updated, and deleted using the new GetServiceAttributes, UpdateServiceAttributes, and DeleteServiceAttributes API calls.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-cloudhsmv2-90981.json b/.changes/next-release/api-change-cloudhsmv2-90981.json deleted file mode 100644 index d19277592b..0000000000 --- a/.changes/next-release/api-change-cloudhsmv2-90981.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cloudhsmv2``", - "description": "Add support for Dual-Stack hsm2m.medium clusters. The customers will now be able to create hsm2m.medium clusters having both IPv4 and IPv6 connection capabilities by specifying a new param called NetworkType=DUALSTACK during cluster creation." -} diff --git a/.changes/next-release/api-change-ec2-66466.json b/.changes/next-release/api-change-ec2-66466.json deleted file mode 100644 index 710fa89944..0000000000 --- a/.changes/next-release/api-change-ec2-66466.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ec2``", - "description": "This release adds GroupId to the response for DeleteSecurityGroup." 
-} diff --git a/.changes/next-release/api-change-eks-42910.json b/.changes/next-release/api-change-eks-42910.json deleted file mode 100644 index f9395522eb..0000000000 --- a/.changes/next-release/api-change-eks-42910.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``eks``", - "description": "Add NodeRepairConfig in CreateNodegroupRequest and UpdateNodegroupConfigRequest" -} diff --git a/.changes/next-release/api-change-logs-91140.json b/.changes/next-release/api-change-logs-91140.json deleted file mode 100644 index 09818c3e92..0000000000 --- a/.changes/next-release/api-change-logs-91140.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``logs``", - "description": "Limit PutIntegration IntegrationName and ListIntegrations IntegrationNamePrefix parameters to 50 characters" -} diff --git a/.changes/next-release/api-change-mediaconnect-92309.json b/.changes/next-release/api-change-mediaconnect-92309.json deleted file mode 100644 index 7211c2b6ec..0000000000 --- a/.changes/next-release/api-change-mediaconnect-92309.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``mediaconnect``", - "description": "AWS Elemental MediaConnect Gateway now supports Source Specific Multicast (SSM) for ingress bridges. This enables you to specify a source IP address in addition to a multicast IP when creating or updating an ingress bridge source." -} diff --git a/.changes/next-release/api-change-networkmanager-71337.json b/.changes/next-release/api-change-networkmanager-71337.json deleted file mode 100644 index 7a8098b396..0000000000 --- a/.changes/next-release/api-change-networkmanager-71337.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``networkmanager``", - "description": "There was a sentence fragment in UpdateDirectConnectGatewayAttachment that was causing customer confusion as to whether it's an incomplete sentence or if it was a typo. Removed the fragment." -} diff --git a/.changes/next-release/api-change-servicediscovery-99181.json b/.changes/next-release/api-change-servicediscovery-99181.json deleted file mode 100644 index ba956013b8..0000000000 --- a/.changes/next-release/api-change-servicediscovery-99181.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``servicediscovery``", - "description": "AWS Cloud Map now supports service-level attributes, allowing you to associate custom metadata directly with services. These attributes can be retrieved, updated, and deleted using the new GetServiceAttributes, UpdateServiceAttributes, and DeleteServiceAttributes API calls." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4f09c39c72..3c8f3837bd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.35.81 +======= + +* api-change:``cloudhsmv2``: Add support for Dual-Stack hsm2m.medium clusters. The customers will now be able to create hsm2m.medium clusters having both IPv4 and IPv6 connection capabilities by specifying a new param called NetworkType=DUALSTACK during cluster creation. +* api-change:``ec2``: This release adds GroupId to the response for DeleteSecurityGroup. +* api-change:``eks``: Add NodeRepairConfig in CreateNodegroupRequest and UpdateNodegroupConfigRequest +* api-change:``logs``: Limit PutIntegration IntegrationName and ListIntegrations IntegrationNamePrefix parameters to 50 characters +* api-change:``mediaconnect``: AWS Elemental MediaConnect Gateway now supports Source Specific Multicast (SSM) for ingress bridges. 
This enables you to specify a source IP address in addition to a multicast IP when creating or updating an ingress bridge source. +* api-change:``networkmanager``: There was a sentence fragment in UpdateDirectConnectGatewayAttachment that was causing customer confusion as to whether it's an incomplete sentence or if it was a typo. Removed the fragment. +* api-change:``servicediscovery``: AWS Cloud Map now supports service-level attributes, allowing you to associate custom metadata directly with services. These attributes can be retrieved, updated, and deleted using the new GetServiceAttributes, UpdateServiceAttributes, and DeleteServiceAttributes API calls. + + 1.35.80 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 8e494bebf3..c72c9ac84d 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.80' +__version__ = '1.35.81' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 172c1b4848..18c8d2506c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.80' +release = '1.35.81' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 69f31e7c22ac8ebccb1837ecdf71d8c53233742a Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Mon, 16 Dec 2024 19:08:19 +0000 Subject: [PATCH 10/20] Update to latest models --- .../next-release/api-change-cloud9-3690.json | 5 ++ .../next-release/api-change-dlm-71242.json | 5 ++ .../next-release/api-change-ec2-45209.json | 5 ++ .../api-change-greengrassv2-86150.json | 5 ++ .../api-change-medialive-49946.json | 5 ++ .../next-release/api-change-rds-65326.json | 5 ++ .../data/cloud9/2017-09-23/service-2.json | 34 ++++++----- botocore/data/dlm/2018-01-12/service-2.json | 24 +++++--- botocore/data/ec2/2016-11-15/service-2.json | 33 ++++++++-- .../greengrassv2/2020-11-30/service-2.json | 32 +++++++++- .../data/medialive/2017-10-14/service-2.json | 61 ++++++++++++++++--- botocore/data/rds/2014-10-31/service-2.json | 1 + 12 files changed, 174 insertions(+), 41 deletions(-) create mode 100644 .changes/next-release/api-change-cloud9-3690.json create mode 100644 .changes/next-release/api-change-dlm-71242.json create mode 100644 .changes/next-release/api-change-ec2-45209.json create mode 100644 .changes/next-release/api-change-greengrassv2-86150.json create mode 100644 .changes/next-release/api-change-medialive-49946.json create mode 100644 .changes/next-release/api-change-rds-65326.json diff --git a/.changes/next-release/api-change-cloud9-3690.json b/.changes/next-release/api-change-cloud9-3690.json new file mode 100644 index 0000000000..2925fa74c0 --- /dev/null +++ b/.changes/next-release/api-change-cloud9-3690.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cloud9``", + "description": "Added information about Ubuntu 18.04 will be removed from the available imageIds for Cloud9 because Ubuntu 18.04 has ended standard support on May 31, 2023." 
+} diff --git a/.changes/next-release/api-change-dlm-71242.json b/.changes/next-release/api-change-dlm-71242.json new file mode 100644 index 0000000000..6f093950d3 --- /dev/null +++ b/.changes/next-release/api-change-dlm-71242.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``dlm``", + "description": "This release adds support for Local Zones in Amazon Data Lifecycle Manager EBS snapshot lifecycle policies." +} diff --git a/.changes/next-release/api-change-ec2-45209.json b/.changes/next-release/api-change-ec2-45209.json new file mode 100644 index 0000000000..59616de417 --- /dev/null +++ b/.changes/next-release/api-change-ec2-45209.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ec2``", + "description": "This release adds support for EBS local snapshots in AWS Dedicated Local Zones, which allows you to store snapshots of EBS volumes locally in Dedicated Local Zones." +} diff --git a/.changes/next-release/api-change-greengrassv2-86150.json b/.changes/next-release/api-change-greengrassv2-86150.json new file mode 100644 index 0000000000..0420047e3f --- /dev/null +++ b/.changes/next-release/api-change-greengrassv2-86150.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``greengrassv2``", + "description": "Add support for runtime in GetCoreDevice and ListCoreDevices APIs." +} diff --git a/.changes/next-release/api-change-medialive-49946.json b/.changes/next-release/api-change-medialive-49946.json new file mode 100644 index 0000000000..cfec01bdf2 --- /dev/null +++ b/.changes/next-release/api-change-medialive-49946.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``medialive``", + "description": "AWS Elemental MediaLive adds three new features: MediaPackage v2 endpoint support for live stream delivery, KLV metadata passthrough in CMAF Ingest output groups, and Metadata Name Modifier in CMAF Ingest output groups for customizing metadata track names in output streams." +} diff --git a/.changes/next-release/api-change-rds-65326.json b/.changes/next-release/api-change-rds-65326.json new file mode 100644 index 0000000000..eb25275f04 --- /dev/null +++ b/.changes/next-release/api-change-rds-65326.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``rds``", + "description": "This release adds support for the \"MYSQL_CACHING_SHA2_PASSWORD\" enum value for RDS Proxy ClientPasswordAuthType." +} diff --git a/botocore/data/cloud9/2017-09-23/service-2.json b/botocore/data/cloud9/2017-09-23/service-2.json index 52aec83d9f..8969d8e545 100644 --- a/botocore/data/cloud9/2017-09-23/service-2.json +++ b/botocore/data/cloud9/2017-09-23/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"cloud9", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"AWS Cloud9", "serviceId":"Cloud9", "signatureVersion":"v4", "targetPrefix":"AWSCloud9WorkspaceManagementService", - "uid":"cloud9-2017-09-23" + "uid":"cloud9-2017-09-23", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateEnvironmentEC2":{ @@ -29,7 +31,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment.

", + "documentation":"

Creates an Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

", "idempotent":true }, "CreateEnvironmentMembership":{ @@ -49,7 +51,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Adds an environment member to an Cloud9 development environment.

", + "documentation":"

Adds an environment member to an Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

", "idempotent":true }, "DeleteEnvironment":{ @@ -69,7 +71,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance.

", + "documentation":"

Deletes an Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

", "idempotent":true }, "DeleteEnvironmentMembership":{ @@ -89,7 +91,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Deletes an environment member from a development environment.

", + "documentation":"

Deletes an environment member from a development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

", "idempotent":true }, "DescribeEnvironmentMemberships":{ @@ -109,7 +111,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Gets information about environment members for an Cloud9 development environment.

" + "documentation":"

Gets information about environment members for an Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

" }, "DescribeEnvironmentStatus":{ "name":"DescribeEnvironmentStatus", @@ -128,7 +130,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Gets status information for an Cloud9 development environment.

" + "documentation":"

Gets status information for an Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

" }, "DescribeEnvironments":{ "name":"DescribeEnvironments", @@ -147,7 +149,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Gets information about Cloud9 development environments.

" + "documentation":"

Gets information about Cloud9 development environments.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

" }, "ListEnvironments":{ "name":"ListEnvironments", @@ -166,7 +168,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Gets a list of Cloud9 development environment identifiers.

" + "documentation":"

Gets a list of Cloud9 development environment identifiers.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -181,7 +183,7 @@ {"shape":"InternalServerErrorException"}, {"shape":"BadRequestException"} ], - "documentation":"

Gets a list of the tags associated with an Cloud9 development environment.

" + "documentation":"

Gets a list of the tags associated with an Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

" }, "TagResource":{ "name":"TagResource", @@ -197,7 +199,7 @@ {"shape":"BadRequestException"}, {"shape":"ConcurrentAccessException"} ], - "documentation":"

Adds tags to an Cloud9 development environment.

Tags that you add to an Cloud9 environment by using this method will NOT be automatically propagated to underlying resources.

" + "documentation":"

Adds tags to an Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

Tags that you add to an Cloud9 environment by using this method will NOT be automatically propagated to underlying resources.

" }, "UntagResource":{ "name":"UntagResource", @@ -213,7 +215,7 @@ {"shape":"BadRequestException"}, {"shape":"ConcurrentAccessException"} ], - "documentation":"

Removes tags from an Cloud9 development environment.

" + "documentation":"

Removes tags from an Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

" }, "UpdateEnvironment":{ "name":"UpdateEnvironment", @@ -232,7 +234,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Changes the settings of an existing Cloud9 development environment.

", + "documentation":"

Changes the settings of an existing Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

", "idempotent":true }, "UpdateEnvironmentMembership":{ @@ -252,7 +254,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Changes the settings of an existing environment member for an Cloud9 development environment.

", + "documentation":"

Changes the settings of an existing environment member for an Cloud9 development environment.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

", "idempotent":true } }, @@ -331,7 +333,7 @@ }, "imageId":{ "shape":"ImageId", - "documentation":"

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users.

We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported.

Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.

AMI aliases

  • Amazon Linux 2: amazonlinux-2-x86_64

  • Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64

  • Ubuntu 18.04: ubuntu-18.04-x86_64

  • Ubuntu 22.04: ubuntu-22.04-x86_64

SSM paths

  • Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64

  • Amazon Linux 2023 (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64

  • Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64

  • Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64

" + "documentation":"

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported.

From December 16, 2024, Ubuntu 18.04 will be removed from the list of available imageIds for Cloud9. This change is necessary as Ubuntu 18.04 has ended standard support on May 31, 2023. This change will only affect direct API consumers, and not Cloud9 console users.

Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.

AMI aliases

  • Amazon Linux 2: amazonlinux-2-x86_64

  • Amazon Linux 2023 (recommended): amazonlinux-2023-x86_64

  • Ubuntu 18.04: ubuntu-18.04-x86_64

  • Ubuntu 22.04: ubuntu-22.04-x86_64

SSM paths

  • Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64

  • Amazon Linux 2023 (recommended): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64

  • Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64

  • Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
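Given the aliases and SSM paths above, a hedged boto3 sketch of creating an environment with the recommended image (the environment name and stop timeout are placeholders, and this only applies to accounts that already have Cloud9 access):

    import boto3

    cloud9 = boto3.client("cloud9")

    # The SSM path form, e.g. "resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2023-x86_64",
    # can be passed in imageId the same way as the alias used here.
    env = cloud9.create_environment_ec2(
        name="example-env",                 # placeholder environment name
        instanceType="t3.small",
        imageId="amazonlinux-2023-x86_64",  # recommended Amazon Linux 2023 alias
        automaticStopTimeMinutes=30,
    )
    print(env["environmentId"])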

" }, "automaticStopTimeMinutes":{ "shape":"AutomaticStopTimeMinutes", @@ -973,5 +975,5 @@ "pattern":"^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):(iam|sts)::\\d+:(root|(user\\/[\\w+=/:,.@-]{1,64}|federated-user\\/[\\w+=/:,.@-]{2,32}|assumed-role\\/[\\w+=:,.@-]{1,64}\\/[\\w+=,.@-]{1,64}))$" } }, - "documentation":"Cloud9

Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud.

For more information about Cloud9, see the Cloud9 User Guide.

Cloud9 supports these operations:

  • CreateEnvironmentEC2: Creates an Cloud9 development environment, launches an Amazon EC2 instance, and then connects from the instance to the environment.

  • CreateEnvironmentMembership: Adds an environment member to an environment.

  • DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance.

  • DeleteEnvironmentMembership: Deletes an environment member from an environment.

  • DescribeEnvironmentMemberships: Gets information about environment members for an environment.

  • DescribeEnvironments: Gets information about environments.

  • DescribeEnvironmentStatus: Gets status information for an environment.

  • ListEnvironments: Gets a list of environment identifiers.

  • ListTagsForResource: Gets the tags for an environment.

  • TagResource: Adds tags to an environment.

  • UntagResource: Removes tags from an environment.

  • UpdateEnvironment: Changes the settings of an existing environment.

  • UpdateEnvironmentMembership: Changes the settings of an existing environment member for an environment.

" + "documentation":"Cloud9

Cloud9 is a collection of tools that you can use to code, build, run, test, debug, and release software in the cloud.

For more information about Cloud9, see the Cloud9 User Guide.

Cloud9 is no longer available to new customers. Existing customers of Cloud9 can continue to use the service as normal. Learn more\"

Cloud9 supports these operations:

  • CreateEnvironmentEC2: Creates an Cloud9 development environment, launches an Amazon EC2 instance, and then connects from the instance to the environment.

  • CreateEnvironmentMembership: Adds an environment member to an environment.

  • DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance.

  • DeleteEnvironmentMembership: Deletes an environment member from an environment.

  • DescribeEnvironmentMemberships: Gets information about environment members for an environment.

  • DescribeEnvironments: Gets information about environments.

  • DescribeEnvironmentStatus: Gets status information for an environment.

  • ListEnvironments: Gets a list of environment identifiers.

  • ListTagsForResource: Gets the tags for an environment.

  • TagResource: Adds tags to an environment.

  • UntagResource: Removes tags from an environment.

  • UpdateEnvironment: Changes the settings of an existing environment.

  • UpdateEnvironmentMembership: Changes the settings of an existing environment member for an environment.

" } diff --git a/botocore/data/dlm/2018-01-12/service-2.json b/botocore/data/dlm/2018-01-12/service-2.json index b94b3b040e..bad90b8f77 100644 --- a/botocore/data/dlm/2018-01-12/service-2.json +++ b/botocore/data/dlm/2018-01-12/service-2.json @@ -5,12 +5,14 @@ "endpointPrefix":"dlm", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Amazon DLM", "serviceFullName":"Amazon Data Lifecycle Manager", "serviceId":"DLM", "signatureVersion":"v4", "signingName":"dlm", - "uid":"dlm-2018-01-12" + "uid":"dlm-2018-01-12", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateLifecyclePolicy":{ @@ -296,7 +298,7 @@ "members":{ "Location":{ "shape":"LocationValues", - "documentation":"

[Custom snapshot policies only] Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify CLOUD. To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. If you omit this parameter, CLOUD is used by default.

If the policy targets resources in an Amazon Web Services Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost.

" + "documentation":"

[Custom snapshot policies only] Specifies the destination for snapshots created by the policy. The allowed destinations depend on the location of the targeted resources.

  • If the policy targets resources in a Region, then you must create snapshots in the same Region as the source resource.

  • If the policy targets resources in a Local Zone, you can create snapshots in the same Local Zone or in its parent Region.

  • If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost or in its parent Region.

Specify one of the following values:

  • To create snapshots in the same Region as the source resource, specify CLOUD.

  • To create snapshots in the same Local Zone as the source resource, specify LOCAL_ZONE.

  • To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL.

Default: CLOUD
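A minimal sketch of a snapshot lifecycle policy that uses the new LOCAL_ZONE destination (the role ARN, tag, and retention values are placeholders):

    import boto3

    dlm = boto3.client("dlm")

    dlm.create_lifecycle_policy(
        ExecutionRoleArn="arn:aws:iam::123456789012:role/AWSDataLifecycleManagerDefaultRole",  # placeholder
        Description="Local Zone snapshots every 12 hours",
        State="ENABLED",
        PolicyDetails={
            "PolicyType": "EBS_SNAPSHOT_MANAGEMENT",
            "ResourceTypes": ["VOLUME"],
            "ResourceLocations": ["LOCAL_ZONE"],  # back up volumes located in Local Zones
            "TargetTags": [{"Key": "backup", "Value": "true"}],
            "Schedules": [
                {
                    "Name": "every-12-hours",
                    "CreateRule": {"Location": "LOCAL_ZONE", "Interval": 12, "IntervalUnit": "HOURS"},
                    "RetainRule": {"Count": 7},
                }
            ],
        },
    )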

" }, "Interval":{ "shape":"Interval", @@ -312,7 +314,7 @@ }, "CronExpression":{ "shape":"CronExpression", - "documentation":"

The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see Cron expressions in the Amazon CloudWatch User Guide.

" + "documentation":"

The schedule, as a Cron expression. The schedule interval must be between 1 hour and 1 year. For more information, see the Cron expressions reference in the Amazon EventBridge User Guide.

" }, "Scripts":{ "shape":"ScriptsList", @@ -797,10 +799,10 @@ }, "DefaultPolicy":{ "shape":"DefaultPolicy", - "documentation":"

[Default policies only] The type of default policy. Values include:

  • VOLUME - Default policy for EBS snapshots

  • INSTANCE - Default policy for EBS-backed AMIs

" + "documentation":"

Indicates whether the policy is a default lifecycle policy or a custom lifecycle policy.

  • true - the policy is a default policy.

  • false - the policy is a custom policy.

" } }, - "documentation":"

[Custom policies only] Detailed information about a snapshot, AMI, or event-based lifecycle policy.

" + "documentation":"

Information about a lifecycle policy.

" }, "LifecyclePolicySummary":{ "type":"structure", @@ -875,7 +877,8 @@ "type":"string", "enum":[ "CLOUD", - "OUTPOST_LOCAL" + "OUTPOST_LOCAL", + "LOCAL_ZONE" ] }, "NoReboot":{"type":"boolean"}, @@ -919,7 +922,7 @@ "members":{ "PolicyType":{ "shape":"PolicyTypeValues", - "documentation":"

[Custom policies only] The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY to create an event-based policy that performs specific actions when a defined event occurs in your Amazon Web Services account.

The default is EBS_SNAPSHOT_MANAGEMENT.

" + "documentation":"

The type of policy. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY to create an event-based policy that performs specific actions when a defined event occurs in your Amazon Web Services account.

The default is EBS_SNAPSHOT_MANAGEMENT.

" }, "ResourceTypes":{ "shape":"ResourceTypeValuesList", @@ -927,7 +930,7 @@ }, "ResourceLocations":{ "shape":"ResourceLocationList", - "documentation":"

[Custom snapshot and AMI policies only] The location of the resources to backup. If the source resources are located in an Amazon Web Services Region, specify CLOUD. If the source resources are located on an Outpost in your account, specify OUTPOST.

If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account.

" + "documentation":"

[Custom snapshot and AMI policies only] The location of the resources to backup.

  • If the source resources are located in a Region, specify CLOUD. In this case, the policy targets all resources of the specified type with matching target tags across all Availability Zones in the Region.

  • [Custom snapshot policies only] If the source resources are located in a Local Zone, specify LOCAL_ZONE. In this case, the policy targets all resources of the specified type with matching target tags across all Local Zones in the Region.

  • If the source resources are located on an Outpost in your account, specify OUTPOST. In this case, the policy targets all resources of the specified type with matching target tags across all of the Outposts in your account.

" }, "TargetTags":{ "shape":"TargetTagList", @@ -1019,7 +1022,8 @@ "type":"string", "enum":[ "CLOUD", - "OUTPOST" + "OUTPOST", + "LOCAL_ZONE" ] }, "ResourceNotFoundException":{ @@ -1135,7 +1139,7 @@ }, "CrossRegionCopyRules":{ "shape":"CrossRegionCopyRules", - "documentation":"

Specifies a rule for copying snapshots or AMIs across regions.

You can't specify cross-Region copy rules for policies that create snapshots on an Outpost. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.

" + "documentation":"

Specifies a rule for copying snapshots or AMIs across Regions.

You can't specify cross-Region copy rules for policies that create snapshots on an Outpost or in a Local Zone. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.

" }, "ShareRules":{ "shape":"ShareRules", diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index c3467f07ab..c9ac69cb76 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -1097,7 +1097,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

You can create snapshots of volumes in a Region and volumes on an Outpost. If you create a snapshot of a volume in a Region, the snapshot must be stored in the same Region as the volume. If you create a snapshot of a volume on an Outpost, the snapshot can be stored on the same Outpost as the volume, or in the Region for that Outpost.

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.

You can tag your snapshots during creation. For more information, see Tag your Amazon EC2 resources in the Amazon EC2 User Guide.

For more information, see Amazon EBS and Amazon EBS encryption in the Amazon EBS User Guide.

" + "documentation":"

Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.

The location of the source EBS volume determines where you can create the snapshot.

  • If the source volume is in a Region, you must create the snapshot in the same Region as the volume.

  • If the source volume is in a Local Zone, you can create the snapshot in the same Local Zone or in its parent Amazon Web Services Region.

  • If the source volume is on an Outpost, you can create the snapshot on the same Outpost or in its parent Amazon Web Services Region.

When a snapshot is created, any Amazon Web Services Marketplace product codes that are associated with the source volume are propagated to the snapshot.

You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your Amazon EBS volume at the time the snapshot command is issued; this might exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending.

When you create a snapshot for an EBS volume that serves as a root device, we recommend that you stop the instance before taking the snapshot.

Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected. For more information, see Amazon EBS encryption in the Amazon EBS User Guide.
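A minimal boto3 sketch that exercises the new Location value for a volume in a Dedicated Local Zone (the volume ID is a placeholder; omit Location, or pass regional, for ordinary in-Region volumes):

    import boto3

    ec2 = boto3.client("ec2")

    snapshot = ec2.create_snapshot(
        VolumeId="vol-0123456789abcdef0",  # placeholder volume in a Local Zone
        Description="pre-maintenance backup",
        Location="local",                  # keep the snapshot in the same Local Zone
    )
    print(snapshot["SnapshotId"], snapshot["State"])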

" }, "CreateSnapshots":{ "name":"CreateSnapshots", @@ -1107,7 +1107,7 @@ }, "input":{"shape":"CreateSnapshotsRequest"}, "output":{"shape":"CreateSnapshotsResult"}, - "documentation":"

Creates crash-consistent snapshots of multiple EBS volumes and stores the data in S3. Volumes are chosen by specifying an instance. Any attached volumes will produce one snapshot each that is crash-consistent across the instance.

You can include all of the volumes currently attached to the instance, or you can exclude the root volume or specific data (non-root) volumes from the multi-volume snapshot set.

You can create multi-volume snapshots of instances in a Region and instances on an Outpost. If you create snapshots from an instance in a Region, the snapshots must be stored in the same Region as the instance. If you create snapshots from an instance on an Outpost, the snapshots can be stored on the same Outpost as the instance, or in the Region for that Outpost.

" + "documentation":"

Creates crash-consistent snapshots of multiple EBS volumes attached to an Amazon EC2 instance. Volumes are chosen by specifying an instance. Each volume attached to the specified instance will produce one snapshot that is crash-consistent across the instance. You can include all of the volumes currently attached to the instance, or you can exclude the root volume or specific data (non-root) volumes from the multi-volume snapshot set.

The location of the source instance determines where you can create the snapshots.

  • If the source instance is in a Region, you must create the snapshots in the same Region as the instance.

  • If the source instance is in a Local Zone, you can create the snapshots in the same Local Zone or in its parent Amazon Web Services Region.

  • If the source instance is on an Outpost, you can create the snapshots on the same Outpost or in its parent Amazon Web Services Region.
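The multi-volume variant, sketched with placeholder IDs (ExcludeBootVolume and CopyTagsFromSource are optional):

    import boto3

    ec2 = boto3.client("ec2")

    resp = ec2.create_snapshots(
        InstanceSpecification={
            "InstanceId": "i-0123456789abcdef0",  # placeholder instance in a Local Zone
            "ExcludeBootVolume": True,            # skip the root volume
        },
        Location="local",                         # keep the snapshots in the same Local Zone
        CopyTagsFromSource="volume",
    )
    for snap in resp["Snapshots"]:
        print(snap["SnapshotId"], snap["VolumeId"])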

" }, "CreateSpotDatafeedSubscription":{ "name":"CreateSpotDatafeedSubscription", @@ -15606,7 +15606,7 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Outpost on which to create a local snapshot.

  • To create a snapshot of a volume in a Region, omit this parameter. The snapshot is created in the same Region as the volume.

  • To create a snapshot of a volume on an Outpost and store the snapshot in the Region, omit this parameter. The snapshot is created in the Region for the Outpost.

  • To create a snapshot of a volume on an Outpost and store the snapshot on an Outpost, specify the ARN of the destination Outpost. The snapshot must be created on the same Outpost as the volume.

For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide.

" + "documentation":"

Only supported for volumes on Outposts. If the source volume is not on an Outpost, omit this parameter.

  • To create the snapshot on the same Outpost as the source volume, specify the ARN of that Outpost. The snapshot must be created on the same Outpost as the volume.

  • To create the snapshot in the parent Region of the Outpost, omit this parameter.

For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide.

" }, "VolumeId":{ "shape":"VolumeId", @@ -15617,6 +15617,10 @@ "documentation":"

The tags to apply to the snapshot during creation.

", "locationName":"TagSpecification" }, + "Location":{ + "shape":"SnapshotLocationEnum", + "documentation":"

Only supported for volumes in Local Zones. If the source volume is not in a Local Zone, omit this parameter.

  • To create a local snapshot in the same Local Zone as the source volume, specify local.

  • To create a regional snapshot in the parent Region of the Local Zone, specify regional or omit this parameter.

Default value: regional

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -15638,7 +15642,7 @@ }, "OutpostArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the Outpost on which to create the local snapshots.

  • To create snapshots from an instance in a Region, omit this parameter. The snapshots are created in the same Region as the instance.

  • To create snapshots from an instance on an Outpost and store the snapshots in the Region, omit this parameter. The snapshots are created in the Region for the Outpost.

  • To create snapshots from an instance on an Outpost and store the snapshots on an Outpost, specify the ARN of the destination Outpost. The snapshots must be created on the same Outpost as the instance.

For more information, see Create multi-volume local snapshots from instances on an Outpost in the Amazon EBS User Guide.

" + "documentation":"

Only supported for instances on Outposts. If the source instance is not on an Outpost, omit this parameter.

  • To create the snapshots on the same Outpost as the source instance, specify the ARN of that Outpost. The snapshots must be created on the same Outpost as the instance.

  • To create the snapshots in the parent Region of the Outpost, omit this parameter.

For more information, see Create local snapshots from volumes on an Outpost in the Amazon EBS User Guide.

" }, "TagSpecifications":{ "shape":"TagSpecificationList", @@ -15652,6 +15656,10 @@ "CopyTagsFromSource":{ "shape":"CopyTagsFromSource", "documentation":"

Copies the tags from the specified volume to corresponding snapshot.

" + }, + "Location":{ + "shape":"SnapshotLocationEnum", + "documentation":"

Only supported for instances in Local Zones. If the source instance is not in a Local Zone, omit this parameter.

  • To create local snapshots in the same Local Zone as the source instance, specify local.

  • To create regional snapshots in the parent Region of the Local Zone, specify regional or omit this parameter.

Default value: regional

" } } }, @@ -56961,6 +56969,11 @@ "documentation":"

Reserved for future use.

", "locationName":"sseType" }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone or Local Zone of the snapshot. For example, us-west-1a (Availability Zone) or us-west-2-lax-1a (Local Zone).

", + "locationName":"availabilityZone" + }, "TransferType":{ "shape":"TransferType", "documentation":"

Only for snapshot copies.

Indicates whether the snapshot copy was created with a standard or time-based snapshot copy operation. Time-based snapshot copy operations complete within the completion duration specified in the request. Standard snapshot copy operations are completed on a best-effort basis.

  • standard - The snapshot copy was created with a standard snapshot copy operation.

  • time-based - The snapshot copy was created with a time-based snapshot copy operation.

", @@ -57215,6 +57228,11 @@ "shape":"SSEType", "documentation":"

Reserved for future use.

", "locationName":"sseType" + }, + "AvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone or Local Zone of the snapshots. For example, us-west-1a (Availability Zone) or us-west-2-lax-1a (Local Zone).

", + "locationName":"availabilityZone" } }, "documentation":"

Information about a snapshot.

" @@ -57226,6 +57244,13 @@ "locationName":"item" } }, + "SnapshotLocationEnum":{ + "type":"string", + "enum":[ + "regional", + "local" + ] + }, "SnapshotRecycleBinInfo":{ "type":"structure", "members":{ diff --git a/botocore/data/greengrassv2/2020-11-30/service-2.json b/botocore/data/greengrassv2/2020-11-30/service-2.json index 663522de14..2b3ef57d1b 100644 --- a/botocore/data/greengrassv2/2020-11-30/service-2.json +++ b/botocore/data/greengrassv2/2020-11-30/service-2.json @@ -9,7 +9,8 @@ "serviceFullName":"AWS IoT Greengrass V2", "serviceId":"GreengrassV2", "signatureVersion":"v4", - "uid":"greengrassv2-2020-11-30" + "uid":"greengrassv2-2020-11-30", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateServiceRoleToAccount":{ @@ -369,7 +370,7 @@ {"shape":"InternalServerException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves a paginated list of Greengrass core devices.

IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. If the IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud, then the reported status of that device might not reflect its current status. The status timestamp indicates when the device status was last updated.

Core devices send status updates at the following times:

  • When the IoT Greengrass Core software starts

  • When the core device receives a deployment from the Amazon Web Services Cloud

  • When the status of any component on the core device becomes BROKEN

  • At a regular interval that you can configure, which defaults to 24 hours

  • For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and cloud deployment

" + "documentation":"

Retrieves a paginated list of Greengrass core devices.

IoT Greengrass relies on individual devices to send status updates to the Amazon Web Services Cloud. If the IoT Greengrass Core software isn't running on the device, or if device isn't connected to the Amazon Web Services Cloud, then the reported status of that device might not reflect its current status. The status timestamp indicates when the device status was last updated.

Core devices send status updates at the following times:

  • When the IoT Greengrass Core software starts

  • When the core device receives a deployment from the Amazon Web Services Cloud

  • For Greengrass nucleus 2.12.2 and earlier, the core device sends status updates when the status of any component on the core device becomes ERRORED or BROKEN.

  • For Greengrass nucleus 2.12.3 and later, the core device sends status updates when the status of any component on the core device becomes ERRORED, BROKEN, RUNNING, or FINISHED.

  • At a regular interval that you can configure, which defaults to 24 hours

  • For IoT Greengrass Core v2.7.0, the core device sends status updates upon local deployment and cloud deployment
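A short sketch of listing core devices with the new runtime filter (the status filter is optional; the runtime values are the documented aws_nucleus_classic and aws_nucleus_lite strings):

    import boto3

    gg = boto3.client("greengrassv2")

    # The paginator forwards request parameters, so the new runtime filter can be
    # passed through once this model update is installed.
    paginator = gg.get_paginator("list_core_devices")
    for page in paginator.paginate(status="HEALTHY", runtime="aws_nucleus_lite"):
        for device in page["coreDevices"]:
            print(device["coreDeviceThingName"], device.get("runtime"))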

" }, "ListDeployments":{ "name":"ListDeployments", @@ -1011,6 +1012,18 @@ "lastStatusUpdateTimestamp":{ "shape":"Timestamp", "documentation":"

The time at which the core device's status last updated, expressed in ISO 8601 format.

" + }, + "platform":{ + "shape":"CoreDevicePlatformString", + "documentation":"

The operating system platform that the core device runs.

" + }, + "architecture":{ + "shape":"CoreDeviceArchitectureString", + "documentation":"

The computer architecture of the core device.

" + }, + "runtime":{ + "shape":"CoreDeviceRuntimeString", + "documentation":"

The runtime for the core device. The runtime can be:

  • aws_nucleus_classic

  • aws_nucleus_lite

" } }, "documentation":"

Contains information about a Greengrass core device, which is an IoT thing that runs the IoT Greengrass Core software.

" @@ -1025,6 +1038,11 @@ "max":255, "min":1 }, + "CoreDeviceRuntimeString":{ + "type":"string", + "max":255, + "min":1 + }, "CoreDeviceStatus":{ "type":"string", "enum":[ @@ -1704,6 +1722,10 @@ "shape":"CoreDeviceArchitectureString", "documentation":"

The computer architecture of the core device.

" }, + "runtime":{ + "shape":"CoreDeviceRuntimeString", + "documentation":"

The runtime for the core device. The runtime can be:

  • aws_nucleus_classic

  • aws_nucleus_lite

" + }, "status":{ "shape":"CoreDeviceStatus", "documentation":"

The status of the core device. The core device status can be:

  • HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue.

  • UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device.

" @@ -2466,6 +2488,12 @@ "box":true, "location":"querystring", "locationName":"nextToken" + }, + "runtime":{ + "shape":"CoreDeviceRuntimeString", + "documentation":"

The runtime to be used by the core device. The runtime can be:

  • aws_nucleus_classic

  • aws_nucleus_lite

", + "location":"querystring", + "locationName":"runtime" } } }, diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index b31a894c23..a69a05153a 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -10183,7 +10183,7 @@ "TimedMetadataBehavior": { "shape": "Fmp4TimedMetadataBehavior", "locationName": "timedMetadataBehavior", - "documentation": "When set to passthrough, timed metadata is passed through from input to output." + "documentation": "Set to PASSTHROUGH to enable ID3 metadata insertion. To include metadata, you configure other parameters in the output group or individual outputs, or you add an ID3 action to the channel schedule." } }, "documentation": "Fmp4 Hls Settings" @@ -11651,15 +11651,15 @@ "Tag": { "shape": "__string", "locationName": "tag", - "documentation": "ID3 tag to insert into each segment. Supports special keyword identifiers to substitute in segment-related values.\\nSupported keyword identifiers: https://docs.aws.amazon.com/medialive/latest/ug/variable-data-identifiers.html" + "documentation": "Complete this parameter if you want to specify only the metadata, not the entire frame. MediaLive will insert the metadata in a TXXX frame. Enter the value as plain text. You can include standard MediaLive variable data such as the current segment number." }, "Id3": { "shape": "__string", "locationName": "id3", - "documentation": "Base64 string formatted according to the ID3 specification: http://id3.org/id3v2.4.0-structure" + "documentation": "Complete this parameter if you want to specify the entire ID3 metadata. Enter a base64 string that contains one or more fully formed ID3 tags, according to the ID3 specification: http://id3.org/id3v2.4.0-structure" } }, - "documentation": "Settings for the action to insert a user-defined ID3 tag in each HLS segment" + "documentation": "Settings for the action to insert ID3 metadata in every segment, in HLS output groups." }, "HlsId3SegmentTaggingState": { "type": "string", @@ -11928,10 +11928,10 @@ "Id3": { "shape": "__string", "locationName": "id3", - "documentation": "Base64 string formatted according to the ID3 specification: http://id3.org/id3v2.4.0-structure" + "documentation": "Enter a base64 string that contains one or more fully formed ID3 tags.See the ID3 specification: http://id3.org/id3v2.4.0-structure" } }, - "documentation": "Settings for the action to emit HLS metadata", + "documentation": "Settings for the action to insert ID3 metadata (as a one-time action) in HLS output groups.", "required": [ "Id3" ] @@ -14444,7 +14444,7 @@ "TimedMetadataBehavior": { "shape": "M3u8TimedMetadataBehavior", "locationName": "timedMetadataBehavior", - "documentation": "When set to passthrough, timed metadata is passed through from input to output." + "documentation": "Set to PASSTHROUGH to enable ID3 metadata insertion. To include metadata, you configure other parameters in the output group or individual outputs, or you add an ID3 action to the channel schedule." }, "TimedMetadataPid": { "shape": "__string", @@ -14607,6 +14607,16 @@ "shape": "__stringMin1", "locationName": "channelId", "documentation": "ID of the channel in MediaPackage that is the destination for this output group. You do not need to specify the individual inputs in MediaPackage; MediaLive will handle the connection of the two MediaLive pipelines to the two MediaPackage inputs. 
The MediaPackage channel and MediaLive channel must be in the same region." + }, + "ChannelGroup": { + "shape": "__stringMin1", + "locationName": "channelGroup", + "documentation": "Name of the channel group in MediaPackageV2. Only use if you are sending CMAF Ingest output to a CMAF ingest endpoint on a MediaPackage channel that uses MediaPackage v2." + }, + "ChannelName": { + "shape": "__stringMin1", + "locationName": "channelName", + "documentation": "Name of the channel in MediaPackageV2. Only use if you are sending CMAF Ingest output to a CMAF ingest endpoint on a MediaPackage channel that uses MediaPackage v2." } }, "documentation": "MediaPackage Output Destination Settings" @@ -16679,12 +16689,12 @@ "HlsId3SegmentTaggingSettings": { "shape": "HlsId3SegmentTaggingScheduleActionSettings", "locationName": "hlsId3SegmentTaggingSettings", - "documentation": "Action to insert HLS ID3 segment tagging" + "documentation": "Action to insert ID3 metadata in every segment, in HLS output groups" }, "HlsTimedMetadataSettings": { "shape": "HlsTimedMetadataScheduleActionSettings", "locationName": "hlsTimedMetadataSettings", - "documentation": "Action to insert HLS metadata" + "documentation": "Action to insert ID3 metadata once, in HLS output groups" }, "InputPrepareSettings": { "shape": "InputPrepareScheduleActionSettings", @@ -20599,6 +20609,26 @@ "shape": "__integerMin0Max2000", "locationName": "sendDelayMs", "documentation": "Number of milliseconds to delay the output from the second pipeline." + }, + "KlvBehavior": { + "shape": "CmafKLVBehavior", + "locationName": "klvBehavior", + "documentation": "If set to passthrough, passes any KLV data from the input source to this output." + }, + "KlvNameModifier": { + "shape": "__stringMax100", + "locationName": "klvNameModifier", + "documentation": "Change the modifier that MediaLive automatically adds to the Streams() name that identifies a KLV track. The default is \"klv\", which means the default name will be Streams(klv.cmfm). Any string you enter here will replace the \"klv\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters." + }, + "NielsenId3NameModifier": { + "shape": "__stringMax100", + "locationName": "nielsenId3NameModifier", + "documentation": "Change the modifier that MediaLive automatically adds to the Streams() name that identifies a Nielsen ID3 track. The default is \"nid3\", which means the default name will be Streams(nid3.cmfm). Any string you enter here will replace the \"nid3\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters." + }, + "Scte35NameModifier": { + "shape": "__stringMax100", + "locationName": "scte35NameModifier", + "documentation": "Change the modifier that MediaLive automatically adds to the Streams() name for a SCTE 35 track. The default is \"scte\", which means the default name will be Streams(scte.cmfm). Any string you enter here will replace the \"scte\" string.\\nThe modifier can only contain: numbers, letters, plus (+), minus (-), underscore (_) and period (.) and has a maximum length of 100 characters." 
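The new CMAF Ingest fields above can be sketched as the following settings fragment. The field names and enum values come from this model; embedding the fragment in an output group's settings for a MediaLive `create_channel` call is an assumption, since the surrounding channel structure is not shown in this diff, and the modifier strings are placeholders.

```python
# Illustrative CmafIngestGroupSettings fragment using only the fields added here.
cmaf_ingest_group_settings = {
    "KlvBehavior": "PASSTHROUGH",        # or "NO_PASSTHROUGH"
    "KlvNameModifier": "klv1",           # replaces the default "klv" in Streams(klv.cmfm)
    "NielsenId3NameModifier": "nid3a",   # replaces the default "nid3"
    "Scte35NameModifier": "scte-a",      # replaces the default "scte"
}
```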
} }, "documentation": "Cmaf Ingest Group Settings", @@ -28094,6 +28124,19 @@ "DISABLED", "ENABLED" ] + }, + "CmafKLVBehavior": { + "type": "string", + "documentation": "Cmaf KLVBehavior", + "enum": [ + "NO_PASSTHROUGH", + "PASSTHROUGH" + ] + }, + "__stringMax100": { + "type": "string", + "max": 100, + "documentation": "Placeholder documentation for __stringMax100" } }, "documentation": "API for AWS Elemental MediaLive" diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 259209cf2b..318afb7638 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -3658,6 +3658,7 @@ "type":"string", "enum":[ "MYSQL_NATIVE_PASSWORD", + "MYSQL_CACHING_SHA2_PASSWORD", "POSTGRES_SCRAM_SHA_256", "POSTGRES_MD5", "SQL_SERVER_AUTHENTICATION" From a106e2595a9ca898bb2aaa9f3a88e8287b7af29f Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Mon, 16 Dec 2024 19:09:27 +0000 Subject: [PATCH 11/20] Bumping version to 1.35.82 --- .changes/1.35.82.json | 32 +++++++++++++++++++ .../next-release/api-change-cloud9-3690.json | 5 --- .../next-release/api-change-dlm-71242.json | 5 --- .../next-release/api-change-ec2-45209.json | 5 --- .../api-change-greengrassv2-86150.json | 5 --- .../api-change-medialive-49946.json | 5 --- .../next-release/api-change-rds-65326.json | 5 --- CHANGELOG.rst | 11 +++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 10 files changed, 45 insertions(+), 32 deletions(-) create mode 100644 .changes/1.35.82.json delete mode 100644 .changes/next-release/api-change-cloud9-3690.json delete mode 100644 .changes/next-release/api-change-dlm-71242.json delete mode 100644 .changes/next-release/api-change-ec2-45209.json delete mode 100644 .changes/next-release/api-change-greengrassv2-86150.json delete mode 100644 .changes/next-release/api-change-medialive-49946.json delete mode 100644 .changes/next-release/api-change-rds-65326.json diff --git a/.changes/1.35.82.json b/.changes/1.35.82.json new file mode 100644 index 0000000000..4361c9732a --- /dev/null +++ b/.changes/1.35.82.json @@ -0,0 +1,32 @@ +[ + { + "category": "``cloud9``", + "description": "Added information about Ubuntu 18.04 will be removed from the available imageIds for Cloud9 because Ubuntu 18.04 has ended standard support on May 31, 2023.", + "type": "api-change" + }, + { + "category": "``dlm``", + "description": "This release adds support for Local Zones in Amazon Data Lifecycle Manager EBS snapshot lifecycle policies.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release adds support for EBS local snapshots in AWS Dedicated Local Zones, which allows you to store snapshots of EBS volumes locally in Dedicated Local Zones.", + "type": "api-change" + }, + { + "category": "``greengrassv2``", + "description": "Add support for runtime in GetCoreDevice and ListCoreDevices APIs.", + "type": "api-change" + }, + { + "category": "``medialive``", + "description": "AWS Elemental MediaLive adds three new features: MediaPackage v2 endpoint support for live stream delivery, KLV metadata passthrough in CMAF Ingest output groups, and Metadata Name Modifier in CMAF Ingest output groups for customizing metadata track names in output streams.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "This release adds support for the \"MYSQL_CACHING_SHA2_PASSWORD\" enum value for RDS Proxy ClientPasswordAuthType.", + "type": "api-change" + } +] \ No newline at end of file diff --git 
a/.changes/next-release/api-change-cloud9-3690.json b/.changes/next-release/api-change-cloud9-3690.json deleted file mode 100644 index 2925fa74c0..0000000000 --- a/.changes/next-release/api-change-cloud9-3690.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cloud9``", - "description": "Added information about Ubuntu 18.04 will be removed from the available imageIds for Cloud9 because Ubuntu 18.04 has ended standard support on May 31, 2023." -} diff --git a/.changes/next-release/api-change-dlm-71242.json b/.changes/next-release/api-change-dlm-71242.json deleted file mode 100644 index 6f093950d3..0000000000 --- a/.changes/next-release/api-change-dlm-71242.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``dlm``", - "description": "This release adds support for Local Zones in Amazon Data Lifecycle Manager EBS snapshot lifecycle policies." -} diff --git a/.changes/next-release/api-change-ec2-45209.json b/.changes/next-release/api-change-ec2-45209.json deleted file mode 100644 index 59616de417..0000000000 --- a/.changes/next-release/api-change-ec2-45209.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ec2``", - "description": "This release adds support for EBS local snapshots in AWS Dedicated Local Zones, which allows you to store snapshots of EBS volumes locally in Dedicated Local Zones." -} diff --git a/.changes/next-release/api-change-greengrassv2-86150.json b/.changes/next-release/api-change-greengrassv2-86150.json deleted file mode 100644 index 0420047e3f..0000000000 --- a/.changes/next-release/api-change-greengrassv2-86150.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``greengrassv2``", - "description": "Add support for runtime in GetCoreDevice and ListCoreDevices APIs." -} diff --git a/.changes/next-release/api-change-medialive-49946.json b/.changes/next-release/api-change-medialive-49946.json deleted file mode 100644 index cfec01bdf2..0000000000 --- a/.changes/next-release/api-change-medialive-49946.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``medialive``", - "description": "AWS Elemental MediaLive adds three new features: MediaPackage v2 endpoint support for live stream delivery, KLV metadata passthrough in CMAF Ingest output groups, and Metadata Name Modifier in CMAF Ingest output groups for customizing metadata track names in output streams." -} diff --git a/.changes/next-release/api-change-rds-65326.json b/.changes/next-release/api-change-rds-65326.json deleted file mode 100644 index eb25275f04..0000000000 --- a/.changes/next-release/api-change-rds-65326.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``rds``", - "description": "This release adds support for the \"MYSQL_CACHING_SHA2_PASSWORD\" enum value for RDS Proxy ClientPasswordAuthType." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3c8f3837bd..75e2b51be4 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,17 @@ CHANGELOG ========= +1.35.82 +======= + +* api-change:``cloud9``: Added information about Ubuntu 18.04 will be removed from the available imageIds for Cloud9 because Ubuntu 18.04 has ended standard support on May 31, 2023. +* api-change:``dlm``: This release adds support for Local Zones in Amazon Data Lifecycle Manager EBS snapshot lifecycle policies. 
+* api-change:``ec2``: This release adds support for EBS local snapshots in AWS Dedicated Local Zones, which allows you to store snapshots of EBS volumes locally in Dedicated Local Zones. +* api-change:``greengrassv2``: Add support for runtime in GetCoreDevice and ListCoreDevices APIs. +* api-change:``medialive``: AWS Elemental MediaLive adds three new features: MediaPackage v2 endpoint support for live stream delivery, KLV metadata passthrough in CMAF Ingest output groups, and Metadata Name Modifier in CMAF Ingest output groups for customizing metadata track names in output streams. +* api-change:``rds``: This release adds support for the "MYSQL_CACHING_SHA2_PASSWORD" enum value for RDS Proxy ClientPasswordAuthType. + + 1.35.81 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index c72c9ac84d..d9e3e0f63f 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.81' +__version__ = '1.35.82' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 18c8d2506c..5349359f96 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.81' +release = '1.35.82' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 7d2c47c850c7bed76f534dd78c24a1273676a03c Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 17 Dec 2024 19:03:39 +0000 Subject: [PATCH 12/20] Update to latest models --- .../api-change-account-96163.json | 5 + .../next-release/api-change-backup-34455.json | 5 + .../api-change-backupsearch-17260.json | 5 + .../next-release/api-change-batch-5312.json | 5 + .../api-change-cleanroomsml-63720.json | 5 + .../api-change-cloudfront-34415.json | 5 + .../api-change-codepipeline-86495.json | 5 + .../next-release/api-change-ecs-68973.json | 5 + .../next-release/api-change-m2-66981.json | 5 + .../api-change-synthetics-5397.json | 5 + .../2021-02-01/endpoint-rule-set-1.json | 476 +++--- .../data/account/2021-02-01/service-2.json | 4 +- .../data/backup/2018-11-15/paginators-1.json | 6 + .../data/backup/2018-11-15/service-2.json | 336 ++++ .../2018-05-10/endpoint-rule-set-1.json | 151 ++ .../backupsearch/2018-05-10/paginators-1.json | 28 + .../backupsearch/2018-05-10/service-2.json | 1471 +++++++++++++++++ .../backupsearch/2018-05-10/waiters-2.json | 5 + botocore/data/batch/2016-08-10/service-2.json | 38 +- .../cleanroomsml/2023-09-06/service-2.json | 5 +- .../data/cloudfront/2020-05-31/service-2.json | 14 +- .../codepipeline/2015-07-09/service-2.json | 16 +- botocore/data/ecs/2014-11-13/service-2.json | 14 +- botocore/data/m2/2021-04-28/service-2.json | 19 + .../data/synthetics/2017-10-11/service-2.json | 20 +- .../account/endpoint-tests-1.json | 318 +++- .../backupsearch/endpoint-tests-1.json | 313 ++++ 27 files changed, 2931 insertions(+), 353 deletions(-) create mode 100644 .changes/next-release/api-change-account-96163.json create mode 100644 .changes/next-release/api-change-backup-34455.json create mode 100644 .changes/next-release/api-change-backupsearch-17260.json create mode 100644 .changes/next-release/api-change-batch-5312.json create mode 100644 .changes/next-release/api-change-cleanroomsml-63720.json create mode 100644 .changes/next-release/api-change-cloudfront-34415.json create mode 100644 .changes/next-release/api-change-codepipeline-86495.json create mode 100644 
.changes/next-release/api-change-ecs-68973.json create mode 100644 .changes/next-release/api-change-m2-66981.json create mode 100644 .changes/next-release/api-change-synthetics-5397.json create mode 100644 botocore/data/backupsearch/2018-05-10/endpoint-rule-set-1.json create mode 100644 botocore/data/backupsearch/2018-05-10/paginators-1.json create mode 100644 botocore/data/backupsearch/2018-05-10/service-2.json create mode 100644 botocore/data/backupsearch/2018-05-10/waiters-2.json create mode 100644 tests/functional/endpoint-rules/backupsearch/endpoint-tests-1.json diff --git a/.changes/next-release/api-change-account-96163.json b/.changes/next-release/api-change-account-96163.json new file mode 100644 index 0000000000..075e391a7a --- /dev/null +++ b/.changes/next-release/api-change-account-96163.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``account``", + "description": "Update endpoint configuration." +} diff --git a/.changes/next-release/api-change-backup-34455.json b/.changes/next-release/api-change-backup-34455.json new file mode 100644 index 0000000000..6d5e5fc9d9 --- /dev/null +++ b/.changes/next-release/api-change-backup-34455.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``backup``", + "description": "Add Support for Backup Indexing" +} diff --git a/.changes/next-release/api-change-backupsearch-17260.json b/.changes/next-release/api-change-backupsearch-17260.json new file mode 100644 index 0000000000..070702a841 --- /dev/null +++ b/.changes/next-release/api-change-backupsearch-17260.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``backupsearch``", + "description": "Add support for searching backups" +} diff --git a/.changes/next-release/api-change-batch-5312.json b/.changes/next-release/api-change-batch-5312.json new file mode 100644 index 0000000000..31f5afe1f3 --- /dev/null +++ b/.changes/next-release/api-change-batch-5312.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``batch``", + "description": "This feature allows AWS Batch on Amazon EKS to support configuration of Pod Annotations, overriding Namespace on which the Batch job's Pod runs on, and allows Subpath and Persistent Volume claim to be set for AWS Batch on Amazon EKS jobs." +} diff --git a/.changes/next-release/api-change-cleanroomsml-63720.json b/.changes/next-release/api-change-cleanroomsml-63720.json new file mode 100644 index 0000000000..47e5d9c770 --- /dev/null +++ b/.changes/next-release/api-change-cleanroomsml-63720.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cleanroomsml``", + "description": "Add support for SQL compute configuration for StartAudienceGenerationJob API." +} diff --git a/.changes/next-release/api-change-cloudfront-34415.json b/.changes/next-release/api-change-cloudfront-34415.json new file mode 100644 index 0000000000..9e97799d46 --- /dev/null +++ b/.changes/next-release/api-change-cloudfront-34415.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cloudfront``", + "description": "Adds support for OriginReadTimeout and OriginKeepaliveTimeout to create CloudFront Distributions with VPC Origins." 
+} diff --git a/.changes/next-release/api-change-codepipeline-86495.json b/.changes/next-release/api-change-codepipeline-86495.json new file mode 100644 index 0000000000..0c18d9c3b2 --- /dev/null +++ b/.changes/next-release/api-change-codepipeline-86495.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``codepipeline``", + "description": "AWS CodePipeline V2 type pipelines now support Managed Compute Rule." +} diff --git a/.changes/next-release/api-change-ecs-68973.json b/.changes/next-release/api-change-ecs-68973.json new file mode 100644 index 0000000000..1318990553 --- /dev/null +++ b/.changes/next-release/api-change-ecs-68973.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ecs``", + "description": "Added support for enableFaultInjection task definition parameter which can be used to enable Fault Injection feature on ECS tasks." +} diff --git a/.changes/next-release/api-change-m2-66981.json b/.changes/next-release/api-change-m2-66981.json new file mode 100644 index 0000000000..8538008340 --- /dev/null +++ b/.changes/next-release/api-change-m2-66981.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``m2``", + "description": "This release adds support for AWS Mainframe Modernization(M2) Service to allow specifying network type(ipv4, dual) for the environment instances. For dual network type, m2 environment applications will serve both IPv4 and IPv6 requests, whereas for ipv4 it will serve only IPv4 requests." +} diff --git a/.changes/next-release/api-change-synthetics-5397.json b/.changes/next-release/api-change-synthetics-5397.json new file mode 100644 index 0000000000..dc398e9f8e --- /dev/null +++ b/.changes/next-release/api-change-synthetics-5397.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``synthetics``", + "description": "Add support to toggle outbound IPv6 traffic on canaries connected to dualstack subnets. This behavior can be controlled via the new Ipv6AllowedForDualStack parameter of the VpcConfig input object in CreateCanary and UpdateCanary APIs." 
+} diff --git a/botocore/data/account/2021-02-01/endpoint-rule-set-1.json b/botocore/data/account/2021-02-01/endpoint-rule-set-1.json index 8f8a08191d..ca2a9ade39 100644 --- a/botocore/data/account/2021-02-01/endpoint-rule-set-1.json +++ b/botocore/data/account/2021-02-01/endpoint-rule-set-1.json @@ -1,12 +1,6 @@ { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "String" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -26,6 +20,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" } }, "rules": [ @@ -57,263 +57,235 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": "Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws" - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - false - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], - "endpoint": { - "url": "https://account.us-east-1.amazonaws.com", - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseFIPS" }, - "name" + true ] }, - "aws-cn" - ] - }, - { - "fn": "booleanEquals", - "argv": [ { - "ref": "UseFIPS" - }, - false - ] - }, - { - "fn": "booleanEquals", - "argv": [ + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ { - "ref": "UseDualStack" + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://account-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" }, - false - ] - } - ], - "endpoint": { - "url": "https://account.cn-northwest-1.amazonaws.com.cn", - "properties": { - "authSchemes": [ { - "name": "sigv4", - "signingName": "account", - "signingRegion": "cn-northwest-1" + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } - ] - }, - "headers": {} - }, - "type": "endpoint" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] + ], + "type": "tree" }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsDualStack" + true ] } - ] - } - ], - "rules": [ + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://account-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "UseDualStack" }, true ] @@ -321,96 +293,80 @@ ], "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://account-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ + true, { - "ref": "PartitionResult" - }, - "supportsDualStack" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } - ] - } - ], - 
"rules": [ + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://account.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://account.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://account.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://account.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] } \ No newline at end of file diff --git a/botocore/data/account/2021-02-01/service-2.json b/botocore/data/account/2021-02-01/service-2.json index 5620b7d1c0..821c3d79e9 100644 --- a/botocore/data/account/2021-02-01/service-2.json +++ b/botocore/data/account/2021-02-01/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"account", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"AWS Account", "serviceId":"Account", "signatureVersion":"v4", "signingName":"account", - "uid":"account-2021-02-01" + "uid":"account-2021-02-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AcceptPrimaryEmailUpdate":{ diff --git a/botocore/data/backup/2018-11-15/paginators-1.json b/botocore/data/backup/2018-11-15/paginators-1.json index 1720297a2e..f19ea3db26 100644 --- a/botocore/data/backup/2018-11-15/paginators-1.json +++ b/botocore/data/backup/2018-11-15/paginators-1.json @@ -101,6 +101,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "RestoreTestingSelections" + }, + "ListIndexedRecoveryPoints": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "IndexedRecoveryPoints" } } } diff --git a/botocore/data/backup/2018-11-15/service-2.json b/botocore/data/backup/2018-11-15/service-2.json index 4adb46a318..4727e6f741 100644 --- a/botocore/data/backup/2018-11-15/service-2.json +++ b/botocore/data/backup/2018-11-15/service-2.json @@ -722,6 +722,23 @@ "documentation":"

This action returns details for a specified legal hold. The details are the body of a legal hold in JSON format, in addition to metadata.

", "idempotent":true }, + "GetRecoveryPointIndexDetails":{ + "name":"GetRecoveryPointIndexDetails", + "http":{ + "method":"GET", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/index" + }, + "input":{"shape":"GetRecoveryPointIndexDetailsInput"}, + "output":{"shape":"GetRecoveryPointIndexDetailsOutput"}, + "errors":[ + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

This operation returns the metadata and details specific to the backup index associated with the specified recovery point.
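A minimal sketch of calling this new operation, assuming a boto3/botocore build that includes the updated backup model; the vault name and recovery point ARN are placeholders.

```python
# Sketch: fetch index details for one recovery point.
import boto3

backup = boto3.client("backup")
details = backup.get_recovery_point_index_details(
    BackupVaultName="my-backup-vault",
    RecoveryPointArn=(
        "arn:aws:backup:us-east-1:123456789012:recovery-point:"
        "1EB3B5E7-9EB0-435A-A80B-108B488B0D45"
    ),
)
print(details["IndexStatus"], details.get("TotalItemsIndexed"))
```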

", + "idempotent":true + }, "GetRecoveryPointRestoreMetadata":{ "name":"GetRecoveryPointRestoreMetadata", "http":{ @@ -969,6 +986,22 @@ ], "documentation":"

Returns a list of all frameworks for an Amazon Web Services account and Amazon Web Services Region.

" }, + "ListIndexedRecoveryPoints":{ + "name":"ListIndexedRecoveryPoints", + "http":{ + "method":"GET", + "requestUri":"/indexes/recovery-point/" + }, + "input":{"shape":"ListIndexedRecoveryPointsInput"}, + "output":{"shape":"ListIndexedRecoveryPointsOutput"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

This operation returns a list of recovery points that have an associated index, belonging to the specified account.

Optional parameters you can include are: MaxResults, NextToken, SourceResourceArn, CreatedBefore, CreatedAfter, and ResourceType.
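A minimal sketch using the ListIndexedRecoveryPoints paginator added in this change, assuming a boto3/botocore build that includes these models; the filter values are placeholders.

```python
# Sketch: page through indexed recovery points, filtered by type and status.
import boto3

backup = boto3.client("backup")
paginator = backup.get_paginator("list_indexed_recovery_points")
for page in paginator.paginate(ResourceType="S3", IndexStatus="ACTIVE"):
    for recovery_point in page.get("IndexedRecoveryPoints", []):
        print(recovery_point["RecoveryPointArn"], recovery_point["IndexStatus"])
```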

", + "idempotent":true + }, "ListLegalHolds":{ "name":"ListLegalHolds", "http":{ @@ -1429,6 +1462,24 @@ ], "documentation":"

Updates whether the Amazon Web Services account is opted in to cross-account backup. Returns an error if the account is not an Organizations management account. Use the DescribeGlobalSettings API to determine the current settings.

" }, + "UpdateRecoveryPointIndexSettings":{ + "name":"UpdateRecoveryPointIndexSettings", + "http":{ + "method":"POST", + "requestUri":"/backup-vaults/{backupVaultName}/recovery-points/{recoveryPointArn}/index" + }, + "input":{"shape":"UpdateRecoveryPointIndexSettingsInput"}, + "output":{"shape":"UpdateRecoveryPointIndexSettingsOutput"}, + "errors":[ + {"shape":"MissingParameterValueException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

This operation updates the settings of a recovery point index.

Required: BackupVaultName, RecoveryPointArn, and Index
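A minimal sketch of enabling an index on an existing recovery point with this operation, assuming a boto3/botocore build that includes the updated model; the vault name, ARNs, and role are placeholders.

```python
# Sketch: turn on indexing for an existing ACTIVE recovery point.
import boto3

backup = boto3.client("backup")
response = backup.update_recovery_point_index_settings(
    BackupVaultName="my-backup-vault",
    RecoveryPointArn=(
        "arn:aws:backup:us-east-1:123456789012:recovery-point:"
        "1EB3B5E7-9EB0-435A-A80B-108B488B0D45"
    ),
    IamRoleArn="arn:aws:iam::123456789012:role/S3Access",
    Index="ENABLED",
)
print(response["IndexStatus"])
```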

", + "idempotent":true + }, "UpdateRecoveryPointLifecycle":{ "name":"UpdateRecoveryPointLifecycle", "http":{ @@ -1938,6 +1989,10 @@ "ScheduleExpressionTimezone":{ "shape":"Timezone", "documentation":"

The timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.

" + }, + "IndexActions":{ + "shape":"IndexActions", + "documentation":"

IndexActions is an array you use to specify how backup data should be indexed.

Each BackupRule can have 0 or 1 IndexAction, as each backup can have up to one index associated with it.

Within the array is ResourceTypes. Only one resource type will be accepted for each BackupRule.
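A minimal sketch of a backup rule that requests an index, assuming a boto3/botocore build with this model. The IndexActions element follows the shape added here; the surrounding plan and rule fields come from the existing CreateBackupPlan API and are placeholders.

```python
# Sketch: create a backup plan whose rule asks for a backup index on S3 backups.
import boto3

backup = boto3.client("backup")
backup.create_backup_plan(
    BackupPlan={
        "BackupPlanName": "indexed-s3-backups",
        "Rules": [
            {
                "RuleName": "daily-s3",
                "TargetBackupVaultName": "my-backup-vault",
                "ScheduleExpression": "cron(0 5 * * ? *)",
                # At most one IndexAction per rule, with one resource type.
                "IndexActions": [{"ResourceTypes": ["S3"]}],
            }
        ],
    }
)
```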

" } }, "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" @@ -1988,6 +2043,10 @@ "ScheduleExpressionTimezone":{ "shape":"Timezone", "documentation":"

The timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.

" + }, + "IndexActions":{ + "shape":"IndexActions", + "documentation":"

There can be up to one IndexAction in each BackupRule, as each backup can have 0 or 1 backup index associated with it.

Within the array is ResourceTypes. Only 1 resource type will be accepted for each BackupRule. Valid values:

  • EBS for Amazon Elastic Block Store

  • S3 for Amazon Simple Storage Service (Amazon S3)

" } }, "documentation":"

Specifies a scheduled task used to back up a selection of resources.

" @@ -3627,6 +3686,14 @@ "VaultType":{ "shape":"VaultType", "documentation":"

The type of vault in which the described recovery point is stored.

" + }, + "IndexStatus":{ + "shape":"IndexStatus", + "documentation":"

This is the current status for the backup index associated with the specified recovery point.

Statuses are: PENDING | ACTIVE | FAILED | DELETING

A recovery point with an index that has the status of ACTIVE can be included in a search.

" + }, + "IndexStatusMessage":{ + "shape":"string", + "documentation":"

A string in the form of a detailed message explaining the status of a backup index associated with the recovery point.

" } } }, @@ -4183,6 +4250,68 @@ } } }, + "GetRecoveryPointIndexDetailsInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RecoveryPointArn" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created.

Accepted characters include lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "location":"uri", + "locationName":"recoveryPointArn" + } + } + }, + "GetRecoveryPointIndexDetailsOutput":{ + "type":"structure", + "members":{ + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies the backup vault where the recovery point index is stored.

For example, arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault.

" + }, + "SourceResourceArn":{ + "shape":"ARN", + "documentation":"

A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource.

" + }, + "IndexCreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup index was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "IndexDeletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup index was deleted, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "IndexCompletionDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup index finished creation, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "IndexStatus":{ + "shape":"IndexStatus", + "documentation":"

This is the current status for the backup index associated with the specified recovery point.

Statuses are: PENDING | ACTIVE | FAILED | DELETING

A recovery point with an index that has the status of ACTIVE can be included in a search.

" + }, + "IndexStatusMessage":{ + "shape":"string", + "documentation":"

A detailed message explaining the status of a backup index associated with the recovery point.

" + }, + "TotalItemsIndexed":{ + "shape":"Long", + "documentation":"

Count of items within the backup index associated with the recovery point.

" + } + } + }, "GetRecoveryPointRestoreMetadataInput":{ "type":"structure", "required":[ @@ -4364,6 +4493,82 @@ "GlobalSettingsValue":{"type":"string"}, "IAMPolicy":{"type":"string"}, "IAMRoleArn":{"type":"string"}, + "Index":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "IndexAction":{ + "type":"structure", + "members":{ + "ResourceTypes":{ + "shape":"ResourceTypes", + "documentation":"

0 or 1 index action will be accepted for each BackupRule.

Valid values:

  • EBS for Amazon Elastic Block Store

  • S3 for Amazon Simple Storage Service (Amazon S3)

" + } + }, + "documentation":"

This is an optional array within a BackupRule.

IndexAction consists of one ResourceTypes.

" + }, + "IndexActions":{ + "type":"list", + "member":{"shape":"IndexAction"} + }, + "IndexStatus":{ + "type":"string", + "enum":[ + "PENDING", + "ACTIVE", + "FAILED", + "DELETING" + ] + }, + "IndexedRecoveryPoint":{ + "type":"structure", + "members":{ + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45

" + }, + "SourceResourceArn":{ + "shape":"ARN", + "documentation":"

A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource.

" + }, + "IamRoleArn":{ + "shape":"ARN", + "documentation":"

This specifies the IAM role ARN used for this operation.

For example, arn:aws:iam::123456789012:role/S3Access

" + }, + "BackupCreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The resource type of the indexed recovery point.

  • EBS for Amazon Elastic Block Store

  • S3 for Amazon Simple Storage Service (Amazon S3)

" + }, + "IndexCreationDate":{ + "shape":"timestamp", + "documentation":"

The date and time that a backup index was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationDate is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "IndexStatus":{ + "shape":"IndexStatus", + "documentation":"

This is the current status for the backup index associated with the specified recovery point.

Statuses are: PENDING | ACTIVE | FAILED | DELETING

A recovery point with an index that has the status of ACTIVE can be included in a search.

" + }, + "IndexStatusMessage":{ + "shape":"string", + "documentation":"

A string in the form of a detailed message explaining the status of a backup index associated with the recovery point.

" + }, + "BackupVaultArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies the backup vault where the recovery point index is stored.

For example, arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault.

" + } + }, + "documentation":"

This is a recovery point that has an associated backup index.

Only recovery points with a backup index can be included in a search.

" + }, + "IndexedRecoveryPointList":{ + "type":"list", + "member":{"shape":"IndexedRecoveryPoint"} + }, "InvalidParameterValueException":{ "type":"structure", "members":{ @@ -5052,6 +5257,66 @@ } } }, + "ListIndexedRecoveryPointsInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned recovery points.

For example, if a request is made to return MaxResults number of indexed recovery points, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of resource list items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "SourceResourceArn":{ + "shape":"ARN", + "documentation":"

A string of the Amazon Resource Name (ARN) that uniquely identifies the source resource.

", + "location":"querystring", + "locationName":"sourceResourceArn" + }, + "CreatedBefore":{ + "shape":"timestamp", + "documentation":"

Returns only indexed recovery points that were created before the specified date.

", + "location":"querystring", + "locationName":"createdBefore" + }, + "CreatedAfter":{ + "shape":"timestamp", + "documentation":"

Returns only indexed recovery points that were created after the specified date.

", + "location":"querystring", + "locationName":"createdAfter" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

Returns a list of indexed recovery points for the specified resource type(s).

Accepted values include:

  • EBS for Amazon Elastic Block Store

  • S3 for Amazon Simple Storage Service (Amazon S3)

", + "location":"querystring", + "locationName":"resourceType" + }, + "IndexStatus":{ + "shape":"IndexStatus", + "documentation":"

Include this parameter to filter the returned list by the indicated statuses.

Accepted values: PENDING | ACTIVE | FAILED | DELETING

A recovery point with an index that has the status of ACTIVE can be included in a search.

", + "location":"querystring", + "locationName":"indexStatus" + } + } + }, + "ListIndexedRecoveryPointsOutput":{ + "type":"structure", + "members":{ + "IndexedRecoveryPoints":{ + "shape":"IndexedRecoveryPointList", + "documentation":"

This is a list of recovery points that have an associated index, belonging to the specified account.

" + }, + "NextToken":{ + "shape":"string", + "documentation":"

The next item following a partial list of returned recovery points.

For example, if a request is made to return MaxResults number of indexed recovery points, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, "ListLegalHoldsInput":{ "type":"structure", "members":{ @@ -5979,6 +6244,14 @@ "VaultType":{ "shape":"VaultType", "documentation":"

The type of vault in which the described recovery point is stored.

" + }, + "IndexStatus":{ + "shape":"IndexStatus", + "documentation":"

This is the current status for the backup index associated with the specified recovery point.

Statuses are: PENDING | ACTIVE | FAILED | DELETING

A recovery point with an index that has the status of ACTIVE can be included in a search.

" + }, + "IndexStatusMessage":{ + "shape":"string", + "documentation":"

A string in the form of a detailed message explaining the status of a backup index associated with the recovery point.

" } }, "documentation":"

Contains detailed information about the recovery points stored in a backup vault.

" @@ -6033,6 +6306,14 @@ "VaultType":{ "shape":"VaultType", "documentation":"

The type of vault in which the described recovery point is stored.

" + }, + "IndexStatus":{ + "shape":"IndexStatus", + "documentation":"

This is the current status for the backup index associated with the specified recovery point.

Statuses are: PENDING | ACTIVE | FAILED | DELETING

A recovery point with an index that has the status of ACTIVE can be included in a search.

" + }, + "IndexStatusMessage":{ + "shape":"string", + "documentation":"

A string in the form of a detailed message explaining the status of a backup index associated with the recovery point.

" } }, "documentation":"

Contains detailed information about a saved recovery point.

" @@ -6925,6 +7206,10 @@ "BackupOptions":{ "shape":"BackupOptions", "documentation":"

The backup option for a selected resource. This option is only available for Windows Volume Shadow Copy Service (VSS) backup jobs.

Valid values: Set to \"WindowsVSS\":\"enabled\" to enable the WindowsVSS backup option and create a Windows VSS backup. Set to \"WindowsVSS\":\"disabled\" to create a regular backup. The WindowsVSS option is not enabled by default.

" + }, + "Index":{ + "shape":"Index", + "documentation":"

Include this parameter to enable index creation if your backup job has a resource type that supports backup indexes.

Resource types that support backup indexes include:

  • EBS for Amazon Elastic Block Store

  • S3 for Amazon Simple Storage Service (Amazon S3)

Index can have 1 of 2 possible values, either ENABLED or DISABLED.

To create a backup index for an eligible ACTIVE recovery point that does not yet have a backup index, set value to ENABLED.

To delete a backup index, set value to DISABLED.
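A minimal sketch of requesting index creation on an on-demand backup, assuming a boto3/botocore build that includes the updated model; the vault, role, and resource ARN are placeholders, and Index takes ENABLED or DISABLED as documented above.

```python
# Sketch: start an S3 backup job with indexing enabled.
import boto3

backup = boto3.client("backup")
job = backup.start_backup_job(
    BackupVaultName="my-backup-vault",
    ResourceArn="arn:aws:s3:::my-example-bucket",
    IamRoleArn="arn:aws:iam::123456789012:role/BackupServiceRole",
    Index="ENABLED",
)
print(job["BackupJobId"])
```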

" } } }, @@ -7235,6 +7520,57 @@ } } }, + "UpdateRecoveryPointIndexSettingsInput":{ + "type":"structure", + "required":[ + "BackupVaultName", + "RecoveryPointArn", + "Index" + ], + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created.

Accepted characters include lowercase letters, numbers, and hyphens.

", + "location":"uri", + "locationName":"backupVaultName" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

", + "location":"uri", + "locationName":"recoveryPointArn" + }, + "IamRoleArn":{ + "shape":"IAMRoleArn", + "documentation":"

This specifies the IAM role ARN used for this operation.

For example, arn:aws:iam::123456789012:role/S3Access

" + }, + "Index":{ + "shape":"Index", + "documentation":"

Index can have 1 of 2 possible values, either ENABLED or DISABLED.

To create a backup index for an eligible ACTIVE recovery point that does not yet have a backup index, set value to ENABLED.

To delete a backup index, set value to DISABLED.

" + } + } + }, + "UpdateRecoveryPointIndexSettingsOutput":{ + "type":"structure", + "members":{ + "BackupVaultName":{ + "shape":"BackupVaultName", + "documentation":"

The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the Region where they are created.

" + }, + "RecoveryPointArn":{ + "shape":"ARN", + "documentation":"

An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.

" + }, + "IndexStatus":{ + "shape":"IndexStatus", + "documentation":"

This is the current status for the backup index associated with the specified recovery point.

Statuses are: PENDING | ACTIVE | FAILED | DELETING

A recovery point with an index that has the status of ACTIVE can be included in a search.

" + }, + "Index":{ + "shape":"Index", + "documentation":"

Index can have 1 of 2 possible values, either ENABLED or DISABLED.

A value of ENABLED means a backup index for an eligible ACTIVE recovery point has been created.

A value of DISABLED means a backup index was deleted.

" + } + } + }, "UpdateRecoveryPointLifecycleInput":{ "type":"structure", "required":[ diff --git a/botocore/data/backupsearch/2018-05-10/endpoint-rule-set-1.json b/botocore/data/backupsearch/2018-05-10/endpoint-rule-set-1.json new file mode 100644 index 0000000000..6bd2aba542 --- /dev/null +++ b/botocore/data/backupsearch/2018-05-10/endpoint-rule-set-1.json @@ -0,0 +1,151 @@ +{ + "version": "1.0", + "parameters": { + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://backup-search-fips.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://backup-search.{PartitionResult#implicitGlobalRegion}.{PartitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "{PartitionResult#implicitGlobalRegion}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/botocore/data/backupsearch/2018-05-10/paginators-1.json b/botocore/data/backupsearch/2018-05-10/paginators-1.json new file mode 100644 index 0000000000..bc482fde26 --- /dev/null +++ b/botocore/data/backupsearch/2018-05-10/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListSearchJobBackups": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Results" + }, + "ListSearchJobResults": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Results" + }, + "ListSearchJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "SearchJobs" + }, + 
"ListSearchResultExportJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "ExportJobs" + } + } +} diff --git a/botocore/data/backupsearch/2018-05-10/service-2.json b/botocore/data/backupsearch/2018-05-10/service-2.json new file mode 100644 index 0000000000..28b36780ab --- /dev/null +++ b/botocore/data/backupsearch/2018-05-10/service-2.json @@ -0,0 +1,1471 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"backup-search", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"AWS Backup Search", + "serviceId":"BackupSearch", + "signatureVersion":"v4", + "signingName":"backup-search", + "uid":"backupsearch-2018-05-10" + }, + "operations":{ + "GetSearchJob":{ + "name":"GetSearchJob", + "http":{ + "method":"GET", + "requestUri":"/search-jobs/{SearchJobIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetSearchJobInput"}, + "output":{"shape":"GetSearchJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation retrieves metadata of a search job, including its progress.
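A minimal sketch of calling this operation, assuming a boto3/botocore build that includes the new backupsearch model; the client name follows the new backupsearch data directory, and the identifier is a placeholder.

```python
# Sketch: check on a running or completed search job.
import boto3

search = boto3.client("backupsearch")
job = search.get_search_job(SearchJobIdentifier="12345678-abcd-example-search-job")
print(job)
```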

" + }, + "GetSearchResultExportJob":{ + "name":"GetSearchResultExportJob", + "http":{ + "method":"GET", + "requestUri":"/export-search-jobs/{ExportJobIdentifier}", + "responseCode":200 + }, + "input":{"shape":"GetSearchResultExportJobInput"}, + "output":{"shape":"GetSearchResultExportJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation retrieves the metadata of an export job.

An export job is an operation that transmits the results of a search job to a specified S3 bucket in a .csv file.

An export job allows you to retain results of a search beyond the search job's scheduled retention of 7 days.

" + }, + "ListSearchJobBackups":{ + "name":"ListSearchJobBackups", + "http":{ + "method":"GET", + "requestUri":"/search-jobs/{SearchJobIdentifier}/backups", + "responseCode":200 + }, + "input":{"shape":"ListSearchJobBackupsInput"}, + "output":{"shape":"ListSearchJobBackupsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation returns a list of all backups (recovery points) in a paginated format that were included in the search job.

If a search does not display an expected backup in the results, you can call this operation to display each backup included in the search. Any backups that were not included because they have a FAILED status from a permissions issue will be displayed, along with a status message.

Only recovery points with a backup index that has a status of ACTIVE will be included in search results. If the index has any other status, its status will be displayed along with a status message.
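A minimal sketch using the ListSearchJobBackups paginator added in this change, assuming a boto3/botocore build with the new backupsearch model; the identifier is a placeholder.

```python
# Sketch: list every recovery point the search job covered, including any
# FAILED or non-ACTIVE index entries and their status messages.
import boto3

search = boto3.client("backupsearch")
paginator = search.get_paginator("list_search_job_backups")
for page in paginator.paginate(SearchJobIdentifier="12345678-abcd-example-search-job"):
    for backup_entry in page.get("Results", []):
        print(backup_entry)
```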

" + }, + "ListSearchJobResults":{ + "name":"ListSearchJobResults", + "http":{ + "method":"GET", + "requestUri":"/search-jobs/{SearchJobIdentifier}/search-results", + "responseCode":200 + }, + "input":{"shape":"ListSearchJobResultsInput"}, + "output":{"shape":"ListSearchJobResultsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation returns a list of the results of a specified search job.

" + }, + "ListSearchJobs":{ + "name":"ListSearchJobs", + "http":{ + "method":"GET", + "requestUri":"/search-jobs", + "responseCode":200 + }, + "input":{"shape":"ListSearchJobsInput"}, + "output":{"shape":"ListSearchJobsOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation returns a list of search jobs belonging to an account.
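A short sketch of enumerating search jobs with the ListSearchJobs paginator added in this change, assuming a boto3/botocore build with the new model.

```python
# Sketch: list the account's search jobs.
import boto3

search = boto3.client("backupsearch")
paginator = search.get_paginator("list_search_jobs")
for page in paginator.paginate():
    for search_job in page.get("SearchJobs", []):
        print(search_job)
```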

" + }, + "ListSearchResultExportJobs":{ + "name":"ListSearchResultExportJobs", + "http":{ + "method":"GET", + "requestUri":"/export-search-jobs", + "responseCode":200 + }, + "input":{"shape":"ListSearchResultExportJobsInput"}, + "output":{"shape":"ListSearchResultExportJobsOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

This operation returns a list of export jobs, which transmit the results of a search job to a specified destination S3 bucket.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation returns the tags for the specified resource.

" + }, + "StartSearchJob":{ + "name":"StartSearchJob", + "http":{ + "method":"PUT", + "requestUri":"/search-jobs", + "responseCode":200 + }, + "input":{"shape":"StartSearchJobInput"}, + "output":{"shape":"StartSearchJobOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

This operation creates a search job which returns recovery points filtered by SearchScope and items filtered by ItemFilters.

You can optionally include ClientToken, EncryptionKeyArn, Name, and/or Tags.

", + "idempotent":true + }, + "StartSearchResultExportJob":{ + "name":"StartSearchResultExportJob", + "http":{ + "method":"PUT", + "requestUri":"/export-search-jobs", + "responseCode":200 + }, + "input":{"shape":"StartSearchResultExportJobInput"}, + "output":{"shape":"StartSearchResultExportJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"} + ], + "documentation":"

This operation starts a job to export the results of a search job to a designated S3 bucket.

", + "idempotent":true + }, + "StopSearchJob":{ + "name":"StopSearchJob", + "http":{ + "method":"PUT", + "requestUri":"/search-jobs/{SearchJobIdentifier}/actions/cancel", + "responseCode":200 + }, + "input":{"shape":"StopSearchJobInput"}, + "output":{"shape":"StopSearchJobOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

This operation ends a search job.

Only a search job with a status of RUNNING can be stopped.

", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation puts tags on the resource you indicate.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{ResourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

This operation removes tags from the specified resource.

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

User does not have sufficient access to perform this action.

" + } + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "BackupCreationTimeFilter":{ + "type":"structure", + "members":{ + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

This timestamp includes only recovery points created after the specified time.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

This timestamp includes only recovery points created before the specified time.

" + } + }, + "documentation":"

This filters by recovery points within the CreatedAfter and CreatedBefore timestamps.

" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

Updating or deleting a resource can cause an inconsistent state.

" + }, + "resourceId":{ + "shape":"String", + "documentation":"

Identifier of the resource affected.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

Type of the resource affected.

" + } + }, + "documentation":"

This exception occurs when a conflict with a previous successful operation is detected. This generally occurs when the previous operation did not have time to propagate to the host serving the current request.

A retry (with appropriate backoff logic) is the recommended response to this exception.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CurrentSearchProgress":{ + "type":"structure", + "members":{ + "RecoveryPointsScannedCount":{ + "shape":"Integer", + "documentation":"

This number is the sum of all backups that have been scanned so far during a search job.

" + }, + "ItemsScannedCount":{ + "shape":"Long", + "documentation":"

This number is the sum of all items that have been scanned so far during a search job.

" + }, + "ItemsMatchedCount":{ + "shape":"Long", + "documentation":"

This number is the sum of all items that match the item filters in a search job in progress.

" + } + }, + "documentation":"

This contains information about results retrieved from a search job that may not have completed.

" + }, + "EBSItemFilter":{ + "type":"structure", + "members":{ + "FilePaths":{ + "shape":"StringConditionList", + "documentation":"

You can include 1 to 10 values.

If one file path is included, the results will return only items that match the file path.

If more than one file path is included, the results will return all items that match any of the file paths.

" + }, + "Sizes":{ + "shape":"LongConditionList", + "documentation":"

You can include 1 to 10 values.

If one is included, the results will return only items that match.

If more than one is included, the results will return all items that match any of the included values.

" + }, + "CreationTimes":{ + "shape":"TimeConditionList", + "documentation":"

You can include 1 to 10 values.

If one is included, the results will return only items that match.

If more than one is included, the results will return all items that match any of the included values.

" + }, + "LastModificationTimes":{ + "shape":"TimeConditionList", + "documentation":"

You can include 1 to 10 values.

If one is included, the results will return only items that match.

If more than one is included, the results will return all items that match any of the included values.

" + } + }, + "documentation":"

This contains arrays of objects, which may include CreationTimes time condition objects, FilePaths string objects, LastModificationTimes time condition objects, and Sizes long condition objects.

" + }, + "EBSItemFilters":{ + "type":"list", + "member":{"shape":"EBSItemFilter"}, + "max":10, + "min":0 + }, + "EBSResultItem":{ + "type":"structure", + "members":{ + "BackupResourceArn":{ + "shape":"String", + "documentation":"

These are one or more items in the results that match values for the Amazon Resource Name (ARN) of recovery points returned in a search of Amazon EBS backup metadata.

" + }, + "SourceResourceArn":{ + "shape":"String", + "documentation":"

These are one or more items in the results that match values for the Amazon Resource Name (ARN) of source resources returned in a search of Amazon EBS backup metadata.

" + }, + "BackupVaultName":{ + "shape":"String", + "documentation":"

The name of the backup vault.

" + }, + "FileSystemIdentifier":{ + "shape":"String", + "documentation":"

These are one or more items in the results that match values for file systems returned in a search of Amazon EBS backup metadata.

" + }, + "FilePath":{ + "shape":"FilePath", + "documentation":"

These are one or more items in the results that match values for file paths returned in a search of Amazon EBS backup metadata.

" + }, + "FileSize":{ + "shape":"Long", + "documentation":"

These are one or more items in the results that match values for file sizes returned in a search of Amazon EBS backup metadata.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

These are one or more items in the results that match values for creation times returned in a search of Amazon EBS backup metadata.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

These are one or more items in the results that match values for Last Modified Time returned in a search of Amazon EBS backup metadata.

" + } + }, + "documentation":"

These are the items returned in the results of a search of Amazon EBS backup metadata.

" + }, + "EncryptionKeyArn":{"type":"string"}, + "ExportJobArn":{"type":"string"}, + "ExportJobStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "FAILED", + "COMPLETED" + ] + }, + "ExportJobSummaries":{ + "type":"list", + "member":{"shape":"ExportJobSummary"} + }, + "ExportJobSummary":{ + "type":"structure", + "required":["ExportJobIdentifier"], + "members":{ + "ExportJobIdentifier":{ + "shape":"GenericId", + "documentation":"

This is the unique string that identifies a specific export job.

" + }, + "ExportJobArn":{ + "shape":"ExportJobArn", + "documentation":"

This is the unique ARN (Amazon Resource Name) that belongs to the new export job.

" + }, + "Status":{ + "shape":"ExportJobStatus", + "documentation":"

The status of the export job is one of the following:

RUNNING; FAILED; or COMPLETED.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

This is a timestamp of the time the export job was created.

" + }, + "CompletionTime":{ + "shape":"Timestamp", + "documentation":"

This is a timestamp of the time the export job completed.

" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

A status message is a string that is returned for an export job.

A status message is included for any status other than COMPLETED, and for COMPLETED jobs that had issues.

" + }, + "SearchJobArn":{ + "shape":"SearchJobArn", + "documentation":"

The unique string that identifies the Amazon Resource Name (ARN) of the specified search job.

" + } + }, + "documentation":"

This is the summary of an export job.

" + }, + "ExportSpecification":{ + "type":"structure", + "members":{ + "s3ExportSpecification":{ + "shape":"S3ExportSpecification", + "documentation":"

This specifies the destination Amazon S3 bucket for the export job and, if included, the destination prefix.

" + } + }, + "documentation":"

This contains the export specification object.

", + "union":true + }, + "FilePath":{ + "type":"string", + "sensitive":true + }, + "GenericId":{"type":"string"}, + "GetSearchJobInput":{ + "type":"structure", + "required":["SearchJobIdentifier"], + "members":{ + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

Required unique string that specifies the search job.

", + "location":"uri", + "locationName":"SearchJobIdentifier" + } + } + }, + "GetSearchJobOutput":{ + "type":"structure", + "required":[ + "Status", + "SearchScope", + "ItemFilters", + "CreationTime", + "SearchJobIdentifier", + "SearchJobArn" + ], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

Returned name of the specified search job.

" + }, + "SearchScopeSummary":{ + "shape":"SearchScopeSummary", + "documentation":"

Returned summary of the specified search job scope, including:

  • TotalRecoveryPointsToScanCount, the number of recovery points returned by the search.

  • TotalItemsToScanCount, the number of items returned by the search.

" + }, + "CurrentSearchProgress":{ + "shape":"CurrentSearchProgress", + "documentation":"

Returns numbers representing RecoveryPointsScannedCount, ItemsScannedCount, and ItemsMatchedCount.

" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

A status message will be returned for either a search job with a status of ERRORED or a COMPLETED job with issues.

For example, a message may say that a search contained recovery points unable to be scanned because of a permissions issue.

" + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The encryption key for the specified search job.

Example: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab.

" + }, + "CompletionTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that a search job completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "Status":{ + "shape":"SearchJobState", + "documentation":"

The current status of the specified search job.

A search job may have one of the following statuses: RUNNING; COMPLETED; STOPPING; STOPPED; or FAILED.

" + }, + "SearchScope":{ + "shape":"SearchScope", + "documentation":"

The search scope is all backup properties input into a search.

" + }, + "ItemFilters":{ + "shape":"ItemFilters", + "documentation":"

Item Filters represent all input item properties specified when the search was created.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that a search job was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that identifies the specified search job.

" + }, + "SearchJobArn":{ + "shape":"SearchJobArn", + "documentation":"

The unique string that identifies the Amazon Resource Name (ARN) of the specified search job.

" + } + } + }, + "GetSearchResultExportJobInput":{ + "type":"structure", + "required":["ExportJobIdentifier"], + "members":{ + "ExportJobIdentifier":{ + "shape":"GenericId", + "documentation":"

This is the unique string that identifies a specific export job.

Required for this operation.

", + "location":"uri", + "locationName":"ExportJobIdentifier" + } + } + }, + "GetSearchResultExportJobOutput":{ + "type":"structure", + "required":["ExportJobIdentifier"], + "members":{ + "ExportJobIdentifier":{ + "shape":"GenericId", + "documentation":"

This is the unique string that identifies the specified export job.

" + }, + "ExportJobArn":{ + "shape":"ExportJobArn", + "documentation":"

The unique Amazon Resource Name (ARN) that uniquely identifies the export job.

" + }, + "Status":{ + "shape":"ExportJobStatus", + "documentation":"

This is the current status of the export job.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that an export job was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "CompletionTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that an export job completed, in Unix format and Coordinated Universal Time (UTC). The value of CompletionTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

A status message is a string that is returned for an export job with a status of FAILED, along with steps to remedy and retry the operation.

" + }, + "ExportSpecification":{ + "shape":"ExportSpecification", + "documentation":"

The export specification consists of the destination S3 bucket to which the search results were exported, along with the destination prefix.

" + }, + "SearchJobArn":{ + "shape":"SearchJobArn", + "documentation":"

The unique string that identifies the Amazon Resource Name (ARN) of the specified search job.

" + } + } + }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:(?:aws|aws-cn|aws-us-gov):iam::[a-z0-9-]+:role/(.+)" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

Unexpected error during processing of request.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

Retry the call after the specified number of seconds.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

An internal server error occurred. Retry your request.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ItemFilters":{ + "type":"structure", + "members":{ + "S3ItemFilters":{ + "shape":"S3ItemFilters", + "documentation":"

This array can contain CreationTimes, ETags, ObjectKeys, Sizes, or VersionIds objects.

" + }, + "EBSItemFilters":{ + "shape":"EBSItemFilters", + "documentation":"

This array can contain CreationTimes, FilePaths, LastModificationTimes, or Sizes objects.

" + } + }, + "documentation":"

Item Filters represent all input item properties specified when the search was created.

Contains either EBSItemFilters or S3ItemFilters.

" + }, + "ListSearchJobBackupsInput":{ + "type":"structure", + "required":["SearchJobIdentifier"], + "members":{ + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that specifies the search job.

", + "location":"uri", + "locationName":"SearchJobIdentifier" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of returned backups included in a search job.

For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"ListSearchJobBackupsInputMaxResultsInteger", + "documentation":"

The maximum number of resource list items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSearchJobBackupsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListSearchJobBackupsOutput":{ + "type":"structure", + "required":["Results"], + "members":{ + "Results":{ + "shape":"SearchJobBackupsResults", + "documentation":"

The recovery points returned in the results of a search job.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of returned backups included in a search job.

For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListSearchJobResultsInput":{ + "type":"structure", + "required":["SearchJobIdentifier"], + "members":{ + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that specifies the search job.

", + "location":"uri", + "locationName":"SearchJobIdentifier" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of returned search job results.

For example, if a request is made to return MaxResults number of search job results, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"ListSearchJobResultsInputMaxResultsInteger", + "documentation":"

The maximum number of resource list items to be returned.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListSearchJobResultsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListSearchJobResultsOutput":{ + "type":"structure", + "required":["Results"], + "members":{ + "Results":{ + "shape":"Results", + "documentation":"

The results consist of either EBSResultItem or S3ResultItem.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of search job results.

For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListSearchJobsInput":{ + "type":"structure", + "members":{ + "ByStatus":{ + "shape":"SearchJobState", + "documentation":"

Include this parameter to filter the list by search job status.

", + "location":"querystring", + "locationName":"Status" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of returned search jobs.

For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListSearchJobsInputMaxResultsInteger", + "documentation":"

The maximum number of resource list items to be returned.

", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListSearchJobsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListSearchJobsOutput":{ + "type":"structure", + "required":["SearchJobs"], + "members":{ + "SearchJobs":{ + "shape":"SearchJobs", + "documentation":"

The search jobs among the list, with details of the returned search jobs.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of returned backups included in a search job.

For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListSearchResultExportJobsInput":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"ExportJobStatus", + "documentation":"

Include this parameter to filter the list of export jobs by status.

", + "location":"querystring", + "locationName":"Status" + }, + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that specifies the search job.

", + "location":"querystring", + "locationName":"SearchJobIdentifier" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of returned backups included in a search job.

For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

", + "location":"querystring", + "locationName":"NextToken" + }, + "MaxResults":{ + "shape":"ListSearchResultExportJobsInputMaxResultsInteger", + "documentation":"

The maximum number of resource list items to be returned.

", + "location":"querystring", + "locationName":"MaxResults" + } + } + }, + "ListSearchResultExportJobsInputMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListSearchResultExportJobsOutput":{ + "type":"structure", + "required":["ExportJobs"], + "members":{ + "ExportJobs":{ + "shape":"ExportJobSummaries", + "documentation":"

The export jobs returned by the operation.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The next item following a partial list of returned backups included in a search job.

For example, if a request is made to return MaxResults number of backups, NextToken allows you to return more items in your list starting at the location pointed to by the next token.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the resource.

", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

List of tags returned by the operation.

" + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "LongCondition":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{ + "shape":"Long", + "documentation":"

The value of an item included in one of the search item filters.

" + }, + "Operator":{ + "shape":"LongConditionOperator", + "documentation":"

A string that defines what values will be returned.

If this is included, avoid combinations of operators that will return all possible values. For example, including both EQUALS_TO and NOT_EQUALS_TO with a value of 4 will return all values.

" + } + }, + "documentation":"

The long condition contains a Value and can optionally contain an Operator.

" + }, + "LongConditionList":{ + "type":"list", + "member":{"shape":"LongCondition"}, + "max":10, + "min":1 + }, + "LongConditionOperator":{ + "type":"string", + "enum":[ + "EQUALS_TO", + "NOT_EQUALS_TO", + "LESS_THAN_EQUAL_TO", + "GREATER_THAN_EQUAL_TO" + ] + }, + "ObjectKey":{ + "type":"string", + "sensitive":true + }, + "RecoveryPoint":{"type":"string"}, + "RecoveryPointArnList":{ + "type":"list", + "member":{"shape":"RecoveryPoint"}, + "max":50, + "min":0 + }, + "ResourceArnList":{ + "type":"list", + "member":{"shape":"String"}, + "max":50, + "min":0 + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

Request references a resource which does not exist.

" + }, + "resourceId":{ + "shape":"String", + "documentation":"

Identifier of the resource affected.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

Type of the resource affected.

" + } + }, + "documentation":"

The resource was not found for this request.

Confirm that the resource information, such as the ARN or type, is correct and exists, then retry the request.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "S3", + "EBS" + ] + }, + "ResourceTypeList":{ + "type":"list", + "member":{"shape":"ResourceType"}, + "max":1, + "min":1 + }, + "ResultItem":{ + "type":"structure", + "members":{ + "S3ResultItem":{ + "shape":"S3ResultItem", + "documentation":"

These are items returned in the search results of an Amazon S3 search.

" + }, + "EBSResultItem":{ + "shape":"EBSResultItem", + "documentation":"

These are items returned in the search results of an Amazon EBS search.

" + } + }, + "documentation":"

This is an object representing the item returned in the results of a search for a specific resource type.

", + "union":true + }, + "Results":{ + "type":"list", + "member":{"shape":"ResultItem"} + }, + "S3ExportSpecification":{ + "type":"structure", + "required":["DestinationBucket"], + "members":{ + "DestinationBucket":{ + "shape":"String", + "documentation":"

This specifies the destination Amazon S3 bucket for the export job.

" + }, + "DestinationPrefix":{ + "shape":"String", + "documentation":"

This specifies the prefix for the destination Amazon S3 bucket for the export job.

" + } + }, + "documentation":"

This specification contains a required string of the destination bucket; optionally, you can include the destination prefix.

" + }, + "S3ItemFilter":{ + "type":"structure", + "members":{ + "ObjectKeys":{ + "shape":"StringConditionList", + "documentation":"

You can include 1 to 10 values.

If one value is included, the results will return only items that match the value.

If more than one value is included, the results will return all items that match any of the values.

" + }, + "Sizes":{ + "shape":"LongConditionList", + "documentation":"

You can include 1 to 10 values.

If one value is included, the results will return only items that match the value.

If more than one value is included, the results will return all items that match any of the values.

" + }, + "CreationTimes":{ + "shape":"TimeConditionList", + "documentation":"

You can include 1 to 10 values.

If one value is included, the results will return only items that match the value.

If more than one value is included, the results will return all items that match any of the values.

" + }, + "VersionIds":{ + "shape":"StringConditionList", + "documentation":"

You can include 1 to 10 values.

If one value is included, the results will return only items that match the value.

If more than one value is included, the results will return all items that match any of the values.

" + }, + "ETags":{ + "shape":"StringConditionList", + "documentation":"

You can include 1 to 10 values.

If one value is included, the results will return only items that match the value.

If more than one value is included, the results will return all items that match any of the values.

" + } + }, + "documentation":"

This contains arrays of objects, which may include ObjectKeys, Sizes, CreationTimes, VersionIds, and/or ETags.

" + }, + "S3ItemFilters":{ + "type":"list", + "member":{"shape":"S3ItemFilter"}, + "max":10, + "min":0 + }, + "S3ResultItem":{ + "type":"structure", + "members":{ + "BackupResourceArn":{ + "shape":"String", + "documentation":"

These are items in the returned results that match recovery point Amazon Resource Names (ARN) input during a search of Amazon S3 backup metadata.

" + }, + "SourceResourceArn":{ + "shape":"String", + "documentation":"

These are items in the returned results that match source Amazon Resource Names (ARN) input during a search of Amazon S3 backup metadata.

" + }, + "BackupVaultName":{ + "shape":"String", + "documentation":"

The name of the backup vault.

" + }, + "ObjectKey":{ + "shape":"ObjectKey", + "documentation":"

This is one or more items returned in the results of a search of Amazon S3 backup metadata that match the values input for object key.

" + }, + "ObjectSize":{ + "shape":"Long", + "documentation":"

These are items in the returned results that match values for object size(s) input during a search of Amazon S3 backup metadata.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

These are one or more items in the returned results that match values for item creation time input during a search of Amazon S3 backup metadata.

" + }, + "ETag":{ + "shape":"String", + "documentation":"

These are one or more items in the returned results that match values for ETags input during a search of Amazon S3 backup metadata.

" + }, + "VersionId":{ + "shape":"String", + "documentation":"

These are one or more items in the returned results that match values for version IDs input during a search of Amazon S3 backup metadata.

" + } + }, + "documentation":"

These are the items returned in the results of a search of Amazon S3 backup metadata.

" + }, + "SearchJobArn":{"type":"string"}, + "SearchJobBackupsResult":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"SearchJobState", + "documentation":"

This is the status of the search job backup result.

" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

This is the status message included with the results.

" + }, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

This is the resource type of the search.

" + }, + "BackupResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the backup resources.

" + }, + "SourceResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the source resources.

" + }, + "IndexCreationTime":{ + "shape":"Timestamp", + "documentation":"

This is the creation time of the backup index.

" + }, + "BackupCreationTime":{ + "shape":"Timestamp", + "documentation":"

This is the creation time of the backup (recovery point).

" + } + }, + "documentation":"

This contains the information about recovery points returned in results of a search job.

" + }, + "SearchJobBackupsResults":{ + "type":"list", + "member":{"shape":"SearchJobBackupsResult"} + }, + "SearchJobState":{ + "type":"string", + "enum":[ + "RUNNING", + "COMPLETED", + "STOPPING", + "STOPPED", + "FAILED" + ] + }, + "SearchJobSummary":{ + "type":"structure", + "members":{ + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that specifies the search job.

" + }, + "SearchJobArn":{ + "shape":"SearchJobArn", + "documentation":"

The unique string that identifies the Amazon Resource Name (ARN) of the specified search job.

" + }, + "Name":{ + "shape":"String", + "documentation":"

This is the name of the search job.

" + }, + "Status":{ + "shape":"SearchJobState", + "documentation":"

This is the status of the search job.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

This is the creation time of the search job.

" + }, + "CompletionTime":{ + "shape":"Timestamp", + "documentation":"

This is the completion time of the search job.

" + }, + "SearchScopeSummary":{ + "shape":"SearchScopeSummary", + "documentation":"

Returned summary of the specified search job scope, including:

  • TotalRecoveryPointsToScanCount, the number of recovery points returned by the search.

  • TotalItemsToScanCount, the number of items returned by the search.

" + }, + "StatusMessage":{ + "shape":"String", + "documentation":"

A status message will be returned for either a search job with a status of ERRORED or a COMPLETED job with issues.

For example, a message may say that a search contained recovery points unable to be scanned because of a permissions issue.

" + } + }, + "documentation":"

This is information pertaining to a search job.

" + }, + "SearchJobs":{ + "type":"list", + "member":{"shape":"SearchJobSummary"} + }, + "SearchScope":{ + "type":"structure", + "required":["BackupResourceTypes"], + "members":{ + "BackupResourceTypes":{ + "shape":"ResourceTypeList", + "documentation":"

The resource types included in a search.

Eligible resource types include S3 and EBS.

" + }, + "BackupResourceCreationTime":{ + "shape":"BackupCreationTimeFilter", + "documentation":"

This is the time a backup resource was created.

" + }, + "SourceResourceArns":{ + "shape":"ResourceArnList", + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the source resources.

" + }, + "BackupResourceArns":{ + "shape":"RecoveryPointArnList", + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the backup resources.

" + }, + "BackupResourceTags":{ + "shape":"TagMap", + "documentation":"

These are one or more tags on the backup (recovery point).

" + } + }, + "documentation":"

The search scope is all backup properties input into a search.

" + }, + "SearchScopeSummary":{ + "type":"structure", + "members":{ + "TotalRecoveryPointsToScanCount":{ + "shape":"Integer", + "documentation":"

This is the count of the total number of backups that will be scanned in a search.

" + }, + "TotalItemsToScanCount":{ + "shape":"Long", + "documentation":"

This is the count of the total number of items that will be scanned in a search.

" + } + }, + "documentation":"

The summary of the specified search job scope, including:

  • TotalRecoveryPointsToScanCount, the number of recovery points returned by the search.

  • TotalItemsToScanCount, the number of items returned by the search.

" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

This request was not successful because it exceeded a service quota limit.

" + }, + "resourceId":{ + "shape":"String", + "documentation":"

Identifier of the resource.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

Type of resource.

" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

This is the code unique to the originating service with the quota.

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

This is the code specific to the quota type.

" + } + }, + "documentation":"

The request was denied due to exceeding the permitted quota limits.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "StartSearchJobInput":{ + "type":"structure", + "required":["SearchScope"], + "members":{ + "Tags":{ + "shape":"TagMap", + "documentation":"

Optional tags to include with the search job. A tag is a key-value pair you can use to manage, filter, and search for your resources.

" + }, + "Name":{ + "shape":"StartSearchJobInputNameString", + "documentation":"

Include alphanumeric characters to create a name for this search job.

" + }, + "EncryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The encryption key for the specified search job.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Include this parameter to allow multiple identical calls for idempotency.

A client token is valid for 8 hours after the first request that uses it is completed. After this time, any request with the same token is treated as a new request.

" + }, + "SearchScope":{ + "shape":"SearchScope", + "documentation":"

This object can contain BackupResourceTypes, BackupResourceArns, BackupResourceCreationTime, BackupResourceTags, and SourceResourceArns to filter the recovery points returned by the search job.

" + }, + "ItemFilters":{ + "shape":"ItemFilters", + "documentation":"

Item Filters represent all input item properties specified when the search was created.

Contains either EBSItemFilters or S3ItemFilters.

" + } + } + }, + "StartSearchJobInputNameString":{ + "type":"string", + "max":500, + "min":0 + }, + "StartSearchJobOutput":{ + "type":"structure", + "members":{ + "SearchJobArn":{ + "shape":"SearchJobArn", + "documentation":"

The unique string that identifies the Amazon Resource Name (ARN) of the specified search job.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The date and time that a job was created, in Unix format and Coordinated Universal Time (UTC). The value of CreationTime is accurate to milliseconds. For example, the value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM.

" + }, + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that specifies the search job.

" + } + } + }, + "StartSearchResultExportJobInput":{ + "type":"structure", + "required":[ + "SearchJobIdentifier", + "ExportSpecification" + ], + "members":{ + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that specifies the search job.

" + }, + "ExportSpecification":{ + "shape":"ExportSpecification", + "documentation":"

This specification contains a required string of the destination bucket; optionally, you can include the destination prefix.

" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Include this parameter to allow multiple identical calls for idempotency.

A client token is valid for 8 hours after the first request that uses it is completed. After this time, any request with the same token is treated as a new request.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /.

" + }, + "RoleArn":{ + "shape":"IamRoleArn", + "documentation":"

This parameter specifies the role ARN used to start the search results export jobs.

" + } + } + }, + "StartSearchResultExportJobOutput":{ + "type":"structure", + "required":["ExportJobIdentifier"], + "members":{ + "ExportJobArn":{ + "shape":"ExportJobArn", + "documentation":"

This is the unique ARN (Amazon Resource Name) that belongs to the new export job.

" + }, + "ExportJobIdentifier":{ + "shape":"GenericId", + "documentation":"

This is the unique identifier that specifies the new export job.

" + } + } + }, + "StopSearchJobInput":{ + "type":"structure", + "required":["SearchJobIdentifier"], + "members":{ + "SearchJobIdentifier":{ + "shape":"GenericId", + "documentation":"

The unique string that specifies the search job.

", + "location":"uri", + "locationName":"SearchJobIdentifier" + } + } + }, + "StopSearchJobOutput":{ + "type":"structure", + "members":{ + } + }, + "String":{"type":"string"}, + "StringCondition":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{ + "shape":"String", + "documentation":"

The value of the string.

" + }, + "Operator":{ + "shape":"StringConditionOperator", + "documentation":"

A string that defines what values will be returned.

If this is included, avoid combinations of operators that will return all possible values. For example, including both EQUALS_TO and NOT_EQUALS_TO with a value of 4 will return all values.

" + } + }, + "documentation":"

This contains the value of the string and can contain one or more operators.

" + }, + "StringConditionList":{ + "type":"list", + "member":{"shape":"StringCondition"}, + "max":10, + "min":1 + }, + "StringConditionOperator":{ + "type":"string", + "enum":[ + "EQUALS_TO", + "NOT_EQUALS_TO", + "CONTAINS", + "DOES_NOT_CONTAIN", + "BEGINS_WITH", + "ENDS_WITH", + "DOES_NOT_BEGIN_WITH", + "DOES_NOT_END_WITH" + ] + }, + "TagKeys":{ + "type":"list", + "member":{"shape":"String"} + }, + "TagMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the resource.

This is the resource that will have the indicated tags.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

Required tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

Request was unsuccessful due to request throttling.

" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

This is the code unique to the originating service.

" + }, + "quotaCode":{ + "shape":"String", + "documentation":"

This is the code unique to the originating service with the quota.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

Retry the call after the specified number of seconds.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The request was denied due to request throttling.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "TimeCondition":{ + "type":"structure", + "required":["Value"], + "members":{ + "Value":{ + "shape":"Timestamp", + "documentation":"

This is the timestamp value of the time condition.

" + }, + "Operator":{ + "shape":"TimeConditionOperator", + "documentation":"

A string that defines what values will be returned.

If this is included, avoid combinations of operators that will return all possible values. For example, including both EQUALS_TO and NOT_EQUALS_TO with a value of 4 will return all values.

" + } + }, + "documentation":"

A time condition denotes a creation time, last modification time, or other time.

" + }, + "TimeConditionList":{ + "type":"list", + "member":{"shape":"TimeCondition"}, + "max":10, + "min":1 + }, + "TimeConditionOperator":{ + "type":"string", + "enum":[ + "EQUALS_TO", + "NOT_EQUALS_TO", + "LESS_THAN_EQUAL_TO", + "GREATER_THAN_EQUAL_TO" + ] + }, + "Timestamp":{"type":"timestamp"}, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the resource where you want to remove tags.

", + "location":"uri", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeys", + "documentation":"

This required parameter contains the tag keys you want to remove from the source.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

The input fails to satisfy the constraints specified by an Amazon service.

" + } + }, + "documentation":"

The input fails to satisfy the constraints specified by a service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

Backup Search

Backup Search is the recovery point and item level search for Backup.

For additional information, see:

" +} diff --git a/botocore/data/backupsearch/2018-05-10/waiters-2.json b/botocore/data/backupsearch/2018-05-10/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/backupsearch/2018-05-10/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index bdb73b7e2f..2f2550a1f2 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -1742,6 +1742,11 @@ }, "documentation":"

The properties for a task definition that describes the container and volume definitions of an Amazon ECS task. You can specify which Docker images to use, the required resources, and other configurations related to launching the task definition through an Amazon ECS service or task.

" }, + "EksAnnotationsMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, "EksAttemptContainerDetail":{ "type":"structure", "members":{ @@ -2037,6 +2042,10 @@ "shape":"String", "documentation":"

The path on the container where the volume is mounted.

" }, + "subPath":{ + "shape":"String", + "documentation":"

A sub-path inside the referenced volume instead of its root.

" + }, "readOnly":{ "shape":"Boolean", "documentation":"

If this value is true, the container has read-only access to the volume. Otherwise, the container can write to the volume. The default value is false.

" @@ -2092,9 +2101,32 @@ "labels":{ "shape":"EksLabelsMap", "documentation":"

Key-value pairs used to identify, sort, and organize Kubernetes resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object.

" + }, + "annotations":{ + "shape":"EksAnnotationsMap", + "documentation":"

Key-value pairs used to attach arbitrary, non-identifying metadata to Kubernetes objects. Valid annotation keys have two segments: an optional prefix and a name, separated by a slash (/).

  • The prefix is optional and must be 253 characters or less. If specified, the prefix must be a DNS subdomain: a series of DNS labels separated by dots (.), and it must end with a slash (/).

  • The name segment is required and must be 63 characters or less. It can include alphanumeric characters ([a-z0-9A-Z]), dashes (-), underscores (_), and dots (.), but must begin and end with an alphanumeric character.

Annotation values must be 255 characters or less.

Annotations can be added or modified at any time. Each resource can have multiple annotations.

" + }, + "namespace":{ + "shape":"String", + "documentation":"

The namespace of the Amazon EKS cluster. In Kubernetes, namespaces provide a mechanism for isolating groups of resources within a single cluster. Names of resources need to be unique within a namespace, but not across namespaces. Batch places Batch Job pods in this namespace. If this field is provided, the value can't be empty or null. It must meet the following requirements:

  • 1-63 characters long

  • Can't be set to default

  • Can't start with kube

  • Must match the following regular expression: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$

For more information, see Namespaces in the Kubernetes documentation. This namespace can be different from the kubernetesNamespace set in the compute environment's EksConfiguration, but must have identical role-based access control (RBAC) roles as the compute environment's kubernetesNamespace. For multi-node parallel jobs, the same value must be provided across all the node ranges.

" } }, - "documentation":"

Describes and uniquely identifies Kubernetes resources. For example, the compute environment that a pod runs in or the jobID for a job running in the pod. For more information, see Understanding Kubernetes Objects in the Kubernetes documentation.

" + "documentation":"

Describes and uniquely identifies Kubernetes resources. For example, the compute environment that a pod runs in or the jobID for a job running in the pod. For more information, see Understanding Kubernetes Objects in the Kubernetes documentation.

" + }, + "EksPersistentVolumeClaim":{ + "type":"structure", + "required":["claimName"], + "members":{ + "claimName":{ + "shape":"String", + "documentation":"

The name of the persistentVolumeClaim bound to a persistentVolume. For more information, see Persistent Volume Claims in the Kubernetes documentation.

" + }, + "readOnly":{ + "shape":"Boolean", + "documentation":"

An optional boolean value indicating if the mount is read only. Default is false. For more information, see Read Only Mounts in the Kubernetes documentation.

" + } + }, + "documentation":"

A persistentVolumeClaim volume is used to mount a PersistentVolume into a Pod. PersistentVolumeClaims are a way for users to \"claim\" durable storage without knowing the details of the particular cloud environment. See the information about PersistentVolumes in the Kubernetes documentation.

" }, "EksPodProperties":{ "type":"structure", @@ -2275,6 +2307,10 @@ "secret":{ "shape":"EksSecret", "documentation":"

Specifies the configuration of a Kubernetes secret volume. For more information, see secret in the Kubernetes documentation.

" + }, + "persistentVolumeClaim":{ + "shape":"EksPersistentVolumeClaim", + "documentation":"

Specifies the configuration of a Kubernetes persistentVolumeClaim bound to a persistentVolume. For more information, see Persistent Volume Claims in the Kubernetes documentation.

" } }, "documentation":"

Specifies an Amazon EKS volume for a job definition.

" diff --git a/botocore/data/cleanroomsml/2023-09-06/service-2.json b/botocore/data/cleanroomsml/2023-09-06/service-2.json index f0878885bc..d9804c45da 100644 --- a/botocore/data/cleanroomsml/2023-09-06/service-2.json +++ b/botocore/data/cleanroomsml/2023-09-06/service-2.json @@ -1094,7 +1094,8 @@ "sqlParameters":{ "shape":"ProtectedQuerySQLParameters", "documentation":"

The protected SQL query parameters.

" - } + }, + "sqlComputeConfiguration":{"shape":"ComputeConfiguration"} }, "documentation":"

Defines the Amazon S3 bucket where the seed audience for the generating audience is stored.

" }, @@ -5419,7 +5420,7 @@ }, "dataSource":{ "shape":"ModelInferenceDataSource", - "documentation":"

Defines he data source that is used for the trained model inference job.

" + "documentation":"

Defines the data source that is used for the trained model inference job.

" }, "description":{ "shape":"ResourceDescription", diff --git a/botocore/data/cloudfront/2020-05-31/service-2.json b/botocore/data/cloudfront/2020-05-31/service-2.json index 6f29f51d32..450b373251 100644 --- a/botocore/data/cloudfront/2020-05-31/service-2.json +++ b/botocore/data/cloudfront/2020-05-31/service-2.json @@ -4519,11 +4519,11 @@ }, "OriginReadTimeout":{ "shape":"integer", - "documentation":"

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the origin response timeout. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.

For more information, see Origin Response Timeout in the Amazon CloudFront Developer Guide.

" + "documentation":"

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the origin response timeout. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.

For more information, see Response timeout (custom origins only) in the Amazon CloudFront Developer Guide.

" }, "OriginKeepaliveTimeout":{ "shape":"integer", - "documentation":"

Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.

For more information, see Origin Keep-alive Timeout in the Amazon CloudFront Developer Guide.

" + "documentation":"

Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.

For more information, see Keep-alive timeout (custom origins only) in the Amazon CloudFront Developer Guide.

" } }, "documentation":"

A custom origin. A custom origin is any origin that is not an Amazon S3 bucket, with one exception. An Amazon S3 bucket that is configured with static website hosting is a custom origin.

" @@ -5113,7 +5113,7 @@ }, "DefaultRootObject":{ "shape":"string", - "documentation":"

The object that you want CloudFront to request from your origin (for example, index.html) when a viewer requests the root URL for your distribution (https://www.example.com) instead of an object in your distribution (https://www.example.com/product-description.html). Specifying a default root object avoids exposing the contents of your distribution.

Specify only the object name, for example, index.html. Don't add a / before the object name.

If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element.

To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.

To replace the default root object, update the distribution configuration and specify the new object.

For more information about the default root object, see Creating a Default Root Object in the Amazon CloudFront Developer Guide.

" + "documentation":"

When a viewer requests the root URL for your distribution, the default root object is the object that you want CloudFront to request from your origin. For example, if your root URL is https://www.example.com, you can specify CloudFront to return the index.html file as the default root object. You can specify a default root object so that viewers see a specific file or object, instead of another object in your distribution (for example, https://www.example.com/product-description.html). A default root object avoids exposing the contents of your distribution.

You can specify the object name or a path to the object name (for example, index.html or exampleFolderName/index.html). Your string can't begin with a forward slash (/). Only specify the object name or the path to the object.

If you don't want to specify a default root object when you create a distribution, include an empty DefaultRootObject element.

To delete the default root object from an existing distribution, update the distribution configuration and include an empty DefaultRootObject element.

To replace the default root object, update the distribution configuration and specify the new object.

For more information about the default root object, see Specify a default root object in the Amazon CloudFront Developer Guide.

" }, "Origins":{ "shape":"Origins", @@ -12865,6 +12865,14 @@ "VpcOriginId":{ "shape":"string", "documentation":"

The VPC origin ID.

" + }, + "OriginReadTimeout":{ + "shape":"integer", + "documentation":"

Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the origin response timeout. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 30 seconds.

For more information, see Response timeout (custom origins only) in the Amazon CloudFront Developer Guide.

" + }, + "OriginKeepaliveTimeout":{ + "shape":"integer", + "documentation":"

Specifies how long, in seconds, CloudFront persists its connection to the origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't specify otherwise) is 5 seconds.

For more information, see Keep-alive timeout (custom origins only) in the Amazon CloudFront Developer Guide.

" } }, "documentation":"

An Amazon CloudFront VPC origin configuration.

" diff --git a/botocore/data/codepipeline/2015-07-09/service-2.json b/botocore/data/codepipeline/2015-07-09/service-2.json index 9ae1256b9b..f3b79d28d2 100644 --- a/botocore/data/codepipeline/2015-07-09/service-2.json +++ b/botocore/data/codepipeline/2015-07-09/service-2.json @@ -342,7 +342,7 @@ {"shape":"ValidationException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists the rules for the condition.

" + "documentation":"

Lists the rules for the condition. For more information about conditions, see Stage conditions. For more information about rules, see the CodePipeline rule reference.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1394,7 +1394,7 @@ "members":{ "category":{ "shape":"ActionCategory", - "documentation":"

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.

  • Source

  • Build

  • Test

  • Deploy

  • Invoke

  • Approval

" + "documentation":"

A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. Valid categories are limited to one of the following values.

  • Source

  • Build

  • Test

  • Deploy

  • Invoke

  • Approval

  • Compute

" }, "owner":{ "shape":"ActionOwner", @@ -1848,7 +1848,7 @@ "documentation":"

The rules that make up the condition.

" } }, - "documentation":"

The condition for the stage. A condition is made up of the rules and the result for the condition.

" + "documentation":"

The condition for the stage. A condition is made up of the rules and the result for the condition. For more information about conditions, see Stage conditions. For more information about rules, see the CodePipeline rule reference.

" }, "ConditionExecution":{ "type":"structure", @@ -2375,7 +2375,7 @@ "members":{ "category":{ "shape":"ActionCategory", - "documentation":"

Defines what kind of action can be taken in the stage. The following are the valid values:

  • Source

  • Build

  • Test

  • Deploy

  • Approval

  • Invoke

" + "documentation":"

Defines what kind of action can be taken in the stage. The following are the valid values:

  • Source

  • Build

  • Test

  • Deploy

  • Approval

  • Invoke

  • Compute

" }, "owner":{ "shape":"ActionTypeOwner", @@ -4426,7 +4426,7 @@ "members":{ "name":{ "shape":"RuleName", - "documentation":"

The name of the rule that is created for the condition, such as CheckAllResults.

" + "documentation":"

The name of the rule that is created for the condition, such as VariableCheck.

" }, "ruleTypeId":{ "shape":"RuleTypeId", @@ -4436,6 +4436,10 @@ "shape":"RuleConfigurationMap", "documentation":"

The action configuration fields for the rule.

" }, + "commands":{ + "shape":"CommandList", + "documentation":"

The shell commands to run with your commands rule in CodePipeline. All commands are supported except multi-line formats. While CodeBuild logs and permissions are used, you do not need to create any resources in CodeBuild.

Using compute time for this action will incur separate charges in CodeBuild.

" + }, "inputArtifacts":{ "shape":"InputArtifactList", "documentation":"

The input artifacts fields for the rule, such as specifying an input file for the rule.

" @@ -4453,7 +4457,7 @@ "documentation":"

The action timeout for the rule.

" } }, - "documentation":"

Represents information about the rule to be created for an associated condition. An example would be creating a new rule for an entry condition, such as a rule that checks for a test result before allowing the run to enter the deployment stage.

" + "documentation":"

Represents information about the rule to be created for an associated condition. An example would be creating a new rule for an entry condition, such as a rule that checks for a test result before allowing the run to enter the deployment stage. For more information about conditions, see Stage conditions. For more information about rules, see the CodePipeline rule reference.

" }, "RuleDeclarationList":{ "type":"list", diff --git a/botocore/data/ecs/2014-11-13/service-2.json b/botocore/data/ecs/2014-11-13/service-2.json index 8f5a1fdf9a..71db4e50d2 100644 --- a/botocore/data/ecs/2014-11-13/service-2.json +++ b/botocore/data/ecs/2014-11-13/service-2.json @@ -2646,7 +2646,7 @@ }, "maximumPercent":{ "shape":"BoxedInteger", - "documentation":"

If a service is using the rolling update (ECS) deployment type, the maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the REPLICA service scheduler and has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default maximumPercent value for a service using the REPLICA service scheduler is 200%.

The Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see Amazon ECS services.

If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types, and tasks in the service use the EC2 launch type, the maximum percent value is set to the default value. The maximum percent value is used to define the upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state.

You can't specify a custom maximumPercent value for a service that uses either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and has tasks that use the EC2 launch type.

If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.

" + "documentation":"

If a service is using the rolling update (ECS) deployment type, the maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the REPLICA service scheduler and has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default maximumPercent value for a service using the REPLICA service scheduler is 200%.

The Amazon ECS scheduler uses this parameter to replace unhealthy tasks by starting replacement tasks first and then stopping the unhealthy tasks, as long as cluster resources for starting replacement tasks are available. For more information about how the scheduler replaces unhealthy tasks, see Amazon ECS services.

If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types, and tasks in the service use the EC2 launch type, the maximum percent value is set to the default value. The maximum percent value is used to define the upper limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state.

You can't specify a custom maximumPercent value for a service that uses either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and has tasks that use the EC2 launch type.

If the service uses either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types, and the tasks in the service use the Fargate launch type, the maximum percent value is not used. The value is still returned when describing your service.

" }, "minimumHealthyPercent":{ "shape":"BoxedInteger", @@ -3886,7 +3886,7 @@ }, "cluster":{ "shape":"String", - "documentation":"

The cluster that hosts the service. This can either be the cluster name or ARN. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performanceIf you don't specify a cluster, default is used.

" + "documentation":"

The cluster that hosts the service. This can either be the cluster name or ARN. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. If you don't specify a cluster, default is used.

" }, "status":{ "shape":"ServiceDeploymentStatusList", @@ -4169,7 +4169,7 @@ }, "options":{ "shape":"LogConfigurationOptionsMap", - "documentation":"

The configuration options to send to the log driver.

The options you can specify depend on the log driver. Some of the options you can specify when you use the awslogs log driver to route logs to Amazon CloudWatch include the following:

awslogs-create-group

Required: No

Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false.

Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group.

awslogs-region

Required: Yes

Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.

awslogs-group

Required: Yes

Make sure to specify a log group that the awslogs log driver sends its log streams to.

awslogs-stream-prefix

Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type.

Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id.

If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.

For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.

You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.

awslogs-datetime-format

Required: No

This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.

For more information, see awslogs-datetime-format.

You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

awslogs-multiline-pattern

Required: No

This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

For more information, see awslogs-multiline-pattern.

This option is ignored if awslogs-datetime-format is also configured.

You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

mode

Required: No

Valid values: non-blocking | blocking

This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.

If you use the blocking mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.

If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver.

max-buffer-size

Required: No

Default value: 1m

When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.

To route logs using the splunk log router, you need to specify a splunk-token and a splunk-url.

When you use the awsfirelens log router to route logs to an Amazon Web Services Service or Amazon Web Services Partner Network destination for log storage and analytics, you can set the log-driver-buffer-limit option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.

Other options you can specify when using awsfirelens to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with region and a name for the log stream with delivery_stream.

When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a data stream name with stream.

When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host (OpenSearch Service endpoint without protocol), Port, Index, Type, Aws_auth, Aws_region, Suppress_Type_Name, and tls.

When you export logs to Amazon S3, you can specify the bucket using the bucket option. You can also specify region, total_file_size, upload_timeout, and use_put_object as options.

This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "documentation":"

The configuration options to send to the log driver.

The options you can specify depend on the log driver. Some of the options you can specify when you use the awslogs log driver to route logs to Amazon CloudWatch include the following:

awslogs-create-group

Required: No

Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false.

Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group.

awslogs-region

Required: Yes

Specify the Amazon Web Services Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.

awslogs-group

Required: Yes

Make sure to specify a log group that the awslogs log driver sends its log streams to.

awslogs-stream-prefix

Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.

Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id.

If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.

For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.

You must specify a stream-prefix for your logs in order for them to appear in the Log pane when using the Amazon ECS console.

awslogs-datetime-format

Required: No

This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.

For more information, see awslogs-datetime-format.

You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

awslogs-multiline-pattern

Required: No

This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages.

For more information, see awslogs-multiline-pattern.

This option is ignored if awslogs-datetime-format is also configured.

You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options.

Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.

mode

Required: No

Valid values: non-blocking | blocking

This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.

If you use the blocking mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.

If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver.

max-buffer-size

Required: No

Default value: 1m

When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.

To route logs using the splunk log router, you need to specify a splunk-token and a splunk-url.

When you use the awsfirelens log router to route logs to an Amazon Web Services Service or Amazon Web Services Partner Network destination for log storage and analytics, you can set the log-driver-buffer-limit option to limit the number of events that are buffered in memory before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.

Other options you can specify when using awsfirelens to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the Amazon Web Services Region with region and a name for the log stream with delivery_stream.

When you export logs to Amazon Kinesis Data Streams, you can specify an Amazon Web Services Region with region and a data stream name with stream.

When you export logs to Amazon OpenSearch Service, you can specify options like Name, Host (OpenSearch Service endpoint without protocol), Port, Index, Type, Aws_auth, Aws_region, Suppress_Type_Name, and tls. For more information, see Under the hood: FireLens for Amazon ECS Tasks.

When you export logs to Amazon S3, you can specify the bucket using the bucket option. You can also specify region, total_file_size, upload_timeout, and use_put_object as options.

This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" }, "secretOptions":{ "shape":"SecretList", @@ -4894,6 +4894,10 @@ "runtimePlatform":{ "shape":"RuntimePlatform", "documentation":"

The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.

" + }, + "enableFaultInjection":{ + "shape":"BoxedBoolean", + "documentation":"

Enables fault injection when you register your task definition and allows for fault injection requests to be accepted from the task's containers. The default value is false.

" } } }, @@ -6540,6 +6544,10 @@ "ephemeralStorage":{ "shape":"EphemeralStorage", "documentation":"

The ephemeral storage settings to use for tasks run with the task definition.

" + }, + "enableFaultInjection":{ + "shape":"BoxedBoolean", + "documentation":"

Enables fault injection and allows for fault injection requests to be accepted from the task's containers. The default value is false.

" } }, "documentation":"

The details of a task definition which describes the container and volume definitions of an Amazon Elastic Container Service task. You can specify which Docker images to use, the required resources, and other configurations related to launching the task definition through an Amazon ECS service or task.

" diff --git a/botocore/data/m2/2021-04-28/service-2.json b/botocore/data/m2/2021-04-28/service-2.json index f42b9c245d..129a76a80d 100644 --- a/botocore/data/m2/2021-04-28/service-2.json +++ b/botocore/data/m2/2021-04-28/service-2.json @@ -1228,6 +1228,10 @@ "shape":"EntityName", "documentation":"

The name of the runtime environment. Must be unique within the account.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

The network type required for the runtime environment.

" + }, "preferredMaintenanceWindow":{ "shape":"String50", "documentation":"

Configures the maintenance window that you want for the runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi and must be less than 24 hours. The following two examples are valid maintenance windows: sun:23:45-mon:00:15 or sat:01:00-sat:03:00.

If you do not provide a value, a random system-generated value will be assigned.

" @@ -1760,6 +1764,10 @@ "shape":"EntityName", "documentation":"

The name of the runtime environment.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

The network type supported by the runtime environment.

" + }, "status":{ "shape":"EnvironmentLifecycle", "documentation":"

The status of the runtime environment

" @@ -2368,6 +2376,10 @@ "shape":"EntityName", "documentation":"

The name of the runtime environment. Must be unique within the account.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

The network type supported by the runtime environment.

" + }, "pendingMaintenance":{ "shape":"PendingMaintenance", "documentation":"

Indicates the pending maintenance scheduled on this environment.

" @@ -3036,6 +3048,13 @@ "max":2000, "min":1 }, + "NetworkType":{ + "type":"string", + "enum":[ + "ipv4", + "dual" + ] + }, "NextToken":{ "type":"string", "pattern":"^\\S{1,2000}$" diff --git a/botocore/data/synthetics/2017-10-11/service-2.json b/botocore/data/synthetics/2017-10-11/service-2.json index e241625299..dc279bb736 100644 --- a/botocore/data/synthetics/2017-10-11/service-2.json +++ b/botocore/data/synthetics/2017-10-11/service-2.json @@ -513,7 +513,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}" + "pattern":"arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:canary:[0-9a-z_\\-]{1,255}" }, "CanaryCodeInput":{ "type":"structure", @@ -1093,7 +1093,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" }, "GetCanaryRequest":{ "type":"structure", @@ -1204,7 +1204,7 @@ "type":"string", "max":128, "min":1, - "pattern":"arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:group:[0-9a-z]+" + "pattern":"arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:group:[0-9a-z]+" }, "GroupIdentifier":{ "type":"string", @@ -1260,7 +1260,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:(aws[a-zA-Z-]*)?:kms:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:key/[\\w\\-\\/]+" + "pattern":"arn:(aws[a-zA-Z-]*)?:kms:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:key/[\\w\\-\\/]+" }, "ListAssociatedGroupsRequest":{ "type":"structure", @@ -1446,7 +1446,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+" + "pattern":"arn:(aws[a-zA-Z-]*)?:synthetics:[a-z]{2,4}(-[a-z]{2,4})?-[a-z]+-\\d{1}:\\d{12}:(canary|group):[0-9a-z_\\-]+" }, "ResourceList":{ "type":"list", @@ -1755,7 +1755,7 @@ }, "BaseCanaryRunId":{ "shape":"String", - "documentation":"

Specifies which canary run to use the screenshots from as the baseline for future visual monitoring with this canary. Valid values are nextrun to use the screenshots from the next run after this update is made, lastrun to use the screenshots from the most recent run before this update was made, or the value of Id in the CanaryRun from any past run of this canary.

" + "documentation":"

Specifies which canary run to use the screenshots from as the baseline for future visual monitoring with this canary. Valid values are nextrun to use the screenshots from the next run after this update is made, lastrun to use the screenshots from the most recent run before this update was made, or the value of Id in the CanaryRun from a run of this canary in the past 31 days. If you specify the Id of a canary run older than 31 days, the operation returns a 400 validation exception error.

" } }, "documentation":"

An object that specifies what screenshots to use as a baseline for visual monitoring by this canary. It can optionally also specify parts of the screenshots to ignore during the visual monitoring comparison.

Visual monitoring is supported only on canaries running the syn-puppeteer-node-3.2 runtime or later. For more information, see Visual monitoring and Visual monitoring blueprint

" @@ -1784,6 +1784,10 @@ "SecurityGroupIds":{ "shape":"SecurityGroupIds", "documentation":"

The IDs of the security groups for this canary.

" + }, + "Ipv6AllowedForDualStack":{ + "shape":"NullableBoolean", + "documentation":"

Set this to true to allow outbound IPv6 traffic on VPC canaries that are connected to dual-stack subnets. The default is false.

" } }, "documentation":"

If this canary is to test an endpoint in a VPC, this structure contains information about the subnets and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

" @@ -1802,6 +1806,10 @@ "SecurityGroupIds":{ "shape":"SecurityGroupIds", "documentation":"

The IDs of the security groups for this canary.

" + }, + "Ipv6AllowedForDualStack":{ + "shape":"NullableBoolean", + "documentation":"

Indicates whether this canary allows outbound IPv6 traffic if it is connected to dual-stack subnets.

" } }, "documentation":"

If this canary is to test an endpoint in a VPC, this structure contains information about the subnets and security groups of the VPC endpoint. For more information, see Running a Canary in a VPC.

" diff --git a/tests/functional/endpoint-rules/account/endpoint-tests-1.json b/tests/functional/endpoint-rules/account/endpoint-tests-1.json index ac318cb0f9..640b9eadf6 100644 --- a/tests/functional/endpoint-rules/account/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/account/endpoint-tests-1.json @@ -1,31 +1,50 @@ { "testCases": [ { - "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingName": "account", - "signingRegion": "us-east-1" - } - ] - }, - "url": "https://account.us-east-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "aws-global", + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://account-fips.us-east-1.api.aws" } }, @@ -39,6 +58,14 @@ "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://account-fips.us-east-1.amazonaws.com" } }, @@ -52,6 +79,14 @@ "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, "url": "https://account.us-east-1.api.aws" } }, @@ -69,7 +104,6 @@ "authSchemes": [ { "name": "sigv4", - "signingName": "account", "signingRegion": "us-east-1" } ] @@ -84,75 +118,76 @@ } }, { - "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "account", "signingRegion": "cn-northwest-1" } ] }, - "url": "https://account.cn-northwest-1.amazonaws.com.cn" + "url": "https://account-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "aws-cn-global", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://account-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://account-fips.cn-north-1.amazonaws.com.cn" + "properties": { + 
"authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://account-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://account.cn-north-1.api.amazonwebservices.com.cn" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://account.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingName": "account", "signingRegion": "cn-northwest-1" } ] @@ -161,59 +196,91 @@ } }, "params": { - "Region": "cn-north-1", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://account-fips.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://account-fips.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account-fips.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://account.us-gov-east-1.api.aws" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": true } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://account.us-gov-east-1.amazonaws.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-gov-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } @@ -233,6 +300,14 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack 
disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://account-fips.us-iso-east-1.c2s.ic.gov" } }, @@ -257,6 +332,14 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, "url": "https://account.us-iso-east-1.c2s.ic.gov" } }, @@ -281,6 +364,14 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://account-fips.us-isob-east-1.sc2s.sgov.gov" } }, @@ -305,6 +396,14 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, "url": "https://account.us-isob-east-1.sc2s.sgov.gov" } }, @@ -315,54 +414,131 @@ } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://account-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-east-1", + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://account.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { + "Region": "eu-isoe-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "documentation": "For region 
us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://account-fips.us-isof-south-1.csp.hci.ic.gov" + } }, "params": { - "Region": "us-east-1", + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://account.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/tests/functional/endpoint-rules/backupsearch/endpoint-tests-1.json b/tests/functional/endpoint-rules/backupsearch/endpoint-tests-1.json new file mode 100644 index 0000000000..5986f9074b --- /dev/null +++ b/tests/functional/endpoint-rules/backupsearch/endpoint-tests-1.json @@ -0,0 +1,313 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://backup-search-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://backup-search.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://backup-search-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://backup-search.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-gov-west-1 with 
FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://backup-search-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://backup-search.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://backup-search-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://backup-search.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://backup-search-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://backup-search.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://backup-search-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "eu-isoe-west-1" + } + ] + }, + "url": "https://backup-search.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": "https://backup-search-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-isof-south-1" + } + ] + }, + "url": 
"https://backup-search.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file From c34e3c388a9bf03f1860f6616363b7e4246bc8ee Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 17 Dec 2024 19:03:40 +0000 Subject: [PATCH 13/20] Update endpoints model --- botocore/data/endpoints.json | 390 ++++++++++++++++++++++++++++++----- 1 file changed, 340 insertions(+), 50 deletions(-) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 05a28b33ff..1c7d34cccf 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -2229,6 +2229,12 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "athena.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "athena-fips.ca-central-1.amazonaws.com", @@ -10698,37 +10704,81 @@ }, "endpoints" : { "af-south-1" : { - "hostname" : "internetmonitor.af-south-1.api.aws" + "hostname" : "internetmonitor.af-south-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-east-1" : { - "hostname" : "internetmonitor.ap-east-1.api.aws" + "hostname" : "internetmonitor.ap-east-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-1" : { - "hostname" : "internetmonitor.ap-northeast-1.api.aws" + "hostname" : "internetmonitor.ap-northeast-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-2" : { - "hostname" : "internetmonitor.ap-northeast-2.api.aws" + "hostname" : "internetmonitor.ap-northeast-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-northeast-3" : { - "hostname" : "internetmonitor.ap-northeast-3.api.aws" + "hostname" : "internetmonitor.ap-northeast-3.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-1" : { - "hostname" : "internetmonitor.ap-south-1.api.aws" + "hostname" : "internetmonitor.ap-south-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-south-2" : { - "hostname" : "internetmonitor.ap-south-2.api.aws" + "hostname" : "internetmonitor.ap-south-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-1" : { - "hostname" : "internetmonitor.ap-southeast-1.api.aws" + "hostname" : "internetmonitor.ap-southeast-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-2" : { - "hostname" : "internetmonitor.ap-southeast-2.api.aws" + "hostname" : "internetmonitor.ap-southeast-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-3" : { - "hostname" : "internetmonitor.ap-southeast-3.api.aws" + "hostname" : "internetmonitor.ap-southeast-3.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-4" : { - "hostname" : 
"internetmonitor.ap-southeast-4.api.aws" + "hostname" : "internetmonitor.ap-southeast-4.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] }, "ap-southeast-5" : { "hostname" : "internetmonitor.ap-southeast-5.api.aws" @@ -10738,52 +10788,108 @@ "variants" : [ { "hostname" : "internetmonitor-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "hostname" : "internetmonitor.ca-west-1.api.aws" }, "eu-central-1" : { - "hostname" : "internetmonitor.eu-central-1.api.aws" + "hostname" : "internetmonitor.eu-central-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-central-2" : { - "hostname" : "internetmonitor.eu-central-2.api.aws" + "hostname" : "internetmonitor.eu-central-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-north-1" : { - "hostname" : "internetmonitor.eu-north-1.api.aws" + "hostname" : "internetmonitor.eu-north-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-1" : { - "hostname" : "internetmonitor.eu-south-1.api.aws" + "hostname" : "internetmonitor.eu-south-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-south-2" : { - "hostname" : "internetmonitor.eu-south-2.api.aws" + "hostname" : "internetmonitor.eu-south-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-1" : { - "hostname" : "internetmonitor.eu-west-1.api.aws" + "hostname" : "internetmonitor.eu-west-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-2" : { - "hostname" : "internetmonitor.eu-west-2.api.aws" + "hostname" : "internetmonitor.eu-west-2.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] }, "eu-west-3" : { - "hostname" : "internetmonitor.eu-west-3.api.aws" + "hostname" : "internetmonitor.eu-west-3.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] }, "il-central-1" : { "hostname" : "internetmonitor.il-central-1.api.aws" }, "me-central-1" : { - "hostname" : "internetmonitor.me-central-1.api.aws" + "hostname" : "internetmonitor.me-central-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "me-south-1" : { - "hostname" : "internetmonitor.me-south-1.api.aws" + "hostname" : "internetmonitor.me-south-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "sa-east-1" : { - "hostname" : "internetmonitor.sa-east-1.api.aws" + "hostname" : "internetmonitor.sa-east-1.api.aws", + "variants" : [ { + "hostname" : "internetmonitor.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] }, "us-east-1" : { "hostname" : "internetmonitor.us-east-1.api.aws", "variants" : [ { "hostname" : "internetmonitor-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] 
+ }, { + "hostname" : "internetmonitor.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { @@ -10791,6 +10897,12 @@ "variants" : [ { "hostname" : "internetmonitor-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { @@ -10798,6 +10910,12 @@ "variants" : [ { "hostname" : "internetmonitor-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { @@ -10805,6 +10923,12 @@ "variants" : [ { "hostname" : "internetmonitor-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "internetmonitor-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "internetmonitor.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -12392,28 +12516,138 @@ }, "lakeformation" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, - "ca-central-1" : { }, - "ca-west-1" : { }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "lakeformation.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "lakeformation.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "lakeformation.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "lakeformation.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.ca-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "lakeformation.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + 
"hostname" : "lakeformation.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "lakeformation.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "lakeformation.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "lakeformation.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "lakeformation.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "lakeformation.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "lakeformation.eu-west-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -12442,32 +12676,76 @@ "deprecated" : true, "hostname" : "lakeformation-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "lakeformation.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "lakeformation.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "lakeformation.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { "hostname" : "lakeformation-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "lakeformation-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "lakeformation-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "lakeformation-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "lakeformation-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "lakeformation-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "lakeformation-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "lakeformation-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "lakeformation.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -22318,10 +22596,12 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ca-central-1" : { }, + "ca-west-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -24505,8 +24785,18 @@ }, "lakeformation" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "lakeformation.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : 
"lakeformation.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "lambda" : { From 4c6a3cb4edde904319a31717f08dd9b859fc5ae8 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 17 Dec 2024 19:04:33 +0000 Subject: [PATCH 14/20] Bumping version to 1.35.83 --- .changes/1.35.83.json | 52 +++++++++++++++++++ .../api-change-account-96163.json | 5 -- .../next-release/api-change-backup-34455.json | 5 -- .../api-change-backupsearch-17260.json | 5 -- .../next-release/api-change-batch-5312.json | 5 -- .../api-change-cleanroomsml-63720.json | 5 -- .../api-change-cloudfront-34415.json | 5 -- .../api-change-codepipeline-86495.json | 5 -- .../next-release/api-change-ecs-68973.json | 5 -- .../next-release/api-change-m2-66981.json | 5 -- .../api-change-synthetics-5397.json | 5 -- CHANGELOG.rst | 15 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 14 files changed, 69 insertions(+), 52 deletions(-) create mode 100644 .changes/1.35.83.json delete mode 100644 .changes/next-release/api-change-account-96163.json delete mode 100644 .changes/next-release/api-change-backup-34455.json delete mode 100644 .changes/next-release/api-change-backupsearch-17260.json delete mode 100644 .changes/next-release/api-change-batch-5312.json delete mode 100644 .changes/next-release/api-change-cleanroomsml-63720.json delete mode 100644 .changes/next-release/api-change-cloudfront-34415.json delete mode 100644 .changes/next-release/api-change-codepipeline-86495.json delete mode 100644 .changes/next-release/api-change-ecs-68973.json delete mode 100644 .changes/next-release/api-change-m2-66981.json delete mode 100644 .changes/next-release/api-change-synthetics-5397.json diff --git a/.changes/1.35.83.json b/.changes/1.35.83.json new file mode 100644 index 0000000000..45fb0379d4 --- /dev/null +++ b/.changes/1.35.83.json @@ -0,0 +1,52 @@ +[ + { + "category": "``account``", + "description": "Update endpoint configuration.", + "type": "api-change" + }, + { + "category": "``backup``", + "description": "Add Support for Backup Indexing", + "type": "api-change" + }, + { + "category": "``backupsearch``", + "description": "Add support for searching backups", + "type": "api-change" + }, + { + "category": "``batch``", + "description": "This feature allows AWS Batch on Amazon EKS to support configuration of Pod Annotations, overriding Namespace on which the Batch job's Pod runs on, and allows Subpath and Persistent Volume claim to be set for AWS Batch on Amazon EKS jobs.", + "type": "api-change" + }, + { + "category": "``cleanroomsml``", + "description": "Add support for SQL compute configuration for StartAudienceGenerationJob API.", + "type": "api-change" + }, + { + "category": "``cloudfront``", + "description": "Adds support for OriginReadTimeout and OriginKeepaliveTimeout to create CloudFront Distributions with VPC Origins.", + "type": "api-change" + }, + { + "category": "``codepipeline``", + "description": "AWS CodePipeline V2 type pipelines now support Managed Compute Rule.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "Added support for enableFaultInjection task definition parameter which can be used to enable Fault Injection feature on ECS tasks.", + "type": "api-change" + }, + { + "category": "``m2``", + "description": "This release adds support for AWS Mainframe Modernization(M2) Service to allow specifying network type(ipv4, dual) for the environment instances. 
For dual network type, m2 environment applications will serve both IPv4 and IPv6 requests, whereas for ipv4 it will serve only IPv4 requests.", + "type": "api-change" + }, + { + "category": "``synthetics``", + "description": "Add support to toggle outbound IPv6 traffic on canaries connected to dualstack subnets. This behavior can be controlled via the new Ipv6AllowedForDualStack parameter of the VpcConfig input object in CreateCanary and UpdateCanary APIs.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-account-96163.json b/.changes/next-release/api-change-account-96163.json deleted file mode 100644 index 075e391a7a..0000000000 --- a/.changes/next-release/api-change-account-96163.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``account``", - "description": "Update endpoint configuration." -} diff --git a/.changes/next-release/api-change-backup-34455.json b/.changes/next-release/api-change-backup-34455.json deleted file mode 100644 index 6d5e5fc9d9..0000000000 --- a/.changes/next-release/api-change-backup-34455.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``backup``", - "description": "Add Support for Backup Indexing" -} diff --git a/.changes/next-release/api-change-backupsearch-17260.json b/.changes/next-release/api-change-backupsearch-17260.json deleted file mode 100644 index 070702a841..0000000000 --- a/.changes/next-release/api-change-backupsearch-17260.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``backupsearch``", - "description": "Add support for searching backups" -} diff --git a/.changes/next-release/api-change-batch-5312.json b/.changes/next-release/api-change-batch-5312.json deleted file mode 100644 index 31f5afe1f3..0000000000 --- a/.changes/next-release/api-change-batch-5312.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``batch``", - "description": "This feature allows AWS Batch on Amazon EKS to support configuration of Pod Annotations, overriding Namespace on which the Batch job's Pod runs on, and allows Subpath and Persistent Volume claim to be set for AWS Batch on Amazon EKS jobs." -} diff --git a/.changes/next-release/api-change-cleanroomsml-63720.json b/.changes/next-release/api-change-cleanroomsml-63720.json deleted file mode 100644 index 47e5d9c770..0000000000 --- a/.changes/next-release/api-change-cleanroomsml-63720.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cleanroomsml``", - "description": "Add support for SQL compute configuration for StartAudienceGenerationJob API." -} diff --git a/.changes/next-release/api-change-cloudfront-34415.json b/.changes/next-release/api-change-cloudfront-34415.json deleted file mode 100644 index 9e97799d46..0000000000 --- a/.changes/next-release/api-change-cloudfront-34415.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cloudfront``", - "description": "Adds support for OriginReadTimeout and OriginKeepaliveTimeout to create CloudFront Distributions with VPC Origins." -} diff --git a/.changes/next-release/api-change-codepipeline-86495.json b/.changes/next-release/api-change-codepipeline-86495.json deleted file mode 100644 index 0c18d9c3b2..0000000000 --- a/.changes/next-release/api-change-codepipeline-86495.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``codepipeline``", - "description": "AWS CodePipeline V2 type pipelines now support Managed Compute Rule." 
-} diff --git a/.changes/next-release/api-change-ecs-68973.json b/.changes/next-release/api-change-ecs-68973.json deleted file mode 100644 index 1318990553..0000000000 --- a/.changes/next-release/api-change-ecs-68973.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ecs``", - "description": "Added support for enableFaultInjection task definition parameter which can be used to enable Fault Injection feature on ECS tasks." -} diff --git a/.changes/next-release/api-change-m2-66981.json b/.changes/next-release/api-change-m2-66981.json deleted file mode 100644 index 8538008340..0000000000 --- a/.changes/next-release/api-change-m2-66981.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``m2``", - "description": "This release adds support for AWS Mainframe Modernization(M2) Service to allow specifying network type(ipv4, dual) for the environment instances. For dual network type, m2 environment applications will serve both IPv4 and IPv6 requests, whereas for ipv4 it will serve only IPv4 requests." -} diff --git a/.changes/next-release/api-change-synthetics-5397.json b/.changes/next-release/api-change-synthetics-5397.json deleted file mode 100644 index dc398e9f8e..0000000000 --- a/.changes/next-release/api-change-synthetics-5397.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``synthetics``", - "description": "Add support to toggle outbound IPv6 traffic on canaries connected to dualstack subnets. This behavior can be controlled via the new Ipv6AllowedForDualStack parameter of the VpcConfig input object in CreateCanary and UpdateCanary APIs." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 75e2b51be4..4b9560642f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,21 @@ CHANGELOG ========= +1.35.83 +======= + +* api-change:``account``: Update endpoint configuration. +* api-change:``backup``: Add Support for Backup Indexing +* api-change:``backupsearch``: Add support for searching backups +* api-change:``batch``: This feature allows AWS Batch on Amazon EKS to support configuration of Pod Annotations, overriding Namespace on which the Batch job's Pod runs on, and allows Subpath and Persistent Volume claim to be set for AWS Batch on Amazon EKS jobs. +* api-change:``cleanroomsml``: Add support for SQL compute configuration for StartAudienceGenerationJob API. +* api-change:``cloudfront``: Adds support for OriginReadTimeout and OriginKeepaliveTimeout to create CloudFront Distributions with VPC Origins. +* api-change:``codepipeline``: AWS CodePipeline V2 type pipelines now support Managed Compute Rule. +* api-change:``ecs``: Added support for enableFaultInjection task definition parameter which can be used to enable Fault Injection feature on ECS tasks. +* api-change:``m2``: This release adds support for AWS Mainframe Modernization(M2) Service to allow specifying network type(ipv4, dual) for the environment instances. For dual network type, m2 environment applications will serve both IPv4 and IPv6 requests, whereas for ipv4 it will serve only IPv4 requests. +* api-change:``synthetics``: Add support to toggle outbound IPv6 traffic on canaries connected to dualstack subnets. This behavior can be controlled via the new Ipv6AllowedForDualStack parameter of the VpcConfig input object in CreateCanary and UpdateCanary APIs. 
+ + 1.35.82 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index d9e3e0f63f..acc42c21d3 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.82' +__version__ = '1.35.83' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 5349359f96..25e3bb5e4a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.82' +release = '1.35.83' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From b2a0102728e3245f5998873203df16d6defed176 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 18 Dec 2024 19:05:02 +0000 Subject: [PATCH 15/20] Update to latest models --- .../api-change-amplify-47495.json | 5 + .../api-change-budgets-45312.json | 5 + .../api-change-connect-85567.json | 5 + .../api-change-connectparticipant-91624.json | 5 + .../api-change-datasync-24714.json | 5 + .../next-release/api-change-iot-57585.json | 5 + .../next-release/api-change-mwaa-81386.json | 5 + .../api-change-quicksight-44739.json | 5 + .../api-change-resiliencehub-9972.json | 5 + .../api-change-transfer-79171.json | 5 + .../data/amplify/2017-07-25/service-2.json | 60 +++- .../2016-10-20/endpoint-rule-set-1.json | 102 ++++++ .../data/connect/2017-08-08/service-2.json | 103 +++++- .../2018-09-07/service-2.json | 145 +++++++- .../data/datasync/2018-11-09/service-2.json | 313 +++++++++++++++++- botocore/data/iot/2015-05-28/service-2.json | 86 ++++- botocore/data/mwaa/2020-07-01/service-2.json | 6 +- .../data/quicksight/2018-04-01/service-2.json | 45 +++ .../resiliencehub/2020-04-30/service-2.json | 52 ++- .../data/transfer/2018-11-05/service-2.json | 51 ++- .../budgets/endpoint-tests-1.json | 66 +++- 21 files changed, 1024 insertions(+), 55 deletions(-) create mode 100644 .changes/next-release/api-change-amplify-47495.json create mode 100644 .changes/next-release/api-change-budgets-45312.json create mode 100644 .changes/next-release/api-change-connect-85567.json create mode 100644 .changes/next-release/api-change-connectparticipant-91624.json create mode 100644 .changes/next-release/api-change-datasync-24714.json create mode 100644 .changes/next-release/api-change-iot-57585.json create mode 100644 .changes/next-release/api-change-mwaa-81386.json create mode 100644 .changes/next-release/api-change-quicksight-44739.json create mode 100644 .changes/next-release/api-change-resiliencehub-9972.json create mode 100644 .changes/next-release/api-change-transfer-79171.json diff --git a/.changes/next-release/api-change-amplify-47495.json b/.changes/next-release/api-change-amplify-47495.json new file mode 100644 index 0000000000..1c7f21adbe --- /dev/null +++ b/.changes/next-release/api-change-amplify-47495.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``amplify``", + "description": "Added WAF Configuration to Amplify Apps" +} diff --git a/.changes/next-release/api-change-budgets-45312.json b/.changes/next-release/api-change-budgets-45312.json new file mode 100644 index 0000000000..08bc6647f9 --- /dev/null +++ b/.changes/next-release/api-change-budgets-45312.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``budgets``", + "description": "Releasing minor partition endpoint updates" +} diff --git a/.changes/next-release/api-change-connect-85567.json 
b/.changes/next-release/api-change-connect-85567.json new file mode 100644 index 0000000000..d2190b5fda --- /dev/null +++ b/.changes/next-release/api-change-connect-85567.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``connect``", + "description": "This release adds support for the UpdateParticipantAuthentication API used for customer authentication within Amazon Connect chats." +} diff --git a/.changes/next-release/api-change-connectparticipant-91624.json b/.changes/next-release/api-change-connectparticipant-91624.json new file mode 100644 index 0000000000..a4a5e8e253 --- /dev/null +++ b/.changes/next-release/api-change-connectparticipant-91624.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``connectparticipant``", + "description": "This release adds support for the GetAuthenticationUrl and CancelParticipantAuthentication APIs used for customer authentication within Amazon Connect chats. There are also minor updates to the GetAttachment API." +} diff --git a/.changes/next-release/api-change-datasync-24714.json b/.changes/next-release/api-change-datasync-24714.json new file mode 100644 index 0000000000..9da116e9ea --- /dev/null +++ b/.changes/next-release/api-change-datasync-24714.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``datasync``", + "description": "AWS DataSync introduces the ability to update attributes for in-cloud locations." +} diff --git a/.changes/next-release/api-change-iot-57585.json b/.changes/next-release/api-change-iot-57585.json new file mode 100644 index 0000000000..09f87d478c --- /dev/null +++ b/.changes/next-release/api-change-iot-57585.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``iot``", + "description": "Release connectivity status query API which is a dedicated high throughput(TPS) API to query a specific device's most recent connectivity state and metadata." +} diff --git a/.changes/next-release/api-change-mwaa-81386.json b/.changes/next-release/api-change-mwaa-81386.json new file mode 100644 index 0000000000..6b6c47e331 --- /dev/null +++ b/.changes/next-release/api-change-mwaa-81386.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``mwaa``", + "description": "Added support for Apache Airflow version 2.10.3 to MWAA." +} diff --git a/.changes/next-release/api-change-quicksight-44739.json b/.changes/next-release/api-change-quicksight-44739.json new file mode 100644 index 0000000000..1807e2639e --- /dev/null +++ b/.changes/next-release/api-change-quicksight-44739.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``quicksight``", + "description": "Add support for PerformanceConfiguration attribute to Dataset entity. Allow PerformanceConfiguration specification in CreateDataset and UpdateDataset APIs." +} diff --git a/.changes/next-release/api-change-resiliencehub-9972.json b/.changes/next-release/api-change-resiliencehub-9972.json new file mode 100644 index 0000000000..db8444a81d --- /dev/null +++ b/.changes/next-release/api-change-resiliencehub-9972.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``resiliencehub``", + "description": "AWS Resilience Hub now automatically detects already configured CloudWatch alarms and FIS experiments as part of the assessment process and returns the discovered resources in the corresponding list API responses. It also allows you to include or exclude test recommendations for an AppComponent." 
+} diff --git a/.changes/next-release/api-change-transfer-79171.json b/.changes/next-release/api-change-transfer-79171.json new file mode 100644 index 0000000000..5245befcf4 --- /dev/null +++ b/.changes/next-release/api-change-transfer-79171.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``transfer``", + "description": "Added AS2 agreement configurations to control filename preservation and message signing enforcement. Added AS2 connector configuration to preserve content type from S3 objects." +} diff --git a/botocore/data/amplify/2017-07-25/service-2.json b/botocore/data/amplify/2017-07-25/service-2.json index 920f60c5a3..f9ae5dd025 100644 --- a/botocore/data/amplify/2017-07-25/service-2.json +++ b/botocore/data/amplify/2017-07-25/service-2.json @@ -683,11 +683,11 @@ }, "createTime":{ "shape":"CreateTime", - "documentation":"

Creates a date and time for the Amplify app.

" + "documentation":"

A timestamp of when Amplify created the application.

" }, "updateTime":{ "shape":"UpdateTime", - "documentation":"

Updates the date and time for the Amplify app.

" + "documentation":"

A timestamp of when Amplify updated the application.

" }, "iamServiceRoleArn":{ "shape":"ServiceRoleArn", @@ -752,6 +752,14 @@ "cacheConfig":{ "shape":"CacheConfig", "documentation":"

The cache configuration for the Amplify app. If you don't specify the cache configuration type, Amplify uses the default AMPLIFY_MANAGED setting.

" + }, + "webhookCreateTime":{ + "shape":"webhookCreateTime", + "documentation":"

A timestamp of when Amplify created the webhook in your Git repository.

" + }, + "wafConfiguration":{ + "shape":"WafConfiguration", + "documentation":"

Describes the Firewall configuration for the Amplify app. Firewall support enables you to protect your hosted applications with a direct integration with WAF.

" } }, "documentation":"

Represents the different branches of a repository for building, deploying, and hosting an Amplify app.

" @@ -1012,11 +1020,11 @@ }, "createTime":{ "shape":"CreateTime", - "documentation":"

The creation date and time for a branch that is part of an Amplify app.

" + "documentation":"

A timestamp of when Amplify created the branch.

" }, "updateTime":{ "shape":"UpdateTime", - "documentation":"

The last updated date and time for a branch that is part of an Amplify app.

" + "documentation":"

A timestamp for the last updated time for a branch.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", @@ -2266,6 +2274,7 @@ "JobStatus":{ "type":"string", "enum":[ + "CREATED", "PENDING", "PROVISIONING", "RUNNING", @@ -2310,7 +2319,7 @@ }, "commitTime":{ "shape":"CommitTime", - "documentation":"

The commit date and time for the job.

" + "documentation":"

The commit date and time for the job.

" }, "startTime":{ "shape":"StartTime", @@ -3540,6 +3549,40 @@ "max":1000 }, "Verified":{"type":"boolean"}, + "WafConfiguration":{ + "type":"structure", + "members":{ + "webAclArn":{ + "shape":"WebAclArn", + "documentation":"

The Amazon Resource Name (ARN) for the web ACL associated with an Amplify app.

" + }, + "wafStatus":{ + "shape":"WafStatus", + "documentation":"

The status of the process to associate or disassociate a web ACL to an Amplify app.

" + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

The reason for the current status of the Firewall configuration.

" + } + }, + "documentation":"

Describes the Firewall configuration for a hosted Amplify application. Firewall support enables you to protect your web applications with a direct integration with WAF. For more information about using WAF protections for an Amplify application, see Firewall support for hosted sites in the Amplify User Guide.
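Because the new wafConfiguration member is surfaced on the App structure, existing read calls such as GetApp return it once a web ACL has been associated. A minimal boto3 sketch, assuming a boto3/botocore build that includes this model and a placeholder app ID (the response keys used below are the ones added in this diff):

    import boto3

    amplify = boto3.client("amplify", region_name="us-east-1")
    # appId is a placeholder; the "app" key in the response follows the App structure.
    app = amplify.get_app(appId="d111111111111")["app"]
    waf = app.get("wafConfiguration")  # absent until a web ACL is associated
    if waf:
        print(waf.get("wafStatus"), waf.get("webAclArn"), waf.get("statusReason"))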

" + }, + "WafStatus":{ + "type":"string", + "enum":[ + "ASSOCIATING", + "ASSOCIATION_FAILED", + "ASSOCIATION_SUCCESS", + "DISASSOCIATING", + "DISASSOCIATION_FAILED" + ] + }, + "WebAclArn":{ + "type":"string", + "max":512, + "min":0, + "pattern":"^arn:aws:wafv2:.*" + }, "Webhook":{ "type":"structure", "required":[ @@ -3574,11 +3617,11 @@ }, "createTime":{ "shape":"CreateTime", - "documentation":"

The create date and time for a webhook.

" + "documentation":"

A timestamp of when Amplify created the webhook in your Git repository.

" }, "updateTime":{ "shape":"UpdateTime", - "documentation":"

Updates the date and time for a webhook.

" + "documentation":"

A timestamp of when Amplify updated the webhook in your Git repository.

" } }, "documentation":"

Describes a webhook that connects repository events to an Amplify app.

" @@ -3599,7 +3642,8 @@ "Webhooks":{ "type":"list", "member":{"shape":"Webhook"} - } + }, + "webhookCreateTime":{"type":"timestamp"} }, "documentation":"

Amplify enables developers to develop and deploy cloud-powered mobile and web apps. Amplify Hosting provides a continuous delivery and hosting service for web applications. For more information, see the Amplify Hosting User Guide. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation for client app development. For more information, see the Amplify Framework.

" } diff --git a/botocore/data/budgets/2016-10-20/endpoint-rule-set-1.json b/botocore/data/budgets/2016-10-20/endpoint-rule-set-1.json index 2899674a8a..35ad019a45 100644 --- a/botocore/data/budgets/2016-10-20/endpoint-rule-set-1.json +++ b/botocore/data/budgets/2016-10-20/endpoint-rule-set-1.json @@ -212,6 +212,108 @@ }, "type": "endpoint" }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.c2s.ic.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-b" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://budgets.global.sc2s.sgov.gov", + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, { "conditions": [ { diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index 04c3bf33e8..81d1e2e048 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -701,7 +701,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates registration for a device token and a chat contact to receive real-time push notifications. For more information about push notifications, see Set up push notifications in Amazon Connect for mobile chat in the Amazon Connect Administrator Guide.

" + "documentation":"

Creates registration for a device token and a chat contact to receive real-time push notifications. For more information about push notifications, see Set up push notifications in Amazon Connect for mobile chat in the Amazon Connect Administrator Guide.

" }, "CreateQueue":{ "name":"CreateQueue", @@ -2420,7 +2420,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to returns both Amazon Lex V1 and V2 bots.

" + "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

For the specified version of Amazon Lex, returns a paginated list of all the Amazon Lex bots currently associated with the instance. Use this API to return both Amazon Lex V1 and V2 bots.

" }, "ListContactEvaluations":{ "name":"ListContactEvaluations", @@ -4304,6 +4304,24 @@ ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Updates an existing configuration for a resource type. This API is idempotent.

" }, + "UpdateParticipantAuthentication":{ + "name":"UpdateParticipantAuthentication", + "http":{ + "method":"POST", + "requestUri":"/contact/update-participant-authentication" + }, + "input":{"shape":"UpdateParticipantAuthenticationRequest"}, + "output":{"shape":"UpdateParticipantAuthenticationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServiceException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Instructs Amazon Connect to resume the authentication process. The subsequent actions depend on the request body contents:

  • If a code is provided: Connect retrieves the identity information from Amazon Cognito and imports it into Connect Customer Profiles.

  • If an error is provided: The error branch of the Authenticate Customer block is executed.

The API returns a success response to acknowledge the request. However, the interaction and exchange of identity information occur asynchronously after the response is returned.
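In boto3 builds that pick up this model, the operation surfaces as update_participant_authentication on the connect client. A minimal sketch with placeholder values; State and Code are the query parameters Cognito appended to the redirectUri, and Error/ErrorDescription would be passed instead of Code on the failure path described above:

    import boto3

    connect = boto3.client("connect", region_name="us-west-2")
    connect.update_participant_authentication(
        State="state-from-redirect-uri",
        InstanceId="11111111-2222-3333-4444-555555555555",  # placeholder instance ID
        Code="authorization-code-from-redirect-uri",
    )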

" + }, "UpdateParticipantRoleConfig":{ "name":"UpdateParticipantRoleConfig", "http":{ @@ -6108,6 +6126,20 @@ "documentation":"

Contains information for score and potential quality issues for Audio

" }, "AudioQualityScore":{"type":"float"}, + "AuthenticationError":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$", + "sensitive":true + }, + "AuthenticationErrorDescription":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^[\\x20-\\x21\\x23-\\x5B\\x5D-\\x7E]*$", + "sensitive":true + }, "AuthenticationProfile":{ "type":"structure", "members":{ @@ -6211,6 +6243,12 @@ "type":"list", "member":{"shape":"AuthenticationProfileSummary"} }, + "AuthorizationCode":{ + "type":"string", + "max":2048, + "min":1, + "sensitive":true + }, "AutoAccept":{"type":"boolean"}, "AvailableNumberSummary":{ "type":"structure", @@ -6880,6 +6918,10 @@ "shape":"WisdomInfo", "documentation":"

Information about Amazon Connect Wisdom.

" }, + "CustomerId":{ + "shape":"CustomerId", + "documentation":"

The customer's identification number. For example, the CustomerId may be a customer number from your CRM. You can create a Lambda function to pull the unique customer ID of the caller from your CRM system. If you enable Amazon Connect Voice ID capability, this attribute is populated with the CustomerSpeakerId of the caller.

" + }, "CustomerEndpoint":{ "shape":"EndpointInfo", "documentation":"

The customer or external third party participant endpoint.

" @@ -9287,6 +9329,17 @@ }, "documentation":"

Information about the Customer on the contact.

" }, + "CustomerId":{ + "type":"string", + "max":128, + "min":0 + }, + "CustomerIdNonEmpty":{ + "type":"string", + "max":128, + "min":1, + "sensitive":true + }, "CustomerProfileAttributesSerialized":{"type":"string"}, "CustomerQualityMetrics":{ "type":"structure", @@ -14465,7 +14518,8 @@ "MULTI_PARTY_CONFERENCE", "HIGH_VOLUME_OUTBOUND", "ENHANCED_CONTACT_MONITORING", - "ENHANCED_CHAT_MONITORING" + "ENHANCED_CHAT_MONITORING", + "MULTI_PARTY_CHAT_CONFERENCE" ] }, "InstanceAttributeValue":{ @@ -14683,7 +14737,8 @@ "FILE_SCANNER", "SES_IDENTITY", "ANALYTICS_CONNECTOR", - "CALL_TRANSFER_CONNECTOR" + "CALL_TRANSFER_CONNECTOR", + "COGNITO_USER_POOL" ] }, "InternalServiceException":{ @@ -22205,6 +22260,10 @@ "SegmentAttributes":{ "shape":"SegmentAttributes", "documentation":"

A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in flows.

Attribute keys can include only alphanumeric, -, and _.

This field can be used to show channel subtype, such as connect:Guide.

The types application/vnd.amazonaws.connect.message.interactive and application/vnd.amazonaws.connect.message.interactive.response must be present in the SupportedMessagingContentTypes field of this API in order to set SegmentAttributes as { \"connect:Subtype\": {\"valueString\" : \"connect:Guide\" }}.

" + }, + "CustomerId":{ + "shape":"CustomerIdNonEmpty", + "documentation":"

The customer's identification number. For example, the CustomerId may be a customer number from your CRM.
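The hunk adds CustomerId next to SegmentAttributes; the enclosing request shape is not visible in this excerpt, but it appears to be StartChatContactRequest, since the SegmentAttributes text above references SupportedMessagingContentTypes. Under that assumption, a sketch of passing a CRM customer number when starting a chat (instance, flow, and customer IDs are placeholders; the other parameters are the operation's existing required fields, not part of this hunk):

    import boto3

    connect = boto3.client("connect", region_name="us-west-2")
    connect.start_chat_contact(
        InstanceId="11111111-2222-3333-4444-555555555555",
        ContactFlowId="66666666-7777-8888-9999-000000000000",
        ParticipantDetails={"DisplayName": "Jane Doe"},
        CustomerId="crm-0001",  # new field added in this hunk
    )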

" } } }, @@ -24584,6 +24643,40 @@ "StorageConfig":{"shape":"InstanceStorageConfig"} } }, + "UpdateParticipantAuthenticationRequest":{ + "type":"structure", + "required":[ + "State", + "InstanceId" + ], + "members":{ + "State":{ + "shape":"ParticipantToken", + "documentation":"

The state query parameter that was provided by Cognito in the redirectUri. This will also match the state parameter provided in the AuthenticationUrl from the GetAuthenticationUrl response.

" + }, + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

" + }, + "Code":{ + "shape":"AuthorizationCode", + "documentation":"

The code query parameter provided by Cognito in the redirectUri.

" + }, + "Error":{ + "shape":"AuthenticationError", + "documentation":"

The error query parameter provided by Cognito in the redirectUri.

" + }, + "ErrorDescription":{ + "shape":"AuthenticationErrorDescription", + "documentation":"

The error_description parameter provided by Cognito in the redirectUri.

" + } + } + }, + "UpdateParticipantAuthenticationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateParticipantRoleConfigChannelInfo":{ "type":"structure", "members":{ @@ -26572,7 +26665,7 @@ }, "IvrRecordingTrack":{ "shape":"IvrRecordingTrack", - "documentation":"

Identifies which IVR track is being recorded.

" + "documentation":"

Identifies which IVR track is being recorded.

One and only one of the track configurations should be present in the request.

" } }, "documentation":"

Contains information about the recording configuration settings.

" diff --git a/botocore/data/connectparticipant/2018-09-07/service-2.json b/botocore/data/connectparticipant/2018-09-07/service-2.json index 41f7f09550..1a82276705 100644 --- a/botocore/data/connectparticipant/2018-09-07/service-2.json +++ b/botocore/data/connectparticipant/2018-09-07/service-2.json @@ -5,14 +5,32 @@ "endpointPrefix":"participant.connect", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"Amazon Connect Participant", "serviceFullName":"Amazon Connect Participant Service", "serviceId":"ConnectParticipant", "signatureVersion":"v4", "signingName":"execute-api", - "uid":"connectparticipant-2018-09-07" + "uid":"connectparticipant-2018-09-07", + "auth":["aws.auth#sigv4"] }, "operations":{ + "CancelParticipantAuthentication":{ + "name":"CancelParticipantAuthentication", + "http":{ + "method":"POST", + "requestUri":"/participant/cancel-authentication" + }, + "input":{"shape":"CancelParticipantAuthenticationRequest"}, + "output":{"shape":"CancelParticipantAuthenticationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Cancels the authentication session. The opted out branch of the Authenticate Customer flow block will be taken.

The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats.
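A minimal boto3 sketch for this new operation, using placeholder values; SessionId comes from the authenticationInitiated chat event and ConnectionToken from CreateParticipantConnection, matching the request shape added later in this file:

    import boto3

    participant = boto3.client("connectparticipant", region_name="us-west-2")
    participant.cancel_participant_authentication(
        SessionId="11111111-2222-3333-4444-555555555555",
        ConnectionToken="connection-token-from-CreateParticipantConnection",
    )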

" + }, "CompleteAttachmentUpload":{ "name":"CompleteAttachmentUpload", "http":{ @@ -29,7 +47,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"} ], - "documentation":"

Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.

For security recommendations, see Amazon Connect Chat security best practices.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "CreateParticipantConnection":{ "name":"CreateParticipantConnection", @@ -45,7 +63,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Creates the participant's connection.

ParticipantToken is used for invoking this API instead of ConnectionToken.

The participant token is valid for the lifetime of the participant – until they are part of a contact.

The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic.

For chat, you need to publish the following on the established websocket connection:

{\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}

Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before.

Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide.

Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Creates the participant's connection.

For security recommendations, see Amazon Connect Chat security best practices.

ParticipantToken is used for invoking this API instead of ConnectionToken.

The participant token is valid for the lifetime of the participant – until they are part of a contact.

The response URL for WEBSOCKET Type has a connect expiry timeout of 100s. Clients must manually connect to the returned websocket URL and subscribe to the desired topic.

For chat, you need to publish the following on the established websocket connection:

{\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}

Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter, clients need to call this API again to obtain a new websocket URL and perform the same steps as before.

Message streaming support: This API can also be used together with the StartContactStreaming API to create a participant connection for chat contacts that are not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect Administrator Guide.

Feature specifications: For information about feature specifications, such as the allowed number of open websocket connections per participant, see Feature specifications in the Amazon Connect Administrator Guide.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "DescribeView":{ "name":"DescribeView", @@ -62,7 +80,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves the view for the specified view token.

" + "documentation":"

Retrieves the view for the specified view token.

For security recommendations, see Amazon Connect Chat security best practices.

" }, "DisconnectParticipant":{ "name":"DisconnectParticipant", @@ -78,7 +96,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Disconnects a participant.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Disconnects a participant.

For security recommendations, see Amazon Connect Chat security best practices.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "GetAttachment":{ "name":"GetAttachment", @@ -94,7 +112,23 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Provides a pre-signed URL for download of a completed attachment. This is an asynchronous API for use with active contacts.

For security recommendations, see Amazon Connect Chat security best practices.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.
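With the request and response members added later in this file (UrlExpiryInSeconds and AttachmentSizeInBytes), a download sketch looks like the following. AttachmentId and ConnectionToken are placeholders, and AttachmentId itself is an existing parameter not shown in this hunk, so treat it as an assumption:

    import boto3

    participant = boto3.client("connectparticipant", region_name="us-west-2")
    resp = participant.get_attachment(
        AttachmentId="attachment-id",      # existing parameter, assumed here
        ConnectionToken="connection-token",
        UrlExpiryInSeconds=60,             # new: accepts 5 to 300 seconds
    )
    print(resp["Url"], resp["UrlExpiry"], resp["AttachmentSizeInBytes"])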

" + }, + "GetAuthenticationUrl":{ + "name":"GetAuthenticationUrl", + "http":{ + "method":"POST", + "requestUri":"/participant/authentication-url" + }, + "input":{"shape":"GetAuthenticationUrlRequest"}, + "output":{"shape":"GetAuthenticationUrlResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Retrieves the AuthenticationUrl for the current authentication session for the AuthenticateCustomer flow block.

For security recommendations, see Amazon Connect Chat security best practices.

  • This API can only be called within one minute of receiving the authenticationInitiated event.

  • The current supported channel is chat. This API is not supported for Apple Messages for Business, WhatsApp, or SMS chats.
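A minimal sketch with placeholder values; RedirectUri must be a callback URL allowed by the Cognito app client, and the call has to happen within one minute of the authenticationInitiated event as noted above. The parameter names match the GetAuthenticationUrlRequest shape added later in this file:

    import boto3

    participant = boto3.client("connectparticipant", region_name="us-west-2")
    resp = participant.get_authentication_url(
        SessionId="11111111-2222-3333-4444-555555555555",
        RedirectUri="https://example.com/auth/callback",
        ConnectionToken="connection-token-from-CreateParticipantConnection",
    )
    sign_in_url = resp["AuthenticationUrl"]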

" }, "GetTranscript":{ "name":"GetTranscript", @@ -110,7 +144,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

If you have a process that consumes events in the transcript of an chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session:

  • application/vnd.amazonaws.connect.event.participant.left

  • application/vnd.amazonaws.connect.event.participant.joined

  • application/vnd.amazonaws.connect.event.chat.ended

  • application/vnd.amazonaws.connect.event.transfer.succeeded

  • application/vnd.amazonaws.connect.event.transfer.failed

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

For security recommendations, see Amazon Connect Chat security best practices.

If you have a process that consumes events in the transcript of a chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session:


  • application/vnd.amazonaws.connect.event.participant.left

  • application/vnd.amazonaws.connect.event.participant.joined

  • application/vnd.amazonaws.connect.event.chat.ended

  • application/vnd.amazonaws.connect.event.transfer.succeeded

  • application/vnd.amazonaws.connect.event.transfer.failed

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "SendEvent":{ "name":"SendEvent", @@ -127,7 +161,7 @@ {"shape":"ValidationException"}, {"shape":"ConflictException"} ], - "documentation":"

The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field.

Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field.

Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception.

For security recommendations, see Amazon Connect Chat security best practices.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "SendMessage":{ "name":"SendMessage", @@ -143,7 +177,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Sends a message.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Sends a message.

For security recommendations, see Amazon Connect Chat security best practices.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "StartAttachmentUpload":{ "name":"StartAttachmentUpload", @@ -160,7 +194,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Provides a pre-signed Amazon S3 URL in response for uploading the file directly to S3.

For security recommendations, see Amazon Connect Chat security best practices.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" } }, "shapes":{ @@ -229,7 +263,36 @@ "type":"list", "member":{"shape":"AttachmentItem"} }, + "AuthenticationUrl":{ + "type":"string", + "max":2083, + "min":1 + }, "Bool":{"type":"boolean"}, + "CancelParticipantAuthenticationRequest":{ + "type":"structure", + "required":[ + "SessionId", + "ConnectionToken" + ], + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The sessionId provided in the authenticationInitiated event.

" + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

The authentication token associated with the participant's connection.

", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "CancelParticipantAuthenticationResponse":{ + "type":"structure", + "members":{ + } + }, "ChatContent":{ "type":"string", "max":16384, @@ -448,11 +511,16 @@ "documentation":"

The authentication token associated with the participant's connection.

", "location":"header", "locationName":"X-Amz-Bearer" + }, + "UrlExpiryInSeconds":{ + "shape":"URLExpiryInSeconds", + "documentation":"

The number of seconds the pre-signed URL remains valid after it is generated. Valid values are 5 to 300 seconds.

" } } }, "GetAttachmentResponse":{ "type":"structure", + "required":["AttachmentSizeInBytes"], "members":{ "Url":{ "shape":"PreSignedAttachmentUrl", @@ -461,6 +529,44 @@ "UrlExpiry":{ "shape":"ISO8601Datetime", "documentation":"

The expiration time of the URL in ISO timestamp. It's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example, 2019-11-08T02:41:28.172Z.

" + }, + "AttachmentSizeInBytes":{ + "shape":"AttachmentSizeInBytes", + "documentation":"

The size of the attachment in bytes.

", + "box":true + } + } + }, + "GetAuthenticationUrlRequest":{ + "type":"structure", + "required":[ + "SessionId", + "RedirectUri", + "ConnectionToken" + ], + "members":{ + "SessionId":{ + "shape":"SessionId", + "documentation":"

The sessionId provided in the authenticationInitiated event.

" + }, + "RedirectUri":{ + "shape":"RedirectURI", + "documentation":"

The URL where the customer will be redirected after Amazon Cognito authorizes the user.

" + }, + "ConnectionToken":{ + "shape":"ParticipantToken", + "documentation":"

The authentication token associated with the participant's connection.

", + "location":"header", + "locationName":"X-Amz-Bearer" + } + } + }, + "GetAuthenticationUrlResponse":{ + "type":"structure", + "members":{ + "AuthenticationUrl":{ + "shape":"AuthenticationUrl", + "documentation":"

The URL where the customer will sign in to the identity provider. This URL contains the authorize endpoint for the Cognito UserPool used in the authentication.

" } } }, @@ -681,6 +787,11 @@ "type":"list", "member":{"shape":"Receipt"} }, + "RedirectURI":{ + "type":"string", + "max":1024, + "min":1 + }, "ResourceId":{"type":"string"}, "ResourceNotFoundException":{ "type":"structure", @@ -812,6 +923,11 @@ "error":{"httpStatusCode":402}, "exception":true }, + "SessionId":{ + "type":"string", + "max":36, + "min":36 + }, "SortKey":{ "type":"string", "enum":[ @@ -863,7 +979,7 @@ }, "UploadMetadata":{ "shape":"UploadMetadata", - "documentation":"

Fields to be used while uploading the attachment.

" + "documentation":"

The headers to be provided while uploading the file to the URL.

" } } }, @@ -899,6 +1015,11 @@ "type":"list", "member":{"shape":"Item"} }, + "URLExpiryInSeconds":{ + "type":"integer", + "max":300, + "min":5 + }, "UploadMetadata":{ "type":"structure", "members":{ @@ -1044,5 +1165,5 @@ "documentation":"

The websocket for the participant's connection.

" } }, - "documentation":"

Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat.

Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts.

" + "documentation":"

Amazon Connect is an easy-to-use omnichannel cloud contact center service that enables companies of any size to deliver superior customer service at a lower cost. Amazon Connect communications capabilities make it easy for companies to deliver personalized interactions across communication channels, including chat.

Use the Amazon Connect Participant Service to manage participants (for example, agents, customers, and managers listening in), and to send messages and events within a chat contact. The APIs in the service enable the following: sending chat messages, attachment sharing, managing a participant's connection state and message events, and retrieving chat transcripts.

" } diff --git a/botocore/data/datasync/2018-11-09/service-2.json b/botocore/data/datasync/2018-11-09/service-2.json index 8549334faa..547f692751 100644 --- a/botocore/data/datasync/2018-11-09/service-2.json +++ b/botocore/data/datasync/2018-11-09/service-2.json @@ -169,7 +169,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates a transfer location for a Network File System (NFS) file server. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses NFS file servers.

If you're copying data to or from an Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone.

" + "documentation":"

Creates a transfer location for a Network File System (NFS) file server. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses NFS file servers.

" }, "CreateLocationObjectStorage":{ "name":"CreateLocationObjectStorage", @@ -768,7 +768,77 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Modifies some configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.

" + "documentation":"

Modifies the following configurations of the Microsoft Azure Blob Storage transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with Azure Blob Storage.

" + }, + "UpdateLocationEfs":{ + "name":"UpdateLocationEfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationEfsRequest"}, + "output":{"shape":"UpdateLocationEfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Modifies the following configuration parameters of the Amazon EFS transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with Amazon EFS.
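In boto3 builds that include this model, the operation should surface as update_location_efs on the datasync client. The request shape is not included in this excerpt, so apart from LocationArn the parameter below is an assumption mirroring CreateLocationEfs; the ARN is a placeholder in the standard DataSync location format:

    import boto3

    datasync = boto3.client("datasync", region_name="us-east-1")
    datasync.update_location_efs(
        LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0",
        Subdirectory="/exported/path",  # assumed to mirror CreateLocationEfs
    )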

" + }, + "UpdateLocationFsxLustre":{ + "name":"UpdateLocationFsxLustre", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationFsxLustreRequest"}, + "output":{"shape":"UpdateLocationFsxLustreResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Modifies the following configuration parameters of the Amazon FSx for Lustre transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with FSx for Lustre.

" + }, + "UpdateLocationFsxOntap":{ + "name":"UpdateLocationFsxOntap", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationFsxOntapRequest"}, + "output":{"shape":"UpdateLocationFsxOntapResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Modifies the following configuration parameters of the Amazon FSx for NetApp ONTAP transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with FSx for ONTAP.

" + }, + "UpdateLocationFsxOpenZfs":{ + "name":"UpdateLocationFsxOpenZfs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationFsxOpenZfsRequest"}, + "output":{"shape":"UpdateLocationFsxOpenZfsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Modifies the following configuration parameters of the Amazon FSx for OpenZFS transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with FSx for OpenZFS.

Request parameters related to SMB aren't supported with the UpdateLocationFsxOpenZfs operation.

" + }, + "UpdateLocationFsxWindows":{ + "name":"UpdateLocationFsxWindows", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationFsxWindowsRequest"}, + "output":{"shape":"UpdateLocationFsxWindowsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Modifies the following configuration parameters of the Amazon FSx for Windows File Server transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with FSx for Windows File Server.

" }, "UpdateLocationHdfs":{ "name":"UpdateLocationHdfs", @@ -782,7 +852,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Updates some parameters of a previously created location for a Hadoop Distributed File System cluster.

" + "documentation":"

Modifies the following configuration parameters of the Hadoop Distributed File System (HDFS) transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with an HDFS cluster.

" }, "UpdateLocationNfs":{ "name":"UpdateLocationNfs", @@ -796,7 +866,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Modifies some configurations of the Network File System (NFS) transfer location that you're using with DataSync.

For more information, see Configuring transfers to or from an NFS file server.

" + "documentation":"

Modifies the following configuration parameters of the Network File System (NFS) transfer location that you're using with DataSync.

For more information, see Configuring transfers with an NFS file server.

" }, "UpdateLocationObjectStorage":{ "name":"UpdateLocationObjectStorage", @@ -810,7 +880,21 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Updates some parameters of an existing DataSync location for an object storage system.

" + "documentation":"

Modifies the following configuration parameters of the object storage transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with an object storage system.

" + }, + "UpdateLocationS3":{ + "name":"UpdateLocationS3", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateLocationS3Request"}, + "output":{"shape":"UpdateLocationS3Response"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalException"} + ], + "documentation":"

Modifies the following configuration parameters of the Amazon S3 transfer location that you're using with DataSync.

Before you begin, make sure that you read the following topics:

" }, "UpdateLocationSmb":{ "name":"UpdateLocationSmb", @@ -824,7 +908,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Updates some of the parameters of a Server Message Block (SMB) file server location that you can use for DataSync transfers.

" + "documentation":"

Modifies the following configuration parameters of the Server Message Block (SMB) transfer location that you're using with DataSync.

For more information, see Configuring DataSync transfers with an SMB file server.

" }, "UpdateStorageSystem":{ "name":"UpdateStorageSystem", @@ -1180,7 +1264,7 @@ "members":{ "Subdirectory":{ "shape":"EfsSubdirectory", - "documentation":"

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.

By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder).

" + "documentation":"

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on whether this is a source or destination location).

By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder).

" }, "EfsFilesystemArn":{ "shape":"EfsFilesystemArn", @@ -1228,19 +1312,19 @@ "members":{ "FsxFilesystemArn":{ "shape":"FsxFilesystemArn", - "documentation":"

The Amazon Resource Name (ARN) for the FSx for Lustre file system.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the FSx for Lustre file system.

" }, "SecurityGroupArns":{ "shape":"Ec2SecurityGroupArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are used to configure the FSx for Lustre file system.

" + "documentation":"

Specifies the Amazon Resource Names (ARNs) of up to five security groups that provide access to your FSx for Lustre file system.

The security groups must be able to access the file system's ports. The file system must also allow access from the security groups. For information about file system access, see the Amazon FSx for Lustre User Guide.

" }, "Subdirectory":{ "shape":"FsxLustreSubdirectory", - "documentation":"

A subdirectory in the location's path. This subdirectory in the FSx for Lustre file system is used to read data from the FSx for Lustre source location or write data to the FSx for Lustre destination.

" + "documentation":"

Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.

When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/).

" }, "Tags":{ "shape":"InputTagList", - "documentation":"

The key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location.

" + "documentation":"

Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your location.

" } } }, @@ -1249,7 +1333,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the FSx for Lustre file system location that's created.

" + "documentation":"

The Amazon Resource Name (ARN) of the FSx for Lustre file system location that you created.

" } } }, @@ -1272,7 +1356,7 @@ }, "Subdirectory":{ "shape":"FsxOntapSubdirectory", - "documentation":"

Specifies a path to the file share in the SVM where you'll copy your data.

You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1.

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

" + "documentation":"

Specifies a path to the file share in the SVM where you want to transfer data to or from.

You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1.

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.

" }, "Tags":{ "shape":"InputTagList", @@ -1359,7 +1443,7 @@ }, "Domain":{ "shape":"SmbDomain", - "documentation":"

Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.

" + "documentation":"

Specifies the name of the Windows domain that the FSx for Windows File Server file system belongs to.

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.

" }, "Password":{ "shape":"SmbPassword", @@ -2950,7 +3034,7 @@ "members":{ "MountOptions":{"shape":"NfsMountOptions"} }, - "documentation":"

Specifies the Network File System (NFS) protocol configuration that DataSync uses to access your Amazon FSx for OpenZFS or Amazon FSx for NetApp ONTAP file system.

" + "documentation":"

Specifies the Network File System (NFS) protocol configuration that DataSync uses to access your FSx for OpenZFS file system or FSx for ONTAP file system's storage virtual machine (SVM).

" }, "FsxProtocolSmb":{ "type":"structure", @@ -2961,7 +3045,37 @@ "members":{ "Domain":{ "shape":"SmbDomain", - "documentation":"

Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to.

If you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM.

" + "documentation":"

Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.

" + }, + "MountOptions":{"shape":"SmbMountOptions"}, + "Password":{ + "shape":"SmbPassword", + "documentation":"

Specifies the password of a user who has permission to access your SVM.

" + }, + "User":{ + "shape":"SmbUser", + "documentation":"

Specifies a user that can mount and access the files, folders, and metadata in your SVM.

For information about choosing a user with the right level of access for your transfer, see Using the SMB protocol.

" + } + }, + "documentation":"

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system's storage virtual machine (SVM). For more information, see Providing DataSync access to FSx for ONTAP file systems.

" + }, + "FsxUpdateProtocol":{ + "type":"structure", + "members":{ + "NFS":{"shape":"FsxProtocolNfs"}, + "SMB":{ + "shape":"FsxUpdateProtocolSmb", + "documentation":"

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your FSx for ONTAP file system's storage virtual machine (SVM).

" + } + }, + "documentation":"

Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system.

You can't update the Network File System (NFS) protocol configuration for FSx for ONTAP locations. DataSync currently only supports NFS version 3 with this location type.

" + }, + "FsxUpdateProtocolSmb":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"FsxUpdateSmbDomain", + "documentation":"

Specifies the name of the Windows domain that your storage virtual machine (SVM) belongs to.

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right SVM.

" }, "MountOptions":{"shape":"SmbMountOptions"}, "Password":{ @@ -2973,7 +3087,12 @@ "documentation":"

Specifies a user that can mount and access the files, folders, and metadata in your SVM.

For information about choosing a user with the right level of access for your transfer, see Using the SMB protocol.

" } }, - "documentation":"

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system. For more information, see Accessing FSx for ONTAP file systems.

" + "documentation":"

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system's storage virtual machine (SVM). For more information, see Providing DataSync access to FSx for ONTAP file systems.

" + }, + "FsxUpdateSmbDomain":{ + "type":"string", + "max":253, + "pattern":"^([A-Za-z0-9]((\\.|-+)?[A-Za-z0-9]){0,252})?$" }, "FsxWindowsSubdirectory":{ "type":"string", @@ -4317,7 +4436,7 @@ "documentation":"

Specifies the ARN of the IAM role that DataSync uses to access your S3 bucket.

" } }, - "documentation":"

Specifies the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that DataSync uses to access your S3 bucket.

For more information, see Accessing S3 buckets.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that DataSync uses to access your S3 bucket.

For more information, see Providing DataSync access to S3 buckets.

" }, "S3ManifestConfig":{ "type":"structure", @@ -5068,6 +5187,130 @@ "members":{ } }, + "UpdateLocationEfsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the Amazon EFS transfer location that you're updating.

" + }, + "Subdirectory":{ + "shape":"EfsSubdirectory", + "documentation":"

Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data on your file system (depending on whether this is a source or destination location).

By default, DataSync uses the root directory (or access point if you provide one by using AccessPointArn). You can also include subdirectories using forward slashes (for example, /path/to/folder).

" + }, + "AccessPointArn":{ + "shape":"UpdatedEfsAccessPointArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system.

For more information, see Accessing restricted Amazon EFS file systems.

" + }, + "FileSystemAccessRoleArn":{ + "shape":"UpdatedEfsIamRoleArn", + "documentation":"

Specifies an Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.

For information on creating this role, see Creating a DataSync IAM role for Amazon EFS file system access.

" + }, + "InTransitEncryption":{ + "shape":"EfsInTransitEncryption", + "documentation":"

Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.

If you specify an access point using AccessPointArn or an IAM role using FileSystemAccessRoleArn, you must set this parameter to TLS1_2.
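
To make the shape of this new request concrete, here is a minimal boto3 sketch; it assumes an SDK built from this model, every ARN and path below is a placeholder, and only LocationArn is required.

import boto3

datasync = boto3.client("datasync")

# Placeholder ARNs and path; omit any field you don't want to change.
datasync.update_location_efs(
    LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0",
    Subdirectory="/path/to/folder",
    AccessPointArn="arn:aws:elasticfilesystem:us-east-1:111122223333:access-point/fsap-0123456789abcdef0",
    FileSystemAccessRoleArn="arn:aws:iam::111122223333:role/DataSyncEfsAccess",
    InTransitEncryption="TLS1_2",  # required when an access point or IAM role is specified
)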

" + } + } + }, + "UpdateLocationEfsResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLocationFsxLustreRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the FSx for Lustre transfer location that you're updating.

" + }, + "Subdirectory":{ + "shape":"SmbSubdirectory", + "documentation":"

Specifies a mount path for your FSx for Lustre file system. The path can include subdirectories.

When the location is used as a source, DataSync reads data from the mount path. When the location is used as a destination, DataSync writes data to the mount path. If you don't include this parameter, DataSync uses the file system's root directory (/).

" + } + } + }, + "UpdateLocationFsxLustreResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLocationFsxOntapRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the FSx for ONTAP transfer location that you're updating.

" + }, + "Protocol":{ + "shape":"FsxUpdateProtocol", + "documentation":"

Specifies the data transfer protocol that DataSync uses to access your Amazon FSx file system.

" + }, + "Subdirectory":{ + "shape":"FsxOntapSubdirectory", + "documentation":"

Specifies a path to the file share in the storage virtual machine (SVM) where you want to transfer data to or from.

You can specify a junction path (also known as a mount point), qtree path (for NFS file shares), or share name (for SMB file shares). For example, your mount path might be /vol1, /vol1/tree1, or /share1.

Don't specify a junction path in the SVM's root volume. For more information, see Managing FSx for ONTAP storage virtual machines in the Amazon FSx for NetApp ONTAP User Guide.
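
The same pattern applies to the other new UpdateLocation* operations. Below is a hedged boto3 sketch for this one, using placeholder values; per the FsxUpdateProtocol notes above, only the SMB side of the protocol configuration can be changed for FSx for ONTAP locations.

import boto3

datasync = boto3.client("datasync")

# Hypothetical update: point the location at a different share and refresh the
# SMB credentials used to access the SVM.
datasync.update_location_fsx_ontap(
    LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0",
    Subdirectory="/share1",
    Protocol={
        "SMB": {
            "Domain": "corp.example.com",
            "User": "datasync-user",
            "Password": "placeholder-password",
        }
    },
)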

" + } + } + }, + "UpdateLocationFsxOntapResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLocationFsxOpenZfsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the FSx for OpenZFS transfer location that you're updating.

" + }, + "Protocol":{"shape":"FsxProtocol"}, + "Subdirectory":{ + "shape":"SmbSubdirectory", + "documentation":"

Specifies a subdirectory in the location's path that must begin with /fsx. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location).

" + } + } + }, + "UpdateLocationFsxOpenZfsResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateLocationFsxWindowsRequest":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the ARN of the FSx for Windows File Server transfer location that you're updating.

" + }, + "Subdirectory":{ + "shape":"FsxWindowsSubdirectory", + "documentation":"

Specifies a mount path for your file system using forward slashes. DataSync uses this subdirectory to read or write data (depending on whether the file system is a source or destination location).

" + }, + "Domain":{ + "shape":"FsxUpdateSmbDomain", + "documentation":"

Specifies the name of the Windows domain that your FSx for Windows File Server file system belongs to.

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.

" + }, + "User":{ + "shape":"SmbUser", + "documentation":"

Specifies the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system.

For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations.

" + }, + "Password":{ + "shape":"SmbPassword", + "documentation":"

Specifies the password of the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system.

" + } + } + }, + "UpdateLocationFsxWindowsResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateLocationHdfsRequest":{ "type":"structure", "required":["LocationArn"], @@ -5195,6 +5438,30 @@ "members":{ } }, + "UpdateLocationS3Request":{ + "type":"structure", + "required":["LocationArn"], + "members":{ + "LocationArn":{ + "shape":"LocationArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the Amazon S3 transfer location that you're updating.

" + }, + "Subdirectory":{ + "shape":"S3Subdirectory", + "documentation":"

Specifies a prefix in the S3 bucket that DataSync reads from or writes to (depending on whether the bucket is a source or destination location).

DataSync can't transfer objects with a prefix that begins with a slash (/) or includes //, /./, or /../ patterns. For example:

  • /photos

  • photos//2006/January

  • photos/./2006/February

  • photos/../2006/March

" + }, + "S3StorageClass":{ + "shape":"S3StorageClass", + "documentation":"

Specifies the storage class that you want your objects to use when Amazon S3 is a transfer destination.

For buckets in Amazon Web Services Regions, the storage class defaults to STANDARD. For buckets on Outposts, the storage class defaults to OUTPOSTS.

For more information, see Storage class considerations with Amazon S3 transfers.
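
For illustration, a hedged boto3 sketch of this request follows; the ARNs and prefix are placeholders, and the S3Config block carries the existing BucketAccessRoleArn field.

import boto3

datasync = boto3.client("datasync")

# Placeholder location ARN, prefix, and bucket-access role.
datasync.update_location_s3(
    LocationArn="arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0",
    Subdirectory="photos/2006/January",   # prefix must not start with "/" or contain //, /./, /../
    S3StorageClass="STANDARD_IA",
    S3Config={
        "BucketAccessRoleArn": "arn:aws:iam::111122223333:role/DataSyncS3Access"
    },
)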

" + }, + "S3Config":{"shape":"S3Config"} + } + }, + "UpdateLocationS3Response":{ + "type":"structure", + "members":{ + } + }, "UpdateLocationSmbRequest":{ "type":"structure", "required":["LocationArn"], @@ -5330,6 +5597,16 @@ "members":{ } }, + "UpdatedEfsAccessPointArn":{ + "type":"string", + "max":128, + "pattern":"(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):elasticfilesystem:[a-z\\-0-9]+:[0-9]{12}:access-point/fsap-[0-9a-f]{8,40}$)|(^$)" + }, + "UpdatedEfsIamRoleArn":{ + "type":"string", + "max":2048, + "pattern":"(^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):iam::[0-9]{12}:role/.*$)|(^$)" + }, "VerifyMode":{ "type":"string", "enum":[ diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index b352490225..4162a60d22 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -2527,6 +2527,25 @@ ], "documentation":"

Returns the count, average, sum, minimum, maximum, sum of squares, variance, and standard deviation for the specified aggregated field. If the aggregation field is of type String, only the count statistic is returned.

Requires permission to access the GetStatistics action.

" }, + "GetThingConnectivityData":{ + "name":"GetThingConnectivityData", + "http":{ + "method":"POST", + "requestUri":"/things/{thingName}/connectivity-data" + }, + "input":{"shape":"GetThingConnectivityDataRequest"}, + "output":{"shape":"GetThingConnectivityDataResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"IndexNotReadyException"} + ], + "documentation":"

Retrieves the live connectivity status per device.

" + }, "GetTopicRule":{ "name":"GetTopicRule", "http":{ @@ -2799,7 +2818,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

List all command executions.

You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to find command executions that started within a specific timeframe.

" + "documentation":"

List all command executions.

  • You must provide only the startedTimeFilter or the completedTimeFilter information. If you provide both time filters, the API will generate an error. You can use this information to retrieve a list of command executions within a specific timeframe.

  • You must provide only the commandArn or the thingArn information depending on whether you want to list executions for a specific command or an IoT thing. If you provide both fields, the API will generate an error.

For more information about considerations for using this API, see List command executions in your account (CLI).

" }, "ListCommands":{ "name":"ListCommands", @@ -7242,6 +7261,13 @@ "max":128, "pattern":"[a-zA-Z0-9:.]+" }, + "ConnectivityApiThingName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9:_-]+", + "sensitive":true + }, "ConnectivityTimestamp":{"type":"long"}, "ConsecutiveDatapointsToAlarm":{ "type":"integer", @@ -7504,7 +7530,7 @@ }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The IAM role that allows access to create the command.

" + "documentation":"

The IAM role that you must provide when using the AWS-IoT-FleetWise namespace. The role grants IoT Device Management the permission to access IoT FleetWise resources for generating the payload for the command. This field is not required when you use the AWS-IoT namespace.

" }, "tags":{ "shape":"TagList", @@ -11429,6 +11455,25 @@ } }, "DisconnectReason":{"type":"string"}, + "DisconnectReasonValue":{ + "type":"string", + "enum":[ + "AUTH_ERROR", + "CLIENT_INITIATED_DISCONNECT", + "CLIENT_ERROR", + "CONNECTION_LOST", + "DUPLICATE_CLIENTID", + "FORBIDDEN_ACCESS", + "MQTT_KEEP_ALIVE_TIMEOUT", + "SERVER_ERROR", + "SERVER_INITIATED_DISCONNECT", + "THROTTLED", + "WEBSOCKET_TTL_EXPIRATION", + "CUSTOMAUTH_TTL_EXPIRATION", + "UNKNOWN", + "NONE" + ] + }, "DisplayName":{ "type":"string", "max":64, @@ -12192,7 +12237,7 @@ }, "timeToLive":{ "shape":"DateType", - "documentation":"

The time to live (TTL) parameter for the GetCommandExecution API.

" + "documentation":"

The time to live (TTL) parameter that indicates the duration for which executions will be retained in your account. The default value is six months.

" } } }, @@ -12241,7 +12286,7 @@ }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The IAM role that allows access to retrieve information about the command.

" + "documentation":"

The IAM role that you provided when creating the command with AWS-IoT-FleetWise as the namespace.

" }, "createdAt":{ "shape":"DateType", @@ -12697,6 +12742,39 @@ } } }, + "GetThingConnectivityDataRequest":{ + "type":"structure", + "required":["thingName"], + "members":{ + "thingName":{ + "shape":"ConnectivityApiThingName", + "documentation":"

The name of your IoT thing.

", + "location":"uri", + "locationName":"thingName" + } + } + }, + "GetThingConnectivityDataResponse":{ + "type":"structure", + "members":{ + "thingName":{ + "shape":"ConnectivityApiThingName", + "documentation":"

The name of your IoT thing.

" + }, + "connected":{ + "shape":"Boolean", + "documentation":"

A Boolean that indicates the connectivity status.

" + }, + "timestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp of when the event occurred.

" + }, + "disconnectReason":{ + "shape":"DisconnectReasonValue", + "documentation":"

The reason why the client disconnected.
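
A short boto3 sketch of the new query follows; it assumes an SDK that includes this model, and the thing name is a placeholder.

import boto3

iot = boto3.client("iot")

# "my-thing" is a placeholder thing name.
data = iot.get_thing_connectivity_data(thingName="my-thing")

print(data["thingName"], data["connected"])
print("last event:", data["timestamp"], "reason:", data.get("disconnectReason"))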

" + } + } + }, "GetTopicRuleDestinationRequest":{ "type":"structure", "required":["arn"], diff --git a/botocore/data/mwaa/2020-07-01/service-2.json b/botocore/data/mwaa/2020-07-01/service-2.json index f8919ba056..f3e7465469 100644 --- a/botocore/data/mwaa/2020-07-01/service-2.json +++ b/botocore/data/mwaa/2020-07-01/service-2.json @@ -377,7 +377,7 @@ }, "AirflowVersion":{ "shape":"AirflowVersion", - "documentation":"

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA).

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1.

" + "documentation":"

The Apache Airflow version for your environment. If no value is specified, it defaults to the latest version. For more information, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (Amazon MWAA).

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

" }, "LoggingConfiguration":{ "shape":"LoggingConfigurationInput", @@ -558,7 +558,7 @@ }, "AirflowVersion":{ "shape":"AirflowVersion", - "documentation":"

The Apache Airflow version on your environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1.

" + "documentation":"

The Apache Airflow version on your environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3.

" }, "SourceBucketArn":{ "shape":"S3BucketArn", @@ -1377,7 +1377,7 @@ }, "AirflowVersion":{ "shape":"AirflowVersion", - "documentation":"

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, and 2.10.1.

" + "documentation":"

The Apache Airflow version for your environment. To upgrade your environment, specify a newer version of Apache Airflow supported by Amazon MWAA.

Before you upgrade an environment, make sure your requirements, DAGs, plugins, and other resources used in your workflows are compatible with the new Apache Airflow version. For more information about updating your resources, see Upgrading an Amazon MWAA environment.

Valid values: 1.10.12, 2.0.2, 2.2.2, 2.4.3, 2.5.1, 2.6.3, 2.7.2, 2.8.1, 2.9.2, 2.10.1, and 2.10.3.
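
As a minimal illustration, upgrading an existing environment to the newly supported version might look like the boto3 sketch below; the environment name is a placeholder.

import boto3

mwaa = boto3.client("mwaa")

# Upgrading triggers a managed in-place upgrade of the environment.
mwaa.update_environment(
    Name="my-environment",
    AirflowVersion="2.10.3",
)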

" }, "SourceBucketArn":{ "shape":"S3BucketArn", diff --git a/botocore/data/quicksight/2018-04-01/service-2.json b/botocore/data/quicksight/2018-04-01/service-2.json index abf1c2f26f..2668fd01ea 100644 --- a/botocore/data/quicksight/2018-04-01/service-2.json +++ b/botocore/data/quicksight/2018-04-01/service-2.json @@ -9255,6 +9255,10 @@ "FolderArns":{ "shape":"FolderArnList", "documentation":"

When you create the dataset, Amazon QuickSight adds the dataset to these folders.

" + }, + "PerformanceConfiguration":{ + "shape":"PerformanceConfiguration", + "documentation":"

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" } } }, @@ -11632,6 +11636,10 @@ "DatasetParameters":{ "shape":"DatasetParameterList", "documentation":"

The parameters that are declared in a dataset.

" + }, + "PerformanceConfiguration":{ + "shape":"PerformanceConfiguration", + "documentation":"

The performance optimization configuration of a dataset.

" } }, "documentation":"

Dataset.

" @@ -26127,6 +26135,16 @@ "max":100, "min":0 }, + "PerformanceConfiguration":{ + "type":"structure", + "members":{ + "UniqueKeys":{ + "shape":"UniqueKeyList", + "documentation":"

A UniqueKey configuration.

" + } + }, + "documentation":"

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" + }, "PeriodOverPeriodComputation":{ "type":"structure", "required":["ComputationId"], @@ -34725,6 +34743,29 @@ "type":"string", "pattern":"^[^\\u0000-\\u00FF]$" }, + "UniqueKey":{ + "type":"structure", + "required":["ColumnNames"], + "members":{ + "ColumnNames":{ + "shape":"UniqueKeyColumnNameList", + "documentation":"

The name of the column that is referenced in the UniqueKey configuration.

" + } + }, + "documentation":"

A UniqueKey configuration that references a dataset column.
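
To show where the new configuration plugs in, here is a hedged boto3 sketch of UpdateDataSet; the account ID, dataset ID, and physical table details are placeholders, and only the PerformanceConfiguration block is new in this model.

import boto3

qs = boto3.client("quicksight")

qs.update_data_set(
    AwsAccountId="111122223333",
    DataSetId="my-dataset-id",
    Name="Orders",
    ImportMode="SPICE",
    PhysicalTableMap={
        "orders-table": {
            "RelationalTable": {
                "DataSourceArn": "arn:aws:quicksight:us-east-1:111122223333:datasource/my-datasource-id",
                "Schema": "public",
                "Name": "orders",
                "InputColumns": [
                    {"Name": "order_id", "Type": "STRING"},
                ],
            }
        }
    },
    PerformanceConfiguration={
        "UniqueKeys": [
            {"ColumnNames": ["order_id"]},  # both lists allow exactly one entry
        ]
    },
)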

" + }, + "UniqueKeyColumnNameList":{ + "type":"list", + "member":{"shape":"ColumnName"}, + "max":1, + "min":1 + }, + "UniqueKeyList":{ + "type":"list", + "member":{"shape":"UniqueKey"}, + "max":1, + "min":1 + }, "UniqueValuesComputation":{ "type":"structure", "required":["ComputationId"], @@ -35638,6 +35679,10 @@ "DatasetParameters":{ "shape":"DatasetParameterList", "documentation":"

The parameter declarations of the dataset.

" + }, + "PerformanceConfiguration":{ + "shape":"PerformanceConfiguration", + "documentation":"

The configuration for the performance optimization of the dataset that contains a UniqueKey configuration.

" } } }, diff --git a/botocore/data/resiliencehub/2020-04-30/service-2.json b/botocore/data/resiliencehub/2020-04-30/service-2.json index 6a8aaa3029..bd7de97ab5 100644 --- a/botocore/data/resiliencehub/2020-04-30/service-2.json +++ b/botocore/data/resiliencehub/2020-04-30/service-2.json @@ -1296,6 +1296,20 @@ "max":10, "min":1 }, + "Alarm":{ + "type":"structure", + "members":{ + "alarmArn":{ + "shape":"Arn", + "documentation":"

Amazon Resource Name (ARN) of the Amazon CloudWatch alarm.

" + }, + "source":{ + "shape":"String255", + "documentation":"

Indicates the source of the Amazon CloudWatch alarm. That is, it indicates whether the alarm was created from a Resilience Hub recommendation (AwsResilienceHub) or whether you created the alarm in Amazon CloudWatch (Customer).

" + } + }, + "documentation":"

Indicates the Amazon CloudWatch alarm detected while running an assessment.

" + }, "AlarmRecommendation":{ "type":"structure", "required":[ @@ -1871,7 +1885,7 @@ "members":{ "appComponents":{ "shape":"AppComponentNameList", - "documentation":"

Indicates the Application Components (AppComponents) that were assessed as part of the assessnent and are associated with the identified risk and recommendation.

This property is available only in the US East (N. Virginia) Region.

" + "documentation":"

Indicates the Application Components (AppComponents) that were assessed as part of the assessment and are associated with the identified risk and recommendation.

This property is available only in the US East (N. Virginia) Region.

" }, "recommendation":{ "shape":"String255", @@ -1994,6 +2008,10 @@ "referenceId" ], "members":{ + "appComponentId":{ + "shape":"EntityName255", + "documentation":"

Indicates the identifier of an AppComponent.

" + }, "entryId":{ "shape":"String255", "documentation":"

An identifier for an entry in this batch that is used to communicate the result.

The entryIds of a batch request need to be unique within a request.

" @@ -3477,6 +3495,20 @@ "ComplexityOfImplementation" ] }, + "Experiment":{ + "type":"structure", + "members":{ + "experimentArn":{ + "shape":"String255", + "documentation":"

Amazon Resource Name (ARN) of the FIS experiment.

" + }, + "experimentTemplateId":{ + "shape":"String255", + "documentation":"

Identifier of the FIS experiment template.

" + } + }, + "documentation":"

Indicates the FIS experiment detected while running an assessment.

" + }, "FailedGroupingRecommendationEntries":{ "type":"list", "member":{"shape":"FailedGroupingRecommendationEntry"} @@ -4703,7 +4735,7 @@ }, "invokerRoleName":{ "shape":"IamRoleName", - "documentation":"

Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by Resilience Hub Service Principle to obtain a read-only access to your application resources while running an assessment.

  • You must have iam:passRole permission for this role while creating or updating the application.

  • Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-] characters.

" + "documentation":"

Existing Amazon Web Services IAM role name in the primary Amazon Web Services account that will be assumed by the Resilience Hub service principal to obtain read-only access to your application resources while running an assessment.

If your IAM role includes a path, you must include the path in the invokerRoleName parameter. For example, if your IAM role's ARN is arn:aws:iam::123456789012:role/my-path/role-name, you should pass my-path/role-name, as in the sketch after this list.

  • You must have iam:passRole permission for this role while creating or updating the application.

  • Currently, invokerRoleName accepts only [A-Za-z0-9_+=,.@-] characters.
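
A hedged boto3 sketch of passing a role path through invokerRoleName follows; the application ARN and role name are placeholders, and the permission model is assumed to already be role based.

import boto3

resiliencehub = boto3.client("resiliencehub")

# Placeholder app ARN and role name; the role path is part of invokerRoleName.
resiliencehub.update_app(
    appArn="arn:aws:resiliencehub:us-east-1:111122223333:app/0a1b2c3d-4e5f-6789-0abc-def012345678",
    permissionModel={
        "type": "RoleBased",
        "invokerRoleName": "my-path/role-name",
    },
)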

" }, "type":{ "shape":"PermissionModelType", @@ -4923,6 +4955,10 @@ "shape":"BooleanOptional", "documentation":"

Specifies if the recommendation has already been implemented.

" }, + "discoveredAlarm":{ + "shape":"Alarm", + "documentation":"

Indicates the previously implemented Amazon CloudWatch alarm discovered by Resilience Hub.

" + }, "excludeReason":{ "shape":"ExcludeRecommendationReason", "documentation":"

Indicates the reason for excluding an operational recommendation.

" @@ -4931,6 +4967,10 @@ "shape":"BooleanOptional", "documentation":"

Indicates if an operational recommendation item is excluded.

" }, + "latestDiscoveredExperiment":{ + "shape":"Experiment", + "documentation":"

Indicates the experiment created in FIS that Resilience Hub discovered and that matches the recommendation.

" + }, "resourceId":{ "shape":"String500", "documentation":"

Identifier of the resource.

" @@ -5875,6 +5915,10 @@ "type":"structure", "required":["referenceId"], "members":{ + "appComponentId":{ + "shape":"EntityName255", + "documentation":"

Indicates the identifier of the AppComponent.

" + }, "appComponentName":{ "shape":"EntityId", "documentation":"

Name of the Application Component.

" @@ -6249,6 +6293,10 @@ "referenceId" ], "members":{ + "appComponentId":{ + "shape":"EntityName255", + "documentation":"

Indicates the identifier of the AppComponent.

" + }, "entryId":{ "shape":"String255", "documentation":"

An identifier for an entry in this batch that is used to communicate the result.

The entryIds of a batch request need to be unique within a request.

" diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index 85036600de..7f7778b79a 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -1244,6 +1244,10 @@ "BasicAuthSecretId":{ "shape":"As2ConnectorSecretId", "documentation":"

Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication, you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.

The default value for this parameter is null, which indicates that Basic authentication is not enabled for the connector.

If the connector should use Basic authentication, the secret needs to be in the following format:

{ \"Username\": \"user-name\", \"Password\": \"user-password\" }

Replace user-name and user-password with the credentials for the actual user that is being authenticated.

Note the following:

  • You are storing these credentials in Secrets Manager, not passing them directly into this API.

  • If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication. However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.

If you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:

update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'

" + }, + "PreserveContentType":{ + "shape":"PreserveContentType", + "documentation":"

Allows you to use the Amazon S3 Content-Type that is associated with objects in S3 instead of having the content type mapped based on the file extension. This parameter is enabled by default when you create an AS2 connector from the console, but disabled by default when you create an AS2 connector by calling the API directly.
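
For example, enabling this behavior on an existing AS2 connector might look like the boto3 sketch below; the connector ID is a placeholder, and only the As2Config fields being changed need to be sent.

import boto3

transfer = boto3.client("transfer")

transfer.update_connector(
    ConnectorId="c-0123456789abcdef0",
    As2Config={
        "PreserveContentType": "ENABLED",  # send the object's S3 Content-Type as-is
    },
)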

" } }, "documentation":"

Contains the details for an AS2 connector object. The connector object is used for AS2 outbound processes, to connect the Transfer Family customer with the trading partner.

" @@ -1513,6 +1517,14 @@ "Tags":{ "shape":"Tags", "documentation":"

Key-value pairs that can be used to group and search for agreements.

" + }, + "PreserveFilename":{ + "shape":"PreserveFilenameType", + "documentation":"

Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it.

  • ENABLED: the filename provided by your trading partner is preserved when the file is saved.

  • DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations.

" + }, + "EnforceMessageSigning":{ + "shape":"EnforceMessageSigningType", + "documentation":"

Determines whether or not unsigned messages from your trading partners will be accepted.

  • ENABLED: Transfer Family rejects unsigned messages from your trading partner.

  • DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.
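
A hedged boto3 sketch of creating an agreement with both new settings follows; the server ID, profile IDs, base directory, and role ARN are placeholders.

import boto3

transfer = boto3.client("transfer")

transfer.create_agreement(
    ServerId="s-0123456789abcdef0",
    LocalProfileId="p-0123456789abcdef0",
    PartnerProfileId="p-abcdef01234567890",
    BaseDirectory="/amzn-s3-demo-bucket/inbound",
    AccessRole="arn:aws:iam::111122223333:role/TransferAs2Access",
    PreserveFilename="ENABLED",          # keep the partner-supplied filename
    EnforceMessageSigning="ENABLED",     # reject unsigned AS2 messages
)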

" } } }, @@ -2458,6 +2470,14 @@ "Tags":{ "shape":"Tags", "documentation":"

Key-value pairs that can be used to group and search for agreements.

" + }, + "PreserveFilename":{ + "shape":"PreserveFilenameType", + "documentation":"

Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it.

  • ENABLED: the filename provided by your trading partner is preserved when the file is saved.

  • DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations.

" + }, + "EnforceMessageSigning":{ + "shape":"EnforceMessageSigningType", + "documentation":"

Determines whether or not unsigned messages from your trading partners will be accepted.

  • ENABLED: Transfer Family rejects unsigned messages from your trading partner.

  • DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.

" } }, "documentation":"

Describes the properties of an agreement.

" @@ -2480,7 +2500,7 @@ }, "Status":{ "shape":"CertificateStatusType", - "documentation":"

The certificate can be either ACTIVE, PENDING_ROTATION, or INACTIVE. PENDING_ROTATION means that this certificate will replace the current certificate when it expires.

" + "documentation":"

Currently, the only available status is ACTIVE; all other values are reserved for future use.

" }, "Certificate":{ "shape":"CertificateBodyType", @@ -3087,6 +3107,13 @@ "VPC_ENDPOINT" ] }, + "EnforceMessageSigningType":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "ExecutionError":{ "type":"structure", "required":[ @@ -4486,6 +4513,20 @@ "min":0, "pattern":"[\\x09-\\x0D\\x20-\\x7E]*" }, + "PreserveContentType":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "PreserveFilenameType":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "PrivateKeyType":{ "type":"string", "max":16384, @@ -5357,6 +5398,14 @@ "AccessRole":{ "shape":"Role", "documentation":"

Connectors are used to send files using either the AS2 or SFTP protocol. For the access role, provide the Amazon Resource Name (ARN) of the Identity and Access Management role to use.

For AS2 connectors

With AS2, you can send files by calling StartFileTransfer and specifying the file paths in the request parameter, SendFilePaths. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt, parent directory is /bucket/dir/) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer.

If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt permission for that key.

For SFTP connectors

Make sure that the access role provides read and write access to the parent directory of the file location that's used in the StartFileTransfer request. Additionally, make sure that the role provides secretsmanager:GetSecretValue permission to Secrets Manager.

" + }, + "PreserveFilename":{ + "shape":"PreserveFilenameType", + "documentation":"

Determines whether or not Transfer Family appends a unique string of characters to the end of the AS2 message payload filename when saving it.

  • ENABLED: the filename provided by your trading partner is preserved when the file is saved.

  • DISABLED (default value): when Transfer Family saves the file, the filename is adjusted, as described in File names and locations.

" + }, + "EnforceMessageSigning":{ + "shape":"EnforceMessageSigningType", + "documentation":"

Determines whether or not unsigned messages from your trading partners will be accepted.

  • ENABLED: Transfer Family rejects unsigned messages from your trading partner.

  • DISABLED (default value): Transfer Family accepts unsigned messages from your trading partner.

" } } }, diff --git a/tests/functional/endpoint-rules/budgets/endpoint-tests-1.json b/tests/functional/endpoint-rules/budgets/endpoint-tests-1.json index b97c4559b5..c4254a59cd 100644 --- a/tests/functional/endpoint-rules/budgets/endpoint-tests-1.json +++ b/tests/functional/endpoint-rules/budgets/endpoint-tests-1.json @@ -218,6 +218,28 @@ "UseDualStack": false } }, + { + "documentation": "For region aws-iso-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://budgets.c2s.ic.gov" + } + }, + "params": { + "Region": "aws-iso-global", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -257,7 +279,16 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://budgets.us-iso-east-1.c2s.ic.gov" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-iso-east-1" + } + ] + }, + "url": "https://budgets.c2s.ic.gov" } }, "params": { @@ -266,6 +297,28 @@ "UseDualStack": false } }, + { + "documentation": "For region aws-iso-b-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://budgets.global.sc2s.sgov.gov" + } + }, + "params": { + "Region": "aws-iso-b-global", + "UseFIPS": false, + "UseDualStack": false + } + }, { "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", "expect": { @@ -305,7 +358,16 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://budgets.us-isob-east-1.sc2s.sgov.gov" + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "budgets", + "signingRegion": "us-isob-east-1" + } + ] + }, + "url": "https://budgets.global.sc2s.sgov.gov" } }, "params": { From 587df5a153039ff99a58484ef6c9ff04d1c1fcaf Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 18 Dec 2024 19:05:03 +0000 Subject: [PATCH 16/20] Update endpoints model --- botocore/data/endpoints.json | 231 +++++++++++++++++++++++++++++++---- 1 file changed, 205 insertions(+), 26 deletions(-) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 1c7d34cccf..7ea2f774a1 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -5780,38 +5780,150 @@ }, "datasync" : { "endpoints" : { - "af-south-1" : { }, - "ap-east-1" : { }, - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-northeast-3" : { }, - "ap-south-1" : { }, - "ap-south-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ap-southeast-3" : { }, - "ap-southeast-4" : { }, - "ap-southeast-5" : { }, + "af-south-1" : { + "variants" : [ { + "hostname" : "datasync.af-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-east-1" : { + "variants" : [ { + "hostname" : "datasync.ap-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-1" : { + "variants" : [ { + "hostname" : "datasync.ap-northeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-northeast-2" : { + "variants" : [ { + "hostname" : "datasync.ap-northeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + 
}, + "ap-northeast-3" : { + "variants" : [ { + "hostname" : "datasync.ap-northeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-1" : { + "variants" : [ { + "hostname" : "datasync.ap-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-south-2" : { + "variants" : [ { + "hostname" : "datasync.ap-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-1" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-2" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-3" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-3.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-4" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-4.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "ap-southeast-5" : { + "variants" : [ { + "hostname" : "datasync.ap-southeast-5.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "ca-central-1" : { "variants" : [ { "hostname" : "datasync-fips.ca-central-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.ca-central-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.ca-central-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "ca-west-1" : { "variants" : [ { "hostname" : "datasync-fips.ca-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.ca-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.ca-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-1" : { + "variants" : [ { + "hostname" : "datasync.eu-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-central-2" : { + "variants" : [ { + "hostname" : "datasync.eu-central-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-north-1" : { + "variants" : [ { + "hostname" : "datasync.eu-north-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-1" : { + "variants" : [ { + "hostname" : "datasync.eu-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "datasync.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-1" : { + "variants" : [ { + "hostname" : "datasync.eu-west-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-2" : { + "variants" : [ { + "hostname" : "datasync.eu-west-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "eu-west-3" : { + "variants" : [ { + "hostname" : "datasync.eu-west-3.api.aws", + "tags" : [ "dualstack" ] } ] }, - "eu-central-1" : { }, - "eu-central-2" : { }, - "eu-north-1" : { }, - "eu-south-1" : { }, - "eu-south-2" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -5854,32 +5966,76 @@ "deprecated" : true, "hostname" : "datasync-fips.us-west-2.amazonaws.com" }, - "il-central-1" : { }, - "me-central-1" : { }, - "me-south-1" : { }, - "sa-east-1" : { }, + "il-central-1" : { + "variants" : [ { + "hostname" : "datasync.il-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-central-1" : { + "variants" : [ { + "hostname" : "datasync.me-central-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "me-south-1" : { + "variants" : [ { + "hostname" : "datasync.me-south-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, + "sa-east-1" : { + "variants" : [ { + "hostname" : "datasync.sa-east-1.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "us-east-1" : { "variants" : [ { 
"hostname" : "datasync-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { "hostname" : "datasync-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-east-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { "variants" : [ { "hostname" : "datasync-fips.us-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-west-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-2" : { "variants" : [ { "hostname" : "datasync-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-west-2.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -11619,6 +11775,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "kafka-fips.ca-central-1.amazonaws.com", @@ -24309,8 +24466,18 @@ }, "datasync" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "datasync.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "datasync.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "datazone" : { @@ -26830,12 +26997,24 @@ "variants" : [ { "hostname" : "datasync-fips.us-gov-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-gov-east-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-gov-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-gov-west-1" : { "variants" : [ { "hostname" : "datasync-fips.us-gov-west-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "datasync-fips.us-gov-west-1.api.aws", + "tags" : [ "dualstack", "fips" ] + }, { + "hostname" : "datasync.us-gov-west-1.api.aws", + "tags" : [ "dualstack" ] } ] } } From ee6d5ef6c095277f6f2f4b48a00d9f596f500b44 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 18 Dec 2024 19:05:56 +0000 Subject: [PATCH 17/20] Bumping version to 1.35.84 --- .changes/1.35.84.json | 52 +++++++++++++++++++ .../api-change-amplify-47495.json | 5 -- .../api-change-budgets-45312.json | 5 -- .../api-change-connect-85567.json | 5 -- .../api-change-connectparticipant-91624.json | 5 -- .../api-change-datasync-24714.json | 5 -- .../next-release/api-change-iot-57585.json | 5 -- .../next-release/api-change-mwaa-81386.json | 5 -- .../api-change-quicksight-44739.json | 5 -- .../api-change-resiliencehub-9972.json | 5 -- .../api-change-transfer-79171.json | 5 -- CHANGELOG.rst | 15 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 14 files changed, 69 insertions(+), 52 deletions(-) create mode 100644 .changes/1.35.84.json delete mode 100644 .changes/next-release/api-change-amplify-47495.json delete mode 100644 .changes/next-release/api-change-budgets-45312.json delete mode 100644 .changes/next-release/api-change-connect-85567.json delete mode 100644 .changes/next-release/api-change-connectparticipant-91624.json delete mode 100644 
.changes/next-release/api-change-datasync-24714.json delete mode 100644 .changes/next-release/api-change-iot-57585.json delete mode 100644 .changes/next-release/api-change-mwaa-81386.json delete mode 100644 .changes/next-release/api-change-quicksight-44739.json delete mode 100644 .changes/next-release/api-change-resiliencehub-9972.json delete mode 100644 .changes/next-release/api-change-transfer-79171.json diff --git a/.changes/1.35.84.json b/.changes/1.35.84.json new file mode 100644 index 0000000000..8caed44850 --- /dev/null +++ b/.changes/1.35.84.json @@ -0,0 +1,52 @@ +[ + { + "category": "``amplify``", + "description": "Added WAF Configuration to Amplify Apps", + "type": "api-change" + }, + { + "category": "``budgets``", + "description": "Releasing minor partition endpoint updates", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "This release adds support for the UpdateParticipantAuthentication API used for customer authentication within Amazon Connect chats.", + "type": "api-change" + }, + { + "category": "``connectparticipant``", + "description": "This release adds support for the GetAuthenticationUrl and CancelParticipantAuthentication APIs used for customer authentication within Amazon Connect chats. There are also minor updates to the GetAttachment API.", + "type": "api-change" + }, + { + "category": "``datasync``", + "description": "AWS DataSync introduces the ability to update attributes for in-cloud locations.", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "Release connectivity status query API which is a dedicated high throughput(TPS) API to query a specific device's most recent connectivity state and metadata.", + "type": "api-change" + }, + { + "category": "``mwaa``", + "description": "Added support for Apache Airflow version 2.10.3 to MWAA.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "Add support for PerformanceConfiguration attribute to Dataset entity. Allow PerformanceConfiguration specification in CreateDataset and UpdateDataset APIs.", + "type": "api-change" + }, + { + "category": "``resiliencehub``", + "description": "AWS Resilience Hub now automatically detects already configured CloudWatch alarms and FIS experiments as part of the assessment process and returns the discovered resources in the corresponding list API responses. It also allows you to include or exclude test recommendations for an AppComponent.", + "type": "api-change" + }, + { + "category": "``transfer``", + "description": "Added AS2 agreement configurations to control filename preservation and message signing enforcement. 
Added AS2 connector configuration to preserve content type from S3 objects.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-amplify-47495.json b/.changes/next-release/api-change-amplify-47495.json deleted file mode 100644 index 1c7f21adbe..0000000000 --- a/.changes/next-release/api-change-amplify-47495.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``amplify``", - "description": "Added WAF Configuration to Amplify Apps" -} diff --git a/.changes/next-release/api-change-budgets-45312.json b/.changes/next-release/api-change-budgets-45312.json deleted file mode 100644 index 08bc6647f9..0000000000 --- a/.changes/next-release/api-change-budgets-45312.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``budgets``", - "description": "Releasing minor partition endpoint updates" -} diff --git a/.changes/next-release/api-change-connect-85567.json b/.changes/next-release/api-change-connect-85567.json deleted file mode 100644 index d2190b5fda..0000000000 --- a/.changes/next-release/api-change-connect-85567.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``connect``", - "description": "This release adds support for the UpdateParticipantAuthentication API used for customer authentication within Amazon Connect chats." -} diff --git a/.changes/next-release/api-change-connectparticipant-91624.json b/.changes/next-release/api-change-connectparticipant-91624.json deleted file mode 100644 index a4a5e8e253..0000000000 --- a/.changes/next-release/api-change-connectparticipant-91624.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``connectparticipant``", - "description": "This release adds support for the GetAuthenticationUrl and CancelParticipantAuthentication APIs used for customer authentication within Amazon Connect chats. There are also minor updates to the GetAttachment API." -} diff --git a/.changes/next-release/api-change-datasync-24714.json b/.changes/next-release/api-change-datasync-24714.json deleted file mode 100644 index 9da116e9ea..0000000000 --- a/.changes/next-release/api-change-datasync-24714.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``datasync``", - "description": "AWS DataSync introduces the ability to update attributes for in-cloud locations." -} diff --git a/.changes/next-release/api-change-iot-57585.json b/.changes/next-release/api-change-iot-57585.json deleted file mode 100644 index 09f87d478c..0000000000 --- a/.changes/next-release/api-change-iot-57585.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``iot``", - "description": "Release connectivity status query API which is a dedicated high throughput(TPS) API to query a specific device's most recent connectivity state and metadata." -} diff --git a/.changes/next-release/api-change-mwaa-81386.json b/.changes/next-release/api-change-mwaa-81386.json deleted file mode 100644 index 6b6c47e331..0000000000 --- a/.changes/next-release/api-change-mwaa-81386.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``mwaa``", - "description": "Added support for Apache Airflow version 2.10.3 to MWAA." 
-} diff --git a/.changes/next-release/api-change-quicksight-44739.json b/.changes/next-release/api-change-quicksight-44739.json deleted file mode 100644 index 1807e2639e..0000000000 --- a/.changes/next-release/api-change-quicksight-44739.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``quicksight``", - "description": "Add support for PerformanceConfiguration attribute to Dataset entity. Allow PerformanceConfiguration specification in CreateDataset and UpdateDataset APIs." -} diff --git a/.changes/next-release/api-change-resiliencehub-9972.json b/.changes/next-release/api-change-resiliencehub-9972.json deleted file mode 100644 index db8444a81d..0000000000 --- a/.changes/next-release/api-change-resiliencehub-9972.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``resiliencehub``", - "description": "AWS Resilience Hub now automatically detects already configured CloudWatch alarms and FIS experiments as part of the assessment process and returns the discovered resources in the corresponding list API responses. It also allows you to include or exclude test recommendations for an AppComponent." -} diff --git a/.changes/next-release/api-change-transfer-79171.json b/.changes/next-release/api-change-transfer-79171.json deleted file mode 100644 index 5245befcf4..0000000000 --- a/.changes/next-release/api-change-transfer-79171.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``transfer``", - "description": "Added AS2 agreement configurations to control filename preservation and message signing enforcement. Added AS2 connector configuration to preserve content type from S3 objects." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4b9560642f..4f2bb5f449 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,21 @@ CHANGELOG ========= +1.35.84 +======= + +* api-change:``amplify``: Added WAF Configuration to Amplify Apps +* api-change:``budgets``: Releasing minor partition endpoint updates +* api-change:``connect``: This release adds support for the UpdateParticipantAuthentication API used for customer authentication within Amazon Connect chats. +* api-change:``connectparticipant``: This release adds support for the GetAuthenticationUrl and CancelParticipantAuthentication APIs used for customer authentication within Amazon Connect chats. There are also minor updates to the GetAttachment API. +* api-change:``datasync``: AWS DataSync introduces the ability to update attributes for in-cloud locations. +* api-change:``iot``: Release connectivity status query API which is a dedicated high throughput(TPS) API to query a specific device's most recent connectivity state and metadata. +* api-change:``mwaa``: Added support for Apache Airflow version 2.10.3 to MWAA. +* api-change:``quicksight``: Add support for PerformanceConfiguration attribute to Dataset entity. Allow PerformanceConfiguration specification in CreateDataset and UpdateDataset APIs. +* api-change:``resiliencehub``: AWS Resilience Hub now automatically detects already configured CloudWatch alarms and FIS experiments as part of the assessment process and returns the discovered resources in the corresponding list API responses. It also allows you to include or exclude test recommendations for an AppComponent. +* api-change:``transfer``: Added AS2 agreement configurations to control filename preservation and message signing enforcement. Added AS2 connector configuration to preserve content type from S3 objects. 
+ + 1.35.83 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index acc42c21d3..888050d741 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.83' +__version__ = '1.35.84' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 25e3bb5e4a..76cf6de905 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.83' +release = '1.35.84' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 36a1e004e18841ac9cb848ee35b9b92f3654b0ad Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 19 Dec 2024 19:02:59 +0000 Subject: [PATCH 18/20] Update to latest models --- .../api-change-appstream-57477.json | 5 + .../api-change-mediaconvert-81646.json | 5 + .../api-change-medialive-36945.json | 5 + .../api-change-qconnect-2818.json | 5 + .../next-release/api-change-ssmsap-10597.json | 5 + .../api-change-workspaces-17479.json | 5 + .../data/appstream/2016-12-01/service-2.json | 3 +- .../mediaconvert/2017-08-29/service-2.json | 55 +++++- .../data/medialive/2017-10-14/service-2.json | 182 ++++++++++++++++++ .../data/qconnect/2020-10-19/service-2.json | 10 +- .../data/ssm-sap/2018-05-10/service-2.json | 33 ++++ .../data/workspaces/2015-04-08/service-2.json | 68 +++++++ 12 files changed, 371 insertions(+), 10 deletions(-) create mode 100644 .changes/next-release/api-change-appstream-57477.json create mode 100644 .changes/next-release/api-change-mediaconvert-81646.json create mode 100644 .changes/next-release/api-change-medialive-36945.json create mode 100644 .changes/next-release/api-change-qconnect-2818.json create mode 100644 .changes/next-release/api-change-ssmsap-10597.json create mode 100644 .changes/next-release/api-change-workspaces-17479.json diff --git a/.changes/next-release/api-change-appstream-57477.json b/.changes/next-release/api-change-appstream-57477.json new file mode 100644 index 0000000000..e90732a707 --- /dev/null +++ b/.changes/next-release/api-change-appstream-57477.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``appstream``", + "description": "Added support for Rocky Linux 8 on Amazon AppStream 2.0" +} diff --git a/.changes/next-release/api-change-mediaconvert-81646.json b/.changes/next-release/api-change-mediaconvert-81646.json new file mode 100644 index 0000000000..e5eca09007 --- /dev/null +++ b/.changes/next-release/api-change-mediaconvert-81646.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``mediaconvert``", + "description": "This release adds support for inserting timecode tracks into MP4 container outputs." 
+} diff --git a/.changes/next-release/api-change-medialive-36945.json b/.changes/next-release/api-change-medialive-36945.json new file mode 100644 index 0000000000..ebae8ba735 --- /dev/null +++ b/.changes/next-release/api-change-medialive-36945.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``medialive``", + "description": "MediaLive is releasing ListVersions api" +} diff --git a/.changes/next-release/api-change-qconnect-2818.json b/.changes/next-release/api-change-qconnect-2818.json new file mode 100644 index 0000000000..9bbf08af07 --- /dev/null +++ b/.changes/next-release/api-change-qconnect-2818.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``qconnect``", + "description": "Amazon Q in Connect enables agents to ask Q for assistance in multiple languages and Q will provide answers and recommended step-by-step guides in those languages. Qs default language is English (United States) and you can switch this by setting the locale configuration on the AI Agent." +} diff --git a/.changes/next-release/api-change-ssmsap-10597.json b/.changes/next-release/api-change-ssmsap-10597.json new file mode 100644 index 0000000000..02d732137b --- /dev/null +++ b/.changes/next-release/api-change-ssmsap-10597.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ssm-sap``", + "description": "AWS Systems Manager for SAP added support for registration and discovery of distributed ABAP applications" +} diff --git a/.changes/next-release/api-change-workspaces-17479.json b/.changes/next-release/api-change-workspaces-17479.json new file mode 100644 index 0000000000..c751e407bd --- /dev/null +++ b/.changes/next-release/api-change-workspaces-17479.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``workspaces``", + "description": "Added AWS Global Accelerator (AGA) support for WorkSpaces Personal." +} diff --git a/botocore/data/appstream/2016-12-01/service-2.json b/botocore/data/appstream/2016-12-01/service-2.json index 03b7cf15a6..04e2309b13 100644 --- a/botocore/data/appstream/2016-12-01/service-2.json +++ b/botocore/data/appstream/2016-12-01/service-2.json @@ -4533,7 +4533,8 @@ "WINDOWS_SERVER_2019", "WINDOWS_SERVER_2022", "AMAZON_LINUX2", - "RHEL8" + "RHEL8", + "ROCKY_LINUX8" ] }, "Platforms":{ diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 55a0ff8e02..958f2056cb 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1842,7 +1842,7 @@ "documentation": "Advanced audio normalization settings. Ignore these settings unless you need to comply with a loudness standard." }, "AudioSourceName": { - "shape": "__string", + "shape": "__stringMax2048", "locationName": "audioSourceName", "documentation": "Specifies which audio data to use from each input. In the simplest case, specify an \"Audio Selector\":#inputs-audio_selector by name based on its order within each input. For example if you specify \"Audio Selector 3\", then the third audio selector will be used from each input. If an input does not have an \"Audio Selector 3\", then the audio selector marked as \"default\" in that input will be used. If there is no audio selector marked as \"default\", silence will be inserted for the duration of that input. Alternatively, an \"Audio Selector Group\":#inputs-audio_selector_group name may be specified, with similar default/silence behavior. 
If no audio_source_name is specified, then \"Audio Selector 1\" will be chosen automatically." }, @@ -2603,6 +2603,11 @@ "locationName": "outlineSize", "documentation": "Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present." }, + "RemoveRubyReserveAttributes": { + "shape": "RemoveRubyReserveAttributes", + "locationName": "removeRubyReserveAttributes", + "documentation": "Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. To not remove any ruby reserve attributes: Keep the default value, Disabled." + }, "ShadowColor": { "shape": "BurninSubtitleShadowColor", "locationName": "shadowColor", @@ -4645,7 +4650,7 @@ }, "DropFrameTimecode": { "type": "string", - "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled.", + "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled.", "enum": [ "DISABLED", "ENABLED" @@ -7635,7 +7640,7 @@ "documentation": "Use this setting only when your video source has Dolby Vision studio mastering metadata that is carried in a separate XML file. Specify the Amazon S3 location for the metadata XML file. MediaConvert uses this file to provide global and frame-level metadata for Dolby Vision preprocessing. When you specify a file here and your input also has interleaved global and frame level metadata, MediaConvert ignores the interleaved metadata and uses only the the metadata from this external XML file. Note that your IAM service role must grant MediaConvert read permissions to this file. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html." }, "FileInput": { - "shape": "__stringPatternS3Https", + "shape": "__stringMax2048PatternS3Https", "locationName": "fileInput", "documentation": "Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, \"s3://bucket/vf/cpl.xml\". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL." }, @@ -8276,7 +8281,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. 
If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." }, "Inputs": { "shape": "__listOfInput", @@ -8447,7 +8452,7 @@ "FollowSource": { "shape": "__integerMin1Max150", "locationName": "followSource", - "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." + "documentation": "Specify the input that MediaConvert references for your default output settings. MediaConvert uses this input's Resolution, Frame rate, and Pixel aspect ratio for all outputs that you don't manually specify different output settings for. Enabling this setting will disable \"Follow source\" for all other inputs. If MediaConvert cannot follow your source, for example if you specify an audio-only input, MediaConvert uses the first followable input instead. In your JSON job specification, enter an integer from 1 to 150 corresponding to the order of your inputs." }, "Inputs": { "shape": "__listOfInputTemplate", @@ -10779,7 +10784,7 @@ "documentation": "Container specific settings." }, "Extension": { - "shape": "__string", + "shape": "__stringMax256", "locationName": "extension", "documentation": "Use Extension to specify the file extension for outputs in File output groups. If you do not specify a value, the service will use default extensions by container type as follows * MPEG-2 transport stream, m2ts * Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, webm * No Container, the service will use codec extensions (e.g. AAC, H265, H265, AC3)" }, @@ -10852,7 +10857,7 @@ "documentation": "Use Custom Group Name to specify a name for the output group. This value is displayed on the console and can make your job settings JSON more human-readable. It does not affect your outputs. Use up to twelve characters that are either letters, numbers, spaces, or underscores." }, "Name": { - "shape": "__string", + "shape": "__stringMax2048", "locationName": "name", "documentation": "Name of the output group" }, @@ -11450,6 +11455,14 @@ }, "documentation": "Use Manual audio remixing to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides." 
}, + "RemoveRubyReserveAttributes": { + "type": "string", + "documentation": "Optionally remove any tts:rubyReserve attributes present in your input, that do not have a tts:ruby attribute in the same element, from your output. Use if your vertical Japanese output captions have alignment issues. To remove ruby reserve attributes when present: Choose Enabled. To not remove any ruby reserve attributes: Keep the default value, Disabled.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "RenewalType": { "type": "string", "documentation": "Specifies whether the term of your reserved queue pricing plan is automatically extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term.", @@ -12053,6 +12066,14 @@ "SPECIFIEDSTART" ] }, + "TimecodeTrack": { + "type": "string", + "documentation": "To include a timecode track in your MP4 output: Choose Enabled. MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. To not include a timecode track: Keep the default value, Disabled.", + "enum": [ + "DISABLED", + "ENABLED" + ] + }, "TimedMetadata": { "type": "string", "documentation": "Set ID3 metadata to Passthrough to include ID3 metadata in this output. This includes ID3 metadata from the following features: ID3 timestamp period, and Custom ID3 metadata inserter. To exclude this ID3 metadata in this output: set ID3 metadata to None or leave blank.", @@ -12670,7 +12691,7 @@ "DropFrameTimecode": { "shape": "DropFrameTimecode", "locationName": "dropFrameTimecode", - "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion is enabled." + "documentation": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion or Timecode track is enabled." }, "FixedAfd": { "shape": "__integerMin0Max15", @@ -12707,6 +12728,11 @@ "locationName": "timecodeInsertion", "documentation": "Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode insertion when the input frame rate is identical to the output frame rate. To include timecodes in this output, set Timecode insertion to PIC_TIMING_SEI. To leave them out, set it to DISABLED. Default is DISABLED. When the service inserts timecodes in an output, by default, it uses any embedded timecodes from the input. If none are present, the service will set the timecode for the first output frame to zero. To change this default behavior, adjust the settings under Timecode configuration. In the console, these settings are located under Job > Job settings > Timecode configuration. Note - Timecode source under input settings does not affect the timecodes that are inserted in the output. Source under Job settings > Timecode configuration does." }, + "TimecodeTrack": { + "shape": "TimecodeTrack", + "locationName": "timecodeTrack", + "documentation": "To include a timecode track in your MP4 output: Choose Enabled. MediaConvert writes the timecode track in the Null Media Header box (NMHD), without any timecode text formatting information. 
You can also specify dropframe or non-dropframe timecode under the Drop Frame Timecode setting. To not include a timecode track: Keep the default value, Disabled." + }, "VideoPreprocessors": { "shape": "VideoPreprocessor", "locationName": "videoPreprocessors", @@ -14621,6 +14647,19 @@ "type": "string", "max": 1000 }, + "__stringMax2048": { + "type": "string", + "max": 2048 + }, + "__stringMax2048PatternS3Https": { + "type": "string", + "max": 2048, + "pattern": "^s3://([^\\/]+\\/+)+((([^\\/]*)))|^https?://[^\\/].*[^&]$" + }, + "__stringMax256": { + "type": "string", + "max": 256 + }, "__stringMin0": { "type": "string", "min": 0 diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index a69a05153a..6d768f3cfd 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -5162,6 +5162,56 @@ } ], "documentation": "Update the state of a node." + }, + "ListVersions": { + "name": "ListVersions", + "http": { + "method": "GET", + "requestUri": "/prod/versions", + "responseCode": 200 + }, + "input": { + "shape": "ListVersionsRequest" + }, + "output": { + "shape": "ListVersionsResponse", + "documentation": "List of encoder engine versions that are available in this AWS account." + }, + "errors": [ + { + "shape": "BadRequestException", + "documentation": "The service can't process your request because of a problem in the request. Verify that the syntax is correct." + }, + { + "shape": "InternalServerErrorException", + "documentation": "Internal Service Error" + }, + { + "shape": "ForbiddenException", + "documentation": "You don't have permissions for this action with the credentials that you sent." + }, + { + "shape": "BadGatewayException", + "documentation": "Bad Gateway Error" + }, + { + "shape": "NotFoundException", + "documentation": "The service could not complete your request." + }, + { + "shape": "GatewayTimeoutException", + "documentation": "Gateway Timeout" + }, + { + "shape": "TooManyRequestsException", + "documentation": "Too many requests have been sent in too short of a time. The service limits the rate at which it will accept requests." + }, + { + "shape": "ConflictException", + "documentation": "The service could not complete your request because there is a conflict with the current state of the resource." + } + ], + "documentation": "Retrieves an array of all the encoder engine versions that are available in this AWS account." } }, "shapes": { @@ -7020,6 +7070,11 @@ "shape": "DescribeAnywhereSettings", "locationName": "anywhereSettings", "documentation": "Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "Requested engine version for this channel." } }, "documentation": "Placeholder documentation for Channel" @@ -7161,6 +7216,16 @@ "shape": "DescribeAnywhereSettings", "locationName": "anywhereSettings", "documentation": "AnywhereSettings settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "The engine version that you requested for this channel." + }, + "UsedChannelEngineVersions": { + "shape": "__listOfChannelEngineVersionResponse", + "locationName": "usedChannelEngineVersions", + "documentation": "The engine version that the running pipelines are using." 
} }, "documentation": "Placeholder documentation for ChannelSummary" @@ -7329,6 +7394,15 @@ "shape": "AnywhereSettings", "locationName": "anywhereSettings", "documentation": "The Elemental Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionRequest", + "locationName": "channelEngineVersion", + "documentation": "The desired engine version for this channel." + }, + "DryRun": { + "shape": "__boolean", + "locationName": "dryRun" } }, "documentation": "Placeholder documentation for CreateChannel" @@ -7410,6 +7484,15 @@ "shape": "AnywhereSettings", "locationName": "anywhereSettings", "documentation": "The Elemental Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionRequest", + "locationName": "channelEngineVersion", + "documentation": "The desired engine version for this channel." + }, + "DryRun": { + "shape": "__boolean", + "locationName": "dryRun" } }, "documentation": "A request to create a channel" @@ -8012,6 +8095,11 @@ "shape": "DescribeAnywhereSettings", "locationName": "anywhereSettings", "documentation": "Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "Requested engine version for this channel." } }, "documentation": "Placeholder documentation for DeleteChannelResponse" @@ -8476,6 +8564,11 @@ "shape": "DescribeAnywhereSettings", "locationName": "anywhereSettings", "documentation": "Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "Requested engine version for this channel." } }, "documentation": "Placeholder documentation for DescribeChannelResponse" @@ -16008,6 +16101,11 @@ "shape": "__string", "locationName": "pipelineId", "documentation": "Pipeline ID" + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "Current engine version of the encoder for this pipeline." } }, "documentation": "Runtime details of a pipeline when a channel is running." @@ -17397,6 +17495,11 @@ "shape": "DescribeAnywhereSettings", "locationName": "anywhereSettings", "documentation": "Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "Requested engine version for this channel." } }, "documentation": "Placeholder documentation for StartChannelResponse" @@ -17818,6 +17921,11 @@ "shape": "DescribeAnywhereSettings", "locationName": "anywhereSettings", "documentation": "Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "Requested engine version for this channel." } }, "documentation": "Placeholder documentation for StopChannelResponse" @@ -18478,6 +18586,15 @@ "shape": "__string", "locationName": "roleArn", "documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel. If you do not specify this on an update call but the role was previously set that role will be removed." 
+ }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionRequest", + "locationName": "channelEngineVersion", + "documentation": "Channel engine version for this channel" + }, + "DryRun": { + "shape": "__boolean", + "locationName": "dryRun" } }, "documentation": "Placeholder documentation for UpdateChannel" @@ -18589,6 +18706,15 @@ "shape": "__string", "locationName": "roleArn", "documentation": "An optional Amazon Resource Name (ARN) of the role to assume when running the Channel. If you do not specify this on an update call but the role was previously set that role will be removed." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionRequest", + "locationName": "channelEngineVersion", + "documentation": "Channel engine version for this channel" + }, + "DryRun": { + "shape": "__boolean", + "locationName": "dryRun" } }, "documentation": "A request to update a channel.", @@ -20522,6 +20648,11 @@ "shape": "DescribeAnywhereSettings", "locationName": "anywhereSettings", "documentation": "Anywhere settings for this channel." + }, + "ChannelEngineVersion": { + "shape": "ChannelEngineVersionResponse", + "locationName": "channelEngineVersion", + "documentation": "Requested engine version for this channel." } }, "documentation": "Placeholder documentation for RestartChannelPipelinesResponse" @@ -28137,6 +28268,57 @@ "type": "string", "max": 100, "documentation": "Placeholder documentation for __stringMax100" + }, + "ChannelEngineVersionRequest": { + "type": "structure", + "members": { + "Version": { + "shape": "__string", + "locationName": "version", + "documentation": "The build identifier of the engine version to use for this channel. Specify 'DEFAULT' to reset to the default version." + } + }, + "documentation": "Placeholder documentation for ChannelEngineVersionRequest" + }, + "ChannelEngineVersionResponse": { + "type": "structure", + "members": { + "ExpirationDate": { + "shape": "__timestampIso8601", + "locationName": "expirationDate", + "documentation": "The UTC time when the version expires." + }, + "Version": { + "shape": "__string", + "locationName": "version", + "documentation": "The build identifier for this version of the channel version." + } + }, + "documentation": "Placeholder documentation for ChannelEngineVersionResponse" + }, + "ListVersionsRequest": { + "type": "structure", + "members": { + }, + "documentation": "Placeholder documentation for ListVersionsRequest" + }, + "ListVersionsResponse": { + "type": "structure", + "members": { + "Versions": { + "shape": "__listOfChannelEngineVersionResponse", + "locationName": "versions", + "documentation": "List of engine versions that are available for this AWS account." + } + }, + "documentation": "Placeholder documentation for ListVersionsResponse" + }, + "__listOfChannelEngineVersionResponse": { + "type": "list", + "member": { + "shape": "ChannelEngineVersionResponse" + }, + "documentation": "Placeholder documentation for __listOfChannelEngineVersionResponse" } }, "documentation": "API for AWS Elemental MediaLive" diff --git a/botocore/data/qconnect/2020-10-19/service-2.json b/botocore/data/qconnect/2020-10-19/service-2.json index 6080658747..cd4e132a2e 100644 --- a/botocore/data/qconnect/2020-10-19/service-2.json +++ b/botocore/data/qconnect/2020-10-19/service-2.json @@ -2368,6 +2368,10 @@ "shape":"UuidWithQualifier", "documentation":"

The AI Prompt identifier for the Intent Labeling prompt used by the ANSWER_RECOMMENDATION AI Agent.

" }, + "locale":{ + "shape":"NonEmptyString", + "documentation":"

The locale that specifies the language and region settings that determine the response language for QueryAssistant.

Changing this locale to anything other than en_US will turn off recommendations triggered by contact transcripts for agent assistance, as this feature is not supported in multiple languages.

" + }, "queryReformulationAIPromptId":{ "shape":"UuidWithQualifier", "documentation":"

The AI Prompt identifier for the Query Reformulation prompt used by the ANSWER_RECOMMENDATION AI Agent.

" @@ -5852,7 +5856,7 @@ }, "type":{ "shape":"GuardrailPiiEntityType", - "documentation":"

Configure AI Guardrail type when the PII entity is detected.

The following PIIs are used to block or mask sensitive information:

  • General

    • ADDRESS

      A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.

    • AGE

      An individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guarrails recognizes \"40 years\" as an age.

    • NAME

      An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of organizations or addresses. For example, AI Guardrail recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.

    • EMAIL

      An email address, such as marymajor@email.com.

    • PHONE

      A phone number. This entity type also includes fax and pager numbers.

    • USERNAME

      A user name that identifies an account, such as a login name, screen name, nick name, or handle.

    • PASSWORD

      An alphanumeric string that is used as a password, such as \"* very20special#pass*\".

    • DRIVER_ID

      The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.

    • LICENSE_PLATE

      A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.

    • VEHICLE_IDENTIFICATION_NUMBER

      A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs.

  • Finance

    • REDIT_DEBIT_CARD_CVV

      A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.

    • CREDIT_DEBIT_CARD_EXPIRY

      The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. AI Guardrail recognizes expiration dates such as 01/21, 01/2021, and Jan 2021.

    • CREDIT_DEBIT_CARD_NUMBER

      The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.

    • PIN

      A four-digit personal identification number (PIN) with which you can access your bank account.

    • INTERNATIONAL_BANK_ACCOUNT_NUMBER

      An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure.

    • SWIFT_CODE

      A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.

      SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.

  • IT

    • IP_ADDRESS

      An IPv4 address, such as 198.51.100.0.

    • MAC_ADDRESS

      A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC).

    • URL

      A web address, such as www.example.com.

    • AWS_ACCESS_KEY

      A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.

    • AWS_SECRET_KEY

      A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.

  • USA specific

    • US_BANK_ACCOUNT_NUMBER

      A US bank account number, which is typically 10 to 12 digits long.

    • US_BANK_ROUTING_NUMBER

      A US bank account routing number. These are typically nine digits long,

    • US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER

      A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contain a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and forth digits.

    • US_PASSPORT_NUMBER

      A US passport number. Passport numbers range from six to nine alphanumeric characters.

    • US_SOCIAL_SECURITY_NUMBER

      A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.

  • Canada specific

    • CA_HEALTH_NUMBER

      A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.

    • CA_SOCIAL_INSURANCE_NUMBER

      A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.

      The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm .

  • UK Specific

    • UK_NATIONAL_HEALTH_SERVICE_NUMBER

      A UK National Health Service Number is a 10-17 digit number, such as 485 555 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.

    • UK_NATIONAL_INSURANCE_NUMBER

      A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.

      The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, forth, and sixth digits.

    • UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER

      A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.

  • Custom

    • Regex filter - You can use a regular expressions to define patterns for an AI Guardrail to recognize and act upon such as serial number, booking ID etc..

" + "documentation":"

Configure AI Guardrail type when the PII entity is detected.

The following PIIs are used to block or mask sensitive information:

  • General

    • ADDRESS

      A physical address, such as \"100 Main Street, Anytown, USA\" or \"Suite #12, Building 123\". An address can include information such as the street, building, location, city, state, country, county, zip code, precinct, and neighborhood.

    • AGE

      An individual's age, including the quantity and unit of time. For example, in the phrase \"I am 40 years old,\" Guardrails recognizes \"40 years\" as an age.

    • NAME

      An individual's name. This entity type does not include titles, such as Dr., Mr., Mrs., or Miss. AI Guardrail doesn't apply this entity type to names that are part of organizations or addresses. For example, AI Guardrail recognizes the \"John Doe Organization\" as an organization, and it recognizes \"Jane Doe Street\" as an address.

    • EMAIL

      An email address, such as marymajor@email.com.

    • PHONE

      A phone number. This entity type also includes fax and pager numbers.

    • USERNAME

      A user name that identifies an account, such as a login name, screen name, nick name, or handle.

    • PASSWORD

      An alphanumeric string that is used as a password, such as \"* very20special#pass*\".

    • DRIVER_ID

      The number assigned to a driver's license, which is an official document permitting an individual to operate one or more motorized vehicles on a public road. A driver's license number consists of alphanumeric characters.

    • LICENSE_PLATE

      A license plate for a vehicle is issued by the state or country where the vehicle is registered. The format for passenger vehicles is typically five to eight digits, consisting of upper-case letters and numbers. The format varies depending on the location of the issuing state or country.

    • VEHICLE_IDENTIFICATION_NUMBER

      A Vehicle Identification Number (VIN) uniquely identifies a vehicle. VIN content and format are defined in the ISO 3779 specification. Each country has specific codes and formats for VINs.

  • Finance

    • CREDIT_DEBIT_CARD_CVV

      A three-digit card verification code (CVV) that is present on VISA, MasterCard, and Discover credit and debit cards. For American Express credit or debit cards, the CVV is a four-digit numeric code.

    • CREDIT_DEBIT_CARD_EXPIRY

      The expiration date for a credit or debit card. This number is usually four digits long and is often formatted as month/year or MM/YY. AI Guardrail recognizes expiration dates such as 01/21, 01/2021, and Jan 2021.

    • CREDIT_DEBIT_CARD_NUMBER

      The number for a credit or debit card. These numbers can vary from 13 to 16 digits in length. However, Amazon Comprehend also recognizes credit or debit card numbers when only the last four digits are present.

    • PIN

      A four-digit personal identification number (PIN) with which you can access your bank account.

    • INTERNATIONAL_BANK_ACCOUNT_NUMBER

      An International Bank Account Number has specific formats in each country. For more information, see www.iban.com/structure.

    • SWIFT_CODE

      A SWIFT code is a standard format of Bank Identifier Code (BIC) used to specify a particular bank or branch. Banks use these codes for money transfers such as international wire transfers.

      SWIFT codes consist of eight or 11 characters. The 11-digit codes refer to specific branches, while eight-digit codes (or 11-digit codes ending in 'XXX') refer to the head or primary office.

  • IT

    • IP_ADDRESS

      An IPv4 address, such as 198.51.100.0.

    • MAC_ADDRESS

      A media access control (MAC) address is a unique identifier assigned to a network interface controller (NIC).

    • URL

      A web address, such as www.example.com.

    • AWS_ACCESS_KEY

      A unique identifier that's associated with a secret access key; you use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.

    • AWS_SECRET_KEY

      A unique identifier that's associated with an access key. You use the access key ID and secret access key to sign programmatic Amazon Web Services requests cryptographically.

  • USA specific

    • US_BANK_ACCOUNT_NUMBER

      A US bank account number, which is typically 10 to 12 digits long.

    • US_BANK_ROUTING_NUMBER

      A US bank account routing number. These are typically nine digits long.

    • US_INDIVIDUAL_TAX_IDENTIFICATION_NUMBER

      A US Individual Taxpayer Identification Number (ITIN) is a nine-digit number that starts with a \"9\" and contains a \"7\" or \"8\" as the fourth digit. An ITIN can be formatted with a space or a dash after the third and fourth digits.

    • US_PASSPORT_NUMBER

      A US passport number. Passport numbers range from six to nine alphanumeric characters.

    • US_SOCIAL_SECURITY_NUMBER

      A US Social Security Number (SSN) is a nine-digit number that is issued to US citizens, permanent residents, and temporary working residents.

  • Canada specific

    • CA_HEALTH_NUMBER

      A Canadian Health Service Number is a 10-digit unique identifier, required for individuals to access healthcare benefits.

    • CA_SOCIAL_INSURANCE_NUMBER

      A Canadian Social Insurance Number (SIN) is a nine-digit unique identifier, required for individuals to access government programs and benefits.

      The SIN is formatted as three groups of three digits, such as 123-456-789. A SIN can be validated through a simple check-digit process called the Luhn algorithm.

  • UK Specific

    • UK_NATIONAL_HEALTH_SERVICE_NUMBER

      A UK National Health Service Number is a 10-17 digit number, such as 485 555 3456. The current system formats the 10-digit number with spaces after the third and sixth digits. The final digit is an error-detecting checksum.

    • UK_NATIONAL_INSURANCE_NUMBER

      A UK National Insurance Number (NINO) provides individuals with access to National Insurance (social security) benefits. It is also used for some purposes in the UK tax system.

      The number is nine digits long and starts with two letters, followed by six numbers and one letter. A NINO can be formatted with a space or a dash after the two letters and after the second, fourth, and sixth digits.

    • UK_UNIQUE_TAXPAYER_REFERENCE_NUMBER

      A UK Unique Taxpayer Reference (UTR) is a 10-digit number that identifies a taxpayer or a business.

  • Custom

    • Regex filter - You can use a regular expression to define patterns for an AI Guardrail to recognize and act upon, such as a serial number or booking ID.

" } }, "documentation":"

The PII entity to configure for the AI Guardrail.

" @@ -7207,6 +7211,10 @@ "associationConfigurations":{ "shape":"AssociationConfigurationList", "documentation":"

The association configurations for overriding behavior on this AI Agent.

" + }, + "locale":{ + "shape":"NonEmptyString", + "documentation":"

The locale that specifies the language and region settings that determine the response language for QueryAssistant.

" } }, "documentation":"

The configuration for the MANUAL_SEARCH AI Agent type.

" diff --git a/botocore/data/ssm-sap/2018-05-10/service-2.json b/botocore/data/ssm-sap/2018-05-10/service-2.json index 817565f36f..09b9095bdc 100644 --- a/botocore/data/ssm-sap/2018-05-10/service-2.json +++ b/botocore/data/ssm-sap/2018-05-10/service-2.json @@ -680,6 +680,35 @@ "type":"list", "member":{"shape":"ComponentId"} }, + "ComponentInfo":{ + "type":"structure", + "required":[ + "ComponentType", + "Sid", + "Ec2InstanceId" + ], + "members":{ + "ComponentType":{ + "shape":"ComponentType", + "documentation":"

This string is the type of the component.

Accepted value is WD.

" + }, + "Sid":{ + "shape":"SID", + "documentation":"

This string is the SAP System ID of the component.

Accepted values are alphanumeric.

" + }, + "Ec2InstanceId":{ + "shape":"InstanceId", + "documentation":"

This is the Amazon EC2 instance on which your SAP component is running.

Accepted values are alphanumeric.

" + } + }, + "documentation":"

This is information about the component of your SAP application, such as Web Dispatcher.

" + }, + "ComponentInfoList":{ + "type":"list", + "member":{"shape":"ComponentInfo"}, + "max":5, + "min":0 + }, "ComponentStatus":{ "type":"string", "enum":[ @@ -1612,6 +1641,10 @@ "DatabaseArn":{ "shape":"SsmSapArn", "documentation":"

The Amazon Resource Name of the SAP HANA database.

" + }, + "ComponentsInfo":{ + "shape":"ComponentInfoList", + "documentation":"

This is an optional parameter for component details to which the SAP ABAP application is attached, such as Web Dispatcher.

This is an array of ApplicationComponent objects. You may input 0 to 5 items.

" } } }, diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index 02d2584531..ebc4fada5d 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -1421,6 +1421,36 @@ } }, "shapes":{ + "AGAModeForDirectoryEnum":{ + "type":"string", + "enum":[ + "ENABLED_AUTO", + "DISABLED" + ] + }, + "AGAModeForWorkSpaceEnum":{ + "type":"string", + "enum":[ + "ENABLED_AUTO", + "DISABLED", + "INHERITED" + ] + }, + "AGAPreferredProtocolForDirectory":{ + "type":"string", + "enum":[ + "TCP", + "NONE" + ] + }, + "AGAPreferredProtocolForWorkSpace":{ + "type":"string", + "enum":[ + "TCP", + "NONE", + "INHERITED" + ] + }, "ARN":{ "type":"string", "pattern":"^arn:aws[a-z-]{0,7}:[A-Za-z0-9][A-za-z0-9_/.-]{0,62}:[A-za-z0-9_/.-]{0,63}:[A-za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.\\\\-]{0,1023}$" @@ -4050,6 +4080,36 @@ } } }, + "GlobalAcceleratorForDirectory":{ + "type":"structure", + "required":["Mode"], + "members":{ + "Mode":{ + "shape":"AGAModeForDirectoryEnum", + "documentation":"

Indicates if Global Accelerator for directory is enabled or disabled.

" + }, + "PreferredProtocol":{ + "shape":"AGAPreferredProtocolForDirectory", + "documentation":"

Indicates the preferred protocol for Global Accelerator.

" + } + }, + "documentation":"

Describes the Global Accelerator for the directory.

" + }, + "GlobalAcceleratorForWorkSpace":{ + "type":"structure", + "required":["Mode"], + "members":{ + "Mode":{ + "shape":"AGAModeForWorkSpaceEnum", + "documentation":"

Indicates if Global Accelerator for WorkSpaces is enabled, disabled, or the same mode as the associated directory.

" + }, + "PreferredProtocol":{ + "shape":"AGAPreferredProtocolForWorkSpace", + "documentation":"

Indicates the preferred protocol for Global Accelerator.

" + } + }, + "documentation":"

Describes the Global Accelerator for WorkSpaces.

" + }, "IDCConfig":{ "type":"structure", "members":{ @@ -5602,6 +5662,10 @@ "StorageConnectors":{ "shape":"StorageConnectors", "documentation":"

Indicates the storage connector used

" + }, + "GlobalAccelerator":{ + "shape":"GlobalAcceleratorForDirectory", + "documentation":"

Indicates the Global Accelerator properties.

" } }, "documentation":"

Describes the streaming properties.

" @@ -6731,6 +6795,10 @@ "OperatingSystemName":{ "shape":"OperatingSystemName", "documentation":"

The name of the operating system.

" + }, + "GlobalAccelerator":{ + "shape":"GlobalAcceleratorForWorkSpace", + "documentation":"

Indicates the Global Accelerator properties.

" } }, "documentation":"

Describes a WorkSpace.

" From 17c839bffe9cdd2263d1960e9123496f6fbb2389 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 19 Dec 2024 19:03:00 +0000 Subject: [PATCH 19/20] Update endpoints model --- botocore/data/endpoints.json | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 7ea2f774a1..9874c1a23b 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -24512,8 +24512,18 @@ }, "dlm" : { "endpoints" : { - "cn-north-1" : { }, - "cn-northwest-1" : { } + "cn-north-1" : { + "variants" : [ { + "hostname" : "dlm.cn-north-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + }, + "cn-northwest-1" : { + "variants" : [ { + "hostname" : "dlm.cn-northwest-1.api.amazonwebservices.com.cn", + "tags" : [ "dualstack" ] + } ] + } } }, "dms" : { @@ -31568,6 +31578,18 @@ "us-iso-east-1" : { } } }, + "organizations" : { + "endpoints" : { + "aws-iso-global" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "hostname" : "organizations.us-iso-east-1.c2s.ic.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-global" + }, "outposts" : { "endpoints" : { "us-iso-east-1" : { } From 74239a60b5739d595860635a4efcd42e2f1d125c Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 19 Dec 2024 19:03:53 +0000 Subject: [PATCH 20/20] Bumping version to 1.35.85 --- .changes/1.35.85.json | 32 +++++++++++++++++++ .../api-change-appstream-57477.json | 5 --- .../api-change-mediaconvert-81646.json | 5 --- .../api-change-medialive-36945.json | 5 --- .../api-change-qconnect-2818.json | 5 --- .../next-release/api-change-ssmsap-10597.json | 5 --- .../api-change-workspaces-17479.json | 5 --- CHANGELOG.rst | 11 +++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 10 files changed, 45 insertions(+), 32 deletions(-) create mode 100644 .changes/1.35.85.json delete mode 100644 .changes/next-release/api-change-appstream-57477.json delete mode 100644 .changes/next-release/api-change-mediaconvert-81646.json delete mode 100644 .changes/next-release/api-change-medialive-36945.json delete mode 100644 .changes/next-release/api-change-qconnect-2818.json delete mode 100644 .changes/next-release/api-change-ssmsap-10597.json delete mode 100644 .changes/next-release/api-change-workspaces-17479.json diff --git a/.changes/1.35.85.json b/.changes/1.35.85.json new file mode 100644 index 0000000000..2021a6c778 --- /dev/null +++ b/.changes/1.35.85.json @@ -0,0 +1,32 @@ +[ + { + "category": "``appstream``", + "description": "Added support for Rocky Linux 8 on Amazon AppStream 2.0", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "This release adds support for inserting timecode tracks into MP4 container outputs.", + "type": "api-change" + }, + { + "category": "``medialive``", + "description": "MediaLive is releasing ListVersions api", + "type": "api-change" + }, + { + "category": "``qconnect``", + "description": "Amazon Q in Connect enables agents to ask Q for assistance in multiple languages and Q will provide answers and recommended step-by-step guides in those languages. 
Qs default language is English (United States) and you can switch this by setting the locale configuration on the AI Agent.", + "type": "api-change" + }, + { + "category": "``ssm-sap``", + "description": "AWS Systems Manager for SAP added support for registration and discovery of distributed ABAP applications", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added AWS Global Accelerator (AGA) support for WorkSpaces Personal.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-appstream-57477.json b/.changes/next-release/api-change-appstream-57477.json deleted file mode 100644 index e90732a707..0000000000 --- a/.changes/next-release/api-change-appstream-57477.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``appstream``", - "description": "Added support for Rocky Linux 8 on Amazon AppStream 2.0" -} diff --git a/.changes/next-release/api-change-mediaconvert-81646.json b/.changes/next-release/api-change-mediaconvert-81646.json deleted file mode 100644 index e5eca09007..0000000000 --- a/.changes/next-release/api-change-mediaconvert-81646.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``mediaconvert``", - "description": "This release adds support for inserting timecode tracks into MP4 container outputs." -} diff --git a/.changes/next-release/api-change-medialive-36945.json b/.changes/next-release/api-change-medialive-36945.json deleted file mode 100644 index ebae8ba735..0000000000 --- a/.changes/next-release/api-change-medialive-36945.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``medialive``", - "description": "MediaLive is releasing ListVersions api" -} diff --git a/.changes/next-release/api-change-qconnect-2818.json b/.changes/next-release/api-change-qconnect-2818.json deleted file mode 100644 index 9bbf08af07..0000000000 --- a/.changes/next-release/api-change-qconnect-2818.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``qconnect``", - "description": "Amazon Q in Connect enables agents to ask Q for assistance in multiple languages and Q will provide answers and recommended step-by-step guides in those languages. Qs default language is English (United States) and you can switch this by setting the locale configuration on the AI Agent." -} diff --git a/.changes/next-release/api-change-ssmsap-10597.json b/.changes/next-release/api-change-ssmsap-10597.json deleted file mode 100644 index 02d732137b..0000000000 --- a/.changes/next-release/api-change-ssmsap-10597.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ssm-sap``", - "description": "AWS Systems Manager for SAP added support for registration and discovery of distributed ABAP applications" -} diff --git a/.changes/next-release/api-change-workspaces-17479.json b/.changes/next-release/api-change-workspaces-17479.json deleted file mode 100644 index c751e407bd..0000000000 --- a/.changes/next-release/api-change-workspaces-17479.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``workspaces``", - "description": "Added AWS Global Accelerator (AGA) support for WorkSpaces Personal." 
-} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4f2bb5f449..18d8191171 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,17 @@ CHANGELOG ========= +1.35.85 +======= + +* api-change:``appstream``: Added support for Rocky Linux 8 on Amazon AppStream 2.0 +* api-change:``mediaconvert``: This release adds support for inserting timecode tracks into MP4 container outputs. +* api-change:``medialive``: MediaLive is releasing ListVersions api +* api-change:``qconnect``: Amazon Q in Connect enables agents to ask Q for assistance in multiple languages and Q will provide answers and recommended step-by-step guides in those languages. Qs default language is English (United States) and you can switch this by setting the locale configuration on the AI Agent. +* api-change:``ssm-sap``: AWS Systems Manager for SAP added support for registration and discovery of distributed ABAP applications +* api-change:``workspaces``: Added AWS Global Accelerator (AGA) support for WorkSpaces Personal. + + 1.35.84 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 888050d741..44b3f4e569 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.84' +__version__ = '1.35.85' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 76cf6de905..9995798b84 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.84' +release = '1.35.85' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.