From 3baad69c6952364ceae70b38c9db7dcba4a7ebe5 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Fri, 26 Apr 2024 18:11:39 +0000 Subject: [PATCH] Regenerated Clients --- .../04e930bb6f1f4256a45408f269af7532.json | 8 + .../41575353444b40ffbf474f4155544f00.json | 8 + .../4bdcb0d3ce294bc8ae68890e260f17e2.json | 8 + .../9a28e16b3f7d40b7af5c668fd36e83f6.json | 8 + .../b69dd88c51b047f38585a81c676cd4f4.json | 8 + .../c6d725bae61b4db992f1c4c099499292.json | 8 + .../de0e316af1ea4e84a5b9969269479604.json | 8 + .../e2aeb56c5858457eb7da82b2c3951e57.json | 8 + .../api_op_ListPipelineExecutions.go | 3 + service/codepipeline/api_op_RollbackStage.go | 148 ++++++ service/codepipeline/deserializers.go | 446 ++++++++++++++++++ service/codepipeline/generated.json | 1 + service/codepipeline/serializers.go | 129 +++++ service/codepipeline/snapshot_test.go | 24 + service/codepipeline/types/enums.go | 38 ++ service/codepipeline/types/errors.go | 57 +++ service/codepipeline/types/types.go | 65 +++ service/codepipeline/validators.go | 45 ++ .../api_op_AdminGetUser.go | 5 +- .../api_op_CreateResourceServer.go | 9 +- .../api_op_CreateUserPoolClient.go | 7 +- .../api_op_DescribeResourceServer.go | 7 +- .../api_op_UpdateResourceServer.go | 7 +- .../cognitoidentityprovider/deserializers.go | 3 + .../cognitoidentityprovider/types/types.go | 97 ++-- service/connectcampaigns/deserializers.go | 9 + service/connectcampaigns/serializers.go | 5 + service/connectcampaigns/types/types.go | 3 + .../endpoints.go | 13 + .../endpoints_test.go | 79 +++- .../internal/endpoints/endpoints.go | 2 +- service/oam/api_op_CreateLink.go | 22 +- service/oam/api_op_CreateSink.go | 4 +- service/oam/api_op_GetLink.go | 5 + service/oam/api_op_UpdateLink.go | 14 +- service/oam/deserializers.go | 136 ++++++ service/oam/serializers.go | 59 +++ service/oam/types/types.go | 83 ++++ service/oam/validators.go | 63 +++ .../rds/api_op_CreateCustomDBEngineVersion.go | 3 + .../rds/api_op_DeleteCustomDBEngineVersion.go | 3 + .../rds/api_op_ModifyCustomDBEngineVersion.go | 3 + service/rds/deserializers.go | 80 ++++ service/rds/types/types.go | 6 + service/support/endpoints.go | 36 +- service/support/endpoints_test.go | 115 ++++- 46 files changed, 1796 insertions(+), 102 deletions(-) create mode 100644 .changelog/04e930bb6f1f4256a45408f269af7532.json create mode 100644 .changelog/41575353444b40ffbf474f4155544f00.json create mode 100644 .changelog/4bdcb0d3ce294bc8ae68890e260f17e2.json create mode 100644 .changelog/9a28e16b3f7d40b7af5c668fd36e83f6.json create mode 100644 .changelog/b69dd88c51b047f38585a81c676cd4f4.json create mode 100644 .changelog/c6d725bae61b4db992f1c4c099499292.json create mode 100644 .changelog/de0e316af1ea4e84a5b9969269479604.json create mode 100644 .changelog/e2aeb56c5858457eb7da82b2c3951e57.json create mode 100644 service/codepipeline/api_op_RollbackStage.go diff --git a/.changelog/04e930bb6f1f4256a45408f269af7532.json b/.changelog/04e930bb6f1f4256a45408f269af7532.json new file mode 100644 index 00000000000..fde464db392 --- /dev/null +++ b/.changelog/04e930bb6f1f4256a45408f269af7532.json @@ -0,0 +1,8 @@ +{ + "id": "04e930bb-6f1f-4256-a454-08f269af7532", + "type": "feature", + "description": "SupportsLimitlessDatabase field added to describe-db-engine-versions to indicate whether the DB engine version supports Aurora Limitless Database.", + "modules": [ + "service/rds" + ] +} \ No newline at end of file diff --git a/.changelog/41575353444b40ffbf474f4155544f00.json b/.changelog/41575353444b40ffbf474f4155544f00.json new 
file mode 100644 index 00000000000..1cc0d8d6b2d --- /dev/null +++ b/.changelog/41575353444b40ffbf474f4155544f00.json @@ -0,0 +1,8 @@ +{ + "id": "41575353-444b-40ff-bf47-4f4155544f00", + "type": "release", + "description": "New AWS service client module", + "modules": [ + "internal/protocoltest/smithyrpcv2cbor" + ] +} \ No newline at end of file diff --git a/.changelog/4bdcb0d3ce294bc8ae68890e260f17e2.json b/.changelog/4bdcb0d3ce294bc8ae68890e260f17e2.json new file mode 100644 index 00000000000..f89f6041ede --- /dev/null +++ b/.changelog/4bdcb0d3ce294bc8ae68890e260f17e2.json @@ -0,0 +1,8 @@ +{ + "id": "4bdcb0d3-ce29-4bc8-ae68-890e260f17e2", + "type": "feature", + "description": "Add ability to manually and automatically roll back a pipeline stage to a previously successful execution.", + "modules": [ + "service/codepipeline" + ] +} \ No newline at end of file diff --git a/.changelog/9a28e16b3f7d40b7af5c668fd36e83f6.json b/.changelog/9a28e16b3f7d40b7af5c668fd36e83f6.json new file mode 100644 index 00000000000..bf8b193d0f2 --- /dev/null +++ b/.changelog/9a28e16b3f7d40b7af5c668fd36e83f6.json @@ -0,0 +1,8 @@ +{ + "id": "9a28e16b-3f7d-40b7-af5c-668fd36e83f6", + "type": "feature", + "description": "This release introduces support for Source Accounts to define which Metrics and Logs to share with the Monitoring Account", + "modules": [ + "service/oam" + ] +} \ No newline at end of file diff --git a/.changelog/b69dd88c51b047f38585a81c676cd4f4.json b/.changelog/b69dd88c51b047f38585a81c676cd4f4.json new file mode 100644 index 00000000000..81b10049a03 --- /dev/null +++ b/.changelog/b69dd88c51b047f38585a81c676cd4f4.json @@ -0,0 +1,8 @@ +{ + "id": "b69dd88c-51b0-47f3-8585-a81c676cd4f4", + "type": "feature", + "description": "Add LimitExceededException to SignUp errors", + "modules": [ + "service/cognitoidentityprovider" + ] +} \ No newline at end of file diff --git a/.changelog/c6d725bae61b4db992f1c4c099499292.json b/.changelog/c6d725bae61b4db992f1c4c099499292.json new file mode 100644 index 00000000000..25e65fa102d --- /dev/null +++ b/.changelog/c6d725bae61b4db992f1c4c099499292.json @@ -0,0 +1,8 @@ +{ + "id": "c6d725ba-e61b-4db9-92f1-c4c099499292", + "type": "feature", + "description": "This release adds support for specifying if Answering Machine should wait for prompt sound.", + "modules": [ + "service/connectcampaigns" + ] +} \ No newline at end of file diff --git a/.changelog/de0e316af1ea4e84a5b9969269479604.json b/.changelog/de0e316af1ea4e84a5b9969269479604.json new file mode 100644 index 00000000000..9124bc59787 --- /dev/null +++ b/.changelog/de0e316af1ea4e84a5b9969269479604.json @@ -0,0 +1,8 @@ +{ + "id": "de0e316a-f1ea-4e84-a5b9-969269479604", + "type": "feature", + "description": "Releasing minor endpoint updates.", + "modules": [ + "service/support" + ] +} \ No newline at end of file diff --git a/.changelog/e2aeb56c5858457eb7da82b2c3951e57.json b/.changelog/e2aeb56c5858457eb7da82b2c3951e57.json new file mode 100644 index 00000000000..f335dd5f4b0 --- /dev/null +++ b/.changelog/e2aeb56c5858457eb7da82b2c3951e57.json @@ -0,0 +1,8 @@ +{ + "id": "e2aeb56c-5858-457e-b7da-82b2c3951e57", + "type": "feature", + "description": "Releasing minor endpoint updates.", + "modules": [ + "service/marketplaceentitlementservice" + ] +} \ No newline at end of file diff --git a/service/codepipeline/api_op_ListPipelineExecutions.go b/service/codepipeline/api_op_ListPipelineExecutions.go index 41d10bf476c..87a451019b2 100644 --- a/service/codepipeline/api_op_ListPipelineExecutions.go +++ 
b/service/codepipeline/api_op_ListPipelineExecutions.go @@ -36,6 +36,9 @@ type ListPipelineExecutionsInput struct { // This member is required. PipelineName *string + // The pipeline execution to filter on. + Filter *types.PipelineExecutionFilter + // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned nextToken value. Pipeline // history is limited to the most recent 12 months, based on pipeline execution diff --git a/service/codepipeline/api_op_RollbackStage.go b/service/codepipeline/api_op_RollbackStage.go new file mode 100644 index 00000000000..b4f2cae7bf3 --- /dev/null +++ b/service/codepipeline/api_op_RollbackStage.go @@ -0,0 +1,148 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package codepipeline + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Rolls back a stage execution. +func (c *Client) RollbackStage(ctx context.Context, params *RollbackStageInput, optFns ...func(*Options)) (*RollbackStageOutput, error) { + if params == nil { + params = &RollbackStageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RollbackStage", params, optFns, c.addOperationRollbackStageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RollbackStageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RollbackStageInput struct { + + // The name of the pipeline for which the stage will be rolled back. + // + // This member is required. + PipelineName *string + + // The name of the stage in the pipeline to be rolled back. + // + // This member is required. + StageName *string + + // The pipeline execution ID for the stage to be rolled back to. + // + // This member is required. + TargetPipelineExecutionId *string + + noSmithyDocumentSerde +} + +type RollbackStageOutput struct { + + // The execution ID of the pipeline execution for the stage that has been rolled + // back. + // + // This member is required. + PipelineExecutionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRollbackStageMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRollbackStage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRollbackStage{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RollbackStage"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpRollbackStageValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRollbackStage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRollbackStage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RollbackStage", + } +} diff --git a/service/codepipeline/deserializers.go b/service/codepipeline/deserializers.go index 76283547c3e..436fa8d2377 100644 --- a/service/codepipeline/deserializers.go +++ b/service/codepipeline/deserializers.go @@ -3618,6 +3618,131 @@ func awsAwsjson11_deserializeOpErrorRetryStageExecution(response *smithyhttp.Res } } +type awsAwsjson11_deserializeOpRollbackStage struct { +} + +func (*awsAwsjson11_deserializeOpRollbackStage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRollbackStage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRollbackStage(response, &metadata) + } + output := &RollbackStageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentRollbackStageOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRollbackStage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson11_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("PipelineExecutionNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorPipelineExecutionNotFoundException(response, errorBody) + + case strings.EqualFold("PipelineExecutionOutdatedException", errorCode): + return awsAwsjson11_deserializeErrorPipelineExecutionOutdatedException(response, errorBody) + + case strings.EqualFold("PipelineNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorPipelineNotFoundException(response, errorBody) + + case strings.EqualFold("StageNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorStageNotFoundException(response, errorBody) + + case strings.EqualFold("UnableToRollbackStageException", errorCode): + return awsAwsjson11_deserializeErrorUnableToRollbackStageException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsAwsjson11_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return 
genericError + + } +} + type awsAwsjson11_deserializeOpStartPipelineExecution struct { } @@ -5252,6 +5377,41 @@ func awsAwsjson11_deserializeErrorPipelineExecutionNotStoppableException(respons return output } +func awsAwsjson11_deserializeErrorPipelineExecutionOutdatedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.PipelineExecutionOutdatedException{} + err := awsAwsjson11_deserializeDocumentPipelineExecutionOutdatedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + func awsAwsjson11_deserializeErrorPipelineNameInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -5532,6 +5692,41 @@ func awsAwsjson11_deserializeErrorTooManyTagsException(response *smithyhttp.Resp return output } +func awsAwsjson11_deserializeErrorUnableToRollbackStageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.UnableToRollbackStageException{} + err := awsAwsjson11_deserializeDocumentUnableToRollbackStageException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + func awsAwsjson11_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -8427,6 +8622,46 @@ func awsAwsjson11_deserializeDocumentExecutorConfiguration(v **types.ExecutorCon return nil } +func awsAwsjson11_deserializeDocumentFailureConditions(v **types.FailureConditions, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.FailureConditions + if *v == nil { + sv = &types.FailureConditions{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "result": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Result to be of type string, got %T 
instead", value) + } + sv.Result = types.Result(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentGitBranchFilterCriteria(v **types.GitBranchFilterCriteria, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -10417,6 +10652,15 @@ func awsAwsjson11_deserializeDocumentPipelineExecution(v **types.PipelineExecuti sv.ExecutionMode = types.ExecutionMode(jtv) } + case "executionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecutionType to be of type string, got %T instead", value) + } + sv.ExecutionType = types.ExecutionType(jtv) + } + case "pipelineExecutionId": if value != nil { jtv, ok := value.(string) @@ -10448,6 +10692,11 @@ func awsAwsjson11_deserializeDocumentPipelineExecution(v **types.PipelineExecuti sv.PipelineVersion = ptr.Int32(int32(i64)) } + case "rollbackMetadata": + if err := awsAwsjson11_deserializeDocumentPipelineRollbackMetadata(&sv.RollbackMetadata, value); err != nil { + return err + } + case "status": if value != nil { jtv, ok := value.(string) @@ -10565,6 +10814,46 @@ func awsAwsjson11_deserializeDocumentPipelineExecutionNotStoppableException(v ** return nil } +func awsAwsjson11_deserializeDocumentPipelineExecutionOutdatedException(v **types.PipelineExecutionOutdatedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PipelineExecutionOutdatedException + if *v == nil { + sv = &types.PipelineExecutionOutdatedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Message to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentPipelineExecutionSummary(v **types.PipelineExecutionSummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -10596,6 +10885,15 @@ func awsAwsjson11_deserializeDocumentPipelineExecutionSummary(v **types.Pipeline sv.ExecutionMode = types.ExecutionMode(jtv) } + case "executionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecutionType to be of type string, got %T instead", value) + } + sv.ExecutionType = types.ExecutionType(jtv) + } + case "lastUpdateTime": if value != nil { switch jtv := value.(type) { @@ -10621,6 +10919,11 @@ func awsAwsjson11_deserializeDocumentPipelineExecutionSummary(v **types.Pipeline sv.PipelineExecutionId = ptr.String(jtv) } + case "rollbackMetadata": + if err := awsAwsjson11_deserializeDocumentPipelineRollbackMetadata(&sv.RollbackMetadata, value); err != nil { + return err + } + case "sourceRevisions": if err := awsAwsjson11_deserializeDocumentSourceRevisionList(&sv.SourceRevisions, value); err != nil { return err @@ -10651,6 +10954,15 @@ func awsAwsjson11_deserializeDocumentPipelineExecutionSummary(v **types.Pipeline sv.Status = types.PipelineExecutionStatus(jtv) } + case "statusSummary": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PipelineExecutionStatusSummary to be of type string, got %T instead", value) + } + sv.StatusSummary = 
ptr.String(jtv) + } + case "stopTrigger": if err := awsAwsjson11_deserializeDocumentStopExecutionTrigger(&sv.StopTrigger, value); err != nil { return err @@ -10906,6 +11218,46 @@ func awsAwsjson11_deserializeDocumentPipelineNotFoundException(v **types.Pipelin return nil } +func awsAwsjson11_deserializeDocumentPipelineRollbackMetadata(v **types.PipelineRollbackMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PipelineRollbackMetadata + if *v == nil { + sv = &types.PipelineRollbackMetadata{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "rollbackTargetPipelineExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PipelineExecutionId to be of type string, got %T instead", value) + } + sv.RollbackTargetPipelineExecutionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentPipelineStageDeclarationList(v *[]types.StageDeclaration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -11873,6 +12225,11 @@ func awsAwsjson11_deserializeDocumentStageDeclaration(v **types.StageDeclaration sv.Name = ptr.String(jtv) } + case "onFailure": + if err := awsAwsjson11_deserializeDocumentFailureConditions(&sv.OnFailure, value); err != nil { + return err + } + default: _, _ = key, value @@ -11922,6 +12279,15 @@ func awsAwsjson11_deserializeDocumentStageExecution(v **types.StageExecution, va sv.Status = types.StageExecutionStatus(jtv) } + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecutionType to be of type string, got %T instead", value) + } + sv.Type = types.ExecutionType(jtv) + } + default: _, _ = key, value @@ -12593,6 +12959,46 @@ func awsAwsjson11_deserializeDocumentTransitionState(v **types.TransitionState, return nil } +func awsAwsjson11_deserializeDocumentUnableToRollbackStageException(v **types.UnableToRollbackStageException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnableToRollbackStageException + if *v == nil { + sv = &types.UnableToRollbackStageException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentValidationException(v **types.ValidationException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13951,6 +14357,46 @@ func awsAwsjson11_deserializeOpDocumentRetryStageExecutionOutput(v **RetryStageE return nil } +func awsAwsjson11_deserializeOpDocumentRollbackStageOutput(v **RollbackStageOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RollbackStageOutput + if *v == nil { + sv = &RollbackStageOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "pipelineExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PipelineExecutionId to be of type string, got %T instead", value) + } + sv.PipelineExecutionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentStartPipelineExecutionOutput(v **StartPipelineExecutionOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/codepipeline/generated.json b/service/codepipeline/generated.json index cea8b619bb1..07dca6af346 100644 --- a/service/codepipeline/generated.json +++ b/service/codepipeline/generated.json @@ -41,6 +41,7 @@ "api_op_PutWebhook.go", "api_op_RegisterWebhookWithThirdParty.go", "api_op_RetryStageExecution.go", + "api_op_RollbackStage.go", "api_op_StartPipelineExecution.go", "api_op_StopPipelineExecution.go", "api_op_TagResource.go", diff --git a/service/codepipeline/serializers.go b/service/codepipeline/serializers.go index ed252026a37..eb13a2fb9e2 100644 --- a/service/codepipeline/serializers.go +++ b/service/codepipeline/serializers.go @@ -1831,6 +1831,61 @@ func (m *awsAwsjson11_serializeOpRetryStageExecution) HandleSerialize(ctx contex return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpRollbackStage struct { +} + +func (*awsAwsjson11_serializeOpRollbackStage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRollbackStage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RollbackStageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("CodePipeline_20150709.RollbackStage") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRollbackStageInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return 
next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpStartPipelineExecution struct { } @@ -2798,6 +2853,18 @@ func awsAwsjson11_serializeDocumentExecutorConfiguration(v *types.ExecutorConfig return nil } +func awsAwsjson11_serializeDocumentFailureConditions(v *types.FailureConditions, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Result) > 0 { + ok := object.Key("result") + ok.String(string(v.Result)) + } + + return nil +} + func awsAwsjson11_serializeDocumentFailureDetails(v *types.FailureDetails, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3213,6 +3280,20 @@ func awsAwsjson11_serializeDocumentPipelineDeclaration(v *types.PipelineDeclarat return nil } +func awsAwsjson11_serializeDocumentPipelineExecutionFilter(v *types.PipelineExecutionFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SucceededInStage != nil { + ok := object.Key("succeededInStage") + if err := awsAwsjson11_serializeDocumentSucceededInStageFilter(v.SucceededInStage, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeDocumentPipelineStageDeclarationList(v []types.StageDeclaration, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -3440,6 +3521,25 @@ func awsAwsjson11_serializeDocumentStageDeclaration(v *types.StageDeclaration, v ok.String(*v.Name) } + if v.OnFailure != nil { + ok := object.Key("onFailure") + if err := awsAwsjson11_serializeDocumentFailureConditions(v.OnFailure, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentSucceededInStageFilter(v *types.SucceededInStageFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.StageName != nil { + ok := object.Key("stageName") + ok.String(*v.StageName) + } + return nil } @@ -3953,6 +4053,13 @@ func awsAwsjson11_serializeOpDocumentListPipelineExecutionsInput(v *ListPipeline object := value.Object() defer object.Close() + if v.Filter != nil { + ok := object.Key("filter") + if err := awsAwsjson11_serializeDocumentPipelineExecutionFilter(v.Filter, ok); err != nil { + return err + } + } + if v.MaxResults != nil { ok := object.Key("maxResults") ok.Integer(*v.MaxResults) @@ -4312,6 +4419,28 @@ func awsAwsjson11_serializeOpDocumentRetryStageExecutionInput(v *RetryStageExecu return nil } +func awsAwsjson11_serializeOpDocumentRollbackStageInput(v *RollbackStageInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PipelineName != nil { + ok := object.Key("pipelineName") + ok.String(*v.PipelineName) + } + + if v.StageName != nil { + ok := object.Key("stageName") + ok.String(*v.StageName) + } + + if v.TargetPipelineExecutionId != nil { + ok := object.Key("targetPipelineExecutionId") + ok.String(*v.TargetPipelineExecutionId) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentStartPipelineExecutionInput(v *StartPipelineExecutionInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/codepipeline/snapshot_test.go b/service/codepipeline/snapshot_test.go index 57103f2823e..dc507891b5c 100644 --- a/service/codepipeline/snapshot_test.go +++ b/service/codepipeline/snapshot_test.go @@ -458,6 +458,18 @@ func TestCheckSnapshot_RetryStageExecution(t *testing.T) { } } +func TestCheckSnapshot_RollbackStage(t *testing.T) { + svc := New(Options{}) + _, err := svc.RollbackStage(context.Background(), nil, 
func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "RollbackStage") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_StartPipelineExecution(t *testing.T) { svc := New(Options{}) _, err := svc.StartPipelineExecution(context.Background(), nil, func(o *Options) { @@ -925,6 +937,18 @@ func TestUpdateSnapshot_RetryStageExecution(t *testing.T) { } } +func TestUpdateSnapshot_RollbackStage(t *testing.T) { + svc := New(Options{}) + _, err := svc.RollbackStage(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "RollbackStage") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_StartPipelineExecution(t *testing.T) { svc := New(Options{}) _, err := svc.StartPipelineExecution(context.Background(), nil, func(o *Options) { diff --git a/service/codepipeline/types/enums.go b/service/codepipeline/types/enums.go index 219e71001bf..c2f2275e3f9 100644 --- a/service/codepipeline/types/enums.go +++ b/service/codepipeline/types/enums.go @@ -193,6 +193,24 @@ func (ExecutionMode) Values() []ExecutionMode { } } +type ExecutionType string + +// Enum values for ExecutionType +const ( + ExecutionTypeStandard ExecutionType = "STANDARD" + ExecutionTypeRollback ExecutionType = "ROLLBACK" +) + +// Values returns all known values for ExecutionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExecutionType) Values() []ExecutionType { + return []ExecutionType{ + "STANDARD", + "ROLLBACK", + } +} + type ExecutorType string // Enum values for ExecutorType @@ -347,6 +365,22 @@ func (PipelineType) Values() []PipelineType { } } +type Result string + +// Enum values for Result +const ( + ResultRollback Result = "ROLLBACK" +) + +// Values returns all known values for Result. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Result) Values() []Result { + return []Result{ + "ROLLBACK", + } +} + type SourceRevisionType string // Enum values for SourceRevisionType @@ -458,6 +492,8 @@ const ( TriggerTypeCloudWatchEvent TriggerType = "CloudWatchEvent" TriggerTypePutActionRevision TriggerType = "PutActionRevision" TriggerTypeWebhookV2 TriggerType = "WebhookV2" + TriggerTypeManualRollback TriggerType = "ManualRollback" + TriggerTypeAutomatedRollback TriggerType = "AutomatedRollback" ) // Values returns all known values for TriggerType. Note that this can be expanded @@ -472,6 +508,8 @@ func (TriggerType) Values() []TriggerType { "CloudWatchEvent", "PutActionRevision", "WebhookV2", + "ManualRollback", + "AutomatedRollback", } } diff --git a/service/codepipeline/types/errors.go b/service/codepipeline/types/errors.go index 3f10969ca1b..ba90eb2bdf5 100644 --- a/service/codepipeline/types/errors.go +++ b/service/codepipeline/types/errors.go @@ -735,6 +735,35 @@ func (e *PipelineExecutionNotStoppableException) ErrorFault() smithy.ErrorFault return smithy.FaultClient } +// The specified pipeline execution is outdated and cannot be used as a target +// pipeline execution for rollback. 
+type PipelineExecutionOutdatedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PipelineExecutionOutdatedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PipelineExecutionOutdatedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PipelineExecutionOutdatedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PipelineExecutionOutdatedException" + } + return *e.ErrorCodeOverride +} +func (e *PipelineExecutionOutdatedException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + // The specified pipeline name is already in use. type PipelineNameInUseException struct { Message *string @@ -944,6 +973,34 @@ func (e *TooManyTagsException) ErrorCode() string { } func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// Unable to roll back the stage. The cause might be if the pipeline version has +// changed since the target pipeline execution was deployed, the stage is currently +// running, or an incorrect target pipeline execution ID was provided. +type UnableToRollbackStageException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnableToRollbackStageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnableToRollbackStageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnableToRollbackStageException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnableToRollbackStageException" + } + return *e.ErrorCodeOverride +} +func (e *UnableToRollbackStageException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The validation was specified in an invalid format. type ValidationException struct { Message *string diff --git a/service/codepipeline/types/types.go b/service/codepipeline/types/types.go index 1a777fb7ecf..3790e19e51d 100644 --- a/service/codepipeline/types/types.go +++ b/service/codepipeline/types/types.go @@ -925,6 +925,17 @@ type ExecutorConfiguration struct { noSmithyDocumentSerde } +// The configuration that specifies the result, such as rollback, to occur upon +// stage failure. +type FailureConditions struct { + + // The specified result for when the failure conditions are met, such as rolling + // back the stage. + Result Result + + noSmithyDocumentSerde +} + // Represents information about failure details. type FailureDetails struct { @@ -1349,6 +1360,9 @@ type PipelineExecution struct { // default mode is SUPERSEDED. ExecutionMode ExecutionMode + // The type of the pipeline execution. + ExecutionType ExecutionType + // The ID of the pipeline execution. PipelineExecutionId *string @@ -1358,6 +1372,9 @@ type PipelineExecution struct { // The version number of the pipeline with the specified pipeline execution. PipelineVersion *int32 + // The metadata about the execution pertaining to stage rollback. + RollbackMetadata *PipelineRollbackMetadata + // The status of the pipeline execution. // - Cancelled: The pipeline’s definition was updated before the pipeline // execution could be completed. @@ -1389,6 +1406,16 @@ type PipelineExecution struct { noSmithyDocumentSerde } +// The pipeline execution to filter on. 
+type PipelineExecutionFilter struct { + + // Filter for pipeline executions where the stage was successful in the current + // pipeline version. + SucceededInStage *SucceededInStageFilter + + noSmithyDocumentSerde +} + // Summary information about a pipeline execution. type PipelineExecutionSummary struct { @@ -1396,6 +1423,9 @@ type PipelineExecutionSummary struct { // default mode is SUPERSEDED. ExecutionMode ExecutionMode + // Type of the pipeline execution. + ExecutionType ExecutionType + // The date and time of the last change to the pipeline execution, in timestamp // format. LastUpdateTime *time.Time @@ -1403,6 +1433,9 @@ type PipelineExecutionSummary struct { // The ID of the pipeline execution. PipelineExecutionId *string + // The metadata for the stage execution to be rolled back. + RollbackMetadata *PipelineRollbackMetadata + // A list of the source artifact revisions that initiated a pipeline execution. SourceRevisions []SourceRevision @@ -1426,6 +1459,9 @@ type PipelineExecutionSummary struct { // - Failed: The pipeline execution was not completed successfully. Status PipelineExecutionStatus + // Status summary for the pipeline. + StatusSummary *string + // The interaction that stopped a pipeline execution. StopTrigger *StopExecutionTrigger @@ -1460,6 +1496,15 @@ type PipelineMetadata struct { noSmithyDocumentSerde } +// The metadata for the stage execution to be rolled back. +type PipelineRollbackMetadata struct { + + // The pipeline execution ID to which the stage will be rolled back. + RollbackTargetPipelineExecutionId *string + + noSmithyDocumentSerde +} + // Returns a summary of a pipeline. type PipelineSummary struct { @@ -1672,6 +1717,11 @@ type StageDeclaration struct { // Reserved for future use. Blockers []BlockerDeclaration + // The method to use when a stage has not completed successfully. For example, + // configuring this field for rollback will roll back a failed stage automatically + // to the last successful pipeline execution in the stage. + OnFailure *FailureConditions + noSmithyDocumentSerde } @@ -1690,6 +1740,10 @@ type StageExecution struct { // This member is required. Status StageExecutionStatus + // The type of pipeline execution for the stage, such as a rollback pipeline + // execution. + Type ExecutionType + noSmithyDocumentSerde } @@ -1727,6 +1781,17 @@ type StopExecutionTrigger struct { noSmithyDocumentSerde } +// Filter for pipeline executions that have successfully completed the stage in +// the current pipeline version. +type SucceededInStageFilter struct { + + // The name of the stage for filtering for pipeline executions where the stage was + // successful in the current pipeline version. + StageName *string + + noSmithyDocumentSerde +} + // A tag is a key-value pair that is used to manage the resource. 
type Tag struct { diff --git a/service/codepipeline/validators.go b/service/codepipeline/validators.go index 848c68d82d2..4d1b3b9b47c 100644 --- a/service/codepipeline/validators.go +++ b/service/codepipeline/validators.go @@ -570,6 +570,26 @@ func (m *validateOpRetryStageExecution) HandleInitialize(ctx context.Context, in return next.HandleInitialize(ctx, in) } +type validateOpRollbackStage struct { +} + +func (*validateOpRollbackStage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRollbackStage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RollbackStageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRollbackStageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpStartPipelineExecution struct { } @@ -802,6 +822,10 @@ func addOpRetryStageExecutionValidationMiddleware(stack *middleware.Stack) error return stack.Initialize.Add(&validateOpRetryStageExecution{}, middleware.After) } +func addOpRollbackStageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRollbackStage{}, middleware.After) +} + func addOpStartPipelineExecutionValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpStartPipelineExecution{}, middleware.After) } @@ -2341,6 +2365,27 @@ func validateOpRetryStageExecutionInput(v *RetryStageExecutionInput) error { } } +func validateOpRollbackStageInput(v *RollbackStageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RollbackStageInput"} + if v.PipelineName == nil { + invalidParams.Add(smithy.NewErrParamRequired("PipelineName")) + } + if v.StageName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StageName")) + } + if v.TargetPipelineExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetPipelineExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpStartPipelineExecutionInput(v *StartPipelineExecutionInput) error { if v == nil { return nil diff --git a/service/cognitoidentityprovider/api_op_AdminGetUser.go b/service/cognitoidentityprovider/api_op_AdminGetUser.go index 4e59866113d..5af14b1acb2 100644 --- a/service/cognitoidentityprovider/api_op_AdminGetUser.go +++ b/service/cognitoidentityprovider/api_op_AdminGetUser.go @@ -83,8 +83,9 @@ type AdminGetUserOutput struct { // The date the user was created. UserCreateDate *time.Time - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. UserLastModifiedDate *time.Time // The MFA options that are activated for the user. 
The possible values in this diff --git a/service/cognitoidentityprovider/api_op_CreateResourceServer.go b/service/cognitoidentityprovider/api_op_CreateResourceServer.go index 556ee39da01..e347efb1161 100644 --- a/service/cognitoidentityprovider/api_op_CreateResourceServer.go +++ b/service/cognitoidentityprovider/api_op_CreateResourceServer.go @@ -35,9 +35,12 @@ func (c *Client) CreateResourceServer(ctx context.Context, params *CreateResourc type CreateResourceServerInput struct { - // A unique resource server identifier for the resource server. This could be an - // HTTPS endpoint where the resource server is located, such as - // https://my-weather-api.example.com . + // A unique resource server identifier for the resource server. The identifier can + // be an API friendly name like solar-system-data . You can also set an API URL + // like https://solar-system-data-api.example.com as your identifier. Amazon + // Cognito represents scopes in the access token in the format + // $resource-server-identifier/$scope . Longer scope-identifier strings increase + // the size of your access tokens. // // This member is required. Identifier *string diff --git a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go index 6f8fcd2848c..79c6288c186 100644 --- a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go +++ b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go @@ -116,12 +116,13 @@ type CreateUserPoolClientInput struct { // supported. CallbackURLs []string - // The default redirect URI. Must be in the CallbackURLs list. A redirect URI - // must: + // The default redirect URI. In app clients with one assigned IdP, replaces + // redirect_uri in authentication requests. Must be in the CallbackURLs list. A + // redirect URI must: // - Be an absolute URI. // - Be registered with the authorization server. // - Not include a fragment component. - // See OAuth 2.0 - Redirection Endpoint (https://tools.ietf.org/html/rfc6749#section-3.1.2) + // For more information, see Default redirect URI (https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-client-apps.html#cognito-user-pools-app-idp-settings-about) // . Amazon Cognito requires HTTPS over HTTP except for http://localhost for // testing purposes only. App callback URLs such as myapp://example are also // supported. diff --git a/service/cognitoidentityprovider/api_op_DescribeResourceServer.go b/service/cognitoidentityprovider/api_op_DescribeResourceServer.go index dc6829a11ee..65d0c132462 100644 --- a/service/cognitoidentityprovider/api_op_DescribeResourceServer.go +++ b/service/cognitoidentityprovider/api_op_DescribeResourceServer.go @@ -29,7 +29,12 @@ func (c *Client) DescribeResourceServer(ctx context.Context, params *DescribeRes type DescribeResourceServerInput struct { - // The identifier for the resource server + // A unique resource server identifier for the resource server. The identifier can + // be an API friendly name like solar-system-data . You can also set an API URL + // like https://solar-system-data-api.example.com as your identifier. Amazon + // Cognito represents scopes in the access token in the format + // $resource-server-identifier/$scope . Longer scope-identifier strings increase + // the size of your access tokens. // // This member is required. 
Identifier *string diff --git a/service/cognitoidentityprovider/api_op_UpdateResourceServer.go b/service/cognitoidentityprovider/api_op_UpdateResourceServer.go index feca9feb0ee..fc8bcb3fa4d 100644 --- a/service/cognitoidentityprovider/api_op_UpdateResourceServer.go +++ b/service/cognitoidentityprovider/api_op_UpdateResourceServer.go @@ -36,7 +36,12 @@ func (c *Client) UpdateResourceServer(ctx context.Context, params *UpdateResourc type UpdateResourceServerInput struct { - // The identifier for the resource server. + // A unique resource server identifier for the resource server. The identifier can + // be an API friendly name like solar-system-data . You can also set an API URL + // like https://solar-system-data-api.example.com as your identifier. Amazon + // Cognito represents scopes in the access token in the format + // $resource-server-identifier/$scope . Longer scope-identifier strings increase + // the size of your access tokens. // // This member is required. Identifier *string diff --git a/service/cognitoidentityprovider/deserializers.go b/service/cognitoidentityprovider/deserializers.go index be61b2c3f0c..bf89b90d71e 100644 --- a/service/cognitoidentityprovider/deserializers.go +++ b/service/cognitoidentityprovider/deserializers.go @@ -10975,6 +10975,9 @@ func awsAwsjson11_deserializeOpErrorSignUp(response *smithyhttp.Response, metada case strings.EqualFold("InvalidSmsRoleTrustRelationshipException", errorCode): return awsAwsjson11_deserializeErrorInvalidSmsRoleTrustRelationshipException(response, errorBody) + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + case strings.EqualFold("NotAuthorizedException", errorCode): return awsAwsjson11_deserializeErrorNotAuthorizedException(response, errorBody) diff --git a/service/cognitoidentityprovider/types/types.go b/service/cognitoidentityprovider/types/types.go index 931e7b30a7d..5a067e3cb78 100644 --- a/service/cognitoidentityprovider/types/types.go +++ b/service/cognitoidentityprovider/types/types.go @@ -182,8 +182,9 @@ type AuthEventType struct { // The challenge responses. ChallengeResponses []ChallengeResponseType - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // The user context data captured at the time of an event request. This value @@ -427,8 +428,9 @@ type DeviceType struct { // The date when the device was last authenticated. DeviceLastAuthenticatedDate *time.Time - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. DeviceLastModifiedDate *time.Time noSmithyDocumentSerde @@ -612,8 +614,9 @@ type EventRiskType struct { // The group type. type GroupType struct { - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. 
Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // A string containing the description of the group. @@ -622,8 +625,9 @@ type GroupType struct { // The name of the group. GroupName *string - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. LastModifiedDate *time.Time // A non-negative integer value that specifies the precedence of this group @@ -667,15 +671,17 @@ type IdentityProviderType struct { // A mapping of IdP attributes to standard and custom user pool attributes. AttributeMapping map[string]string - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // A list of IdP identifiers. IdpIdentifiers []string - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. LastModifiedDate *time.Time // The scopes, URLs, and identifiers for your external identity provider. The @@ -1018,8 +1024,9 @@ type PreTokenGenerationVersionConfigType struct { // A container for IdP details. type ProviderDescription struct { - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // The date the provider was last modified. @@ -1085,7 +1092,12 @@ type ResourceServerScopeType struct { // A container for information about a resource server for a user pool. type ResourceServerType struct { - // The identifier for the resource server. + // A unique resource server identifier for the resource server. The identifier can + // be an API friendly name like solar-system-data . You can also set an API URL + // like https://solar-system-data-api.example.com as your identifier. Amazon + // Cognito represents scopes in the access token in the format + // $resource-server-identifier/$scope . Longer scope-identifier strings increase + // the size of your access tokens. Identifier *string // The name of the resource server. @@ -1114,8 +1126,9 @@ type RiskConfigurationType struct { // and the EventAction . CompromisedCredentialsRiskConfiguration *CompromisedCredentialsRiskConfigurationType - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. 
Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. LastModifiedDate *time.Time // The configuration to override the risk decision. @@ -1357,15 +1370,17 @@ type UICustomizationType struct { // The client ID for the client app. ClientId *string - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // The logo image for the UI customization. ImageUrl *string - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. LastModifiedDate *time.Time // The user pool ID for the user pool. @@ -1432,8 +1447,9 @@ type UserImportJobType struct { // The message returned when the user import job is completed. CompletionMessage *string - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // The number of users that couldn't be imported. @@ -1604,8 +1620,9 @@ type UserPoolClientType struct { // The client secret from the user pool request of the client type. ClientSecret *string - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // The default redirect URI. Must be in the CallbackURLs list. A redirect URI @@ -1676,8 +1693,9 @@ type UserPoolClientType struct { // of your app client, your ID tokens are valid for one hour. IdTokenValidity *int32 - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. LastModifiedDate *time.Time // A list of allowed logout URLs for the IdPs. @@ -1762,8 +1780,9 @@ type UserPoolClientType struct { // A user pool description. type UserPoolDescriptionType struct { - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // The ID in a user pool description. 
@@ -1772,8 +1791,9 @@ type UserPoolDescriptionType struct { // The Lambda configuration information in a user pool description. LambdaConfig *LambdaConfigType - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. LastModifiedDate *time.Time // The name in a user pool description. @@ -1820,8 +1840,9 @@ type UserPoolType struct { // The attributes that are auto-verified in a user pool. AutoVerifiedAttributes []VerifiedAttributeType - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was created. + // The date and time when the item was created. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. CreationDate *time.Time // A custom domain name that you provide to Amazon Cognito. This parameter applies @@ -1876,8 +1897,9 @@ type UserPoolType struct { // The Lambda triggers associated with the user pool. LambdaConfig *LambdaConfigType - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. LastModifiedDate *time.Time // Can be one of the following values: @@ -1988,8 +2010,9 @@ type UserType struct { // The creation date of the user. UserCreateDate *time.Time - // The date and time, in ISO 8601 (https://www.iso.org/iso-8601-date-and-time-format.html) - // format, when the item was modified. + // The date and time when the item was modified. Amazon Cognito returns this + // timestamp in UNIX epoch time format. Your SDK might render the output in a + // human-readable format like ISO 8601 or a Java Date object. UserLastModifiedDate *time.Time // The user status. 
This can be one of the following: diff --git a/service/connectcampaigns/deserializers.go b/service/connectcampaigns/deserializers.go index bde4e2cc8aa..2980803e7b8 100644 --- a/service/connectcampaigns/deserializers.go +++ b/service/connectcampaigns/deserializers.go @@ -3491,6 +3491,15 @@ func awsRestjson1_deserializeDocumentAnswerMachineDetectionConfig(v **types.Answ for key, value := range shape { switch key { + case "awaitAnswerMachinePrompt": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.AwaitAnswerMachinePrompt = ptr.Bool(jtv) + } + case "enableAnswerMachineDetection": if value != nil { jtv, ok := value.(bool) diff --git a/service/connectcampaigns/serializers.go b/service/connectcampaigns/serializers.go index 083c6a92276..3207fa9495e 100644 --- a/service/connectcampaigns/serializers.go +++ b/service/connectcampaigns/serializers.go @@ -1709,6 +1709,11 @@ func awsRestjson1_serializeDocumentAnswerMachineDetectionConfig(v *types.AnswerM object := value.Object() defer object.Close() + if v.AwaitAnswerMachinePrompt != nil { + ok := object.Key("awaitAnswerMachinePrompt") + ok.Boolean(*v.AwaitAnswerMachinePrompt) + } + if v.EnableAnswerMachineDetection != nil { ok := object.Key("enableAnswerMachineDetection") ok.Boolean(*v.EnableAnswerMachineDetection) diff --git a/service/connectcampaigns/types/types.go b/service/connectcampaigns/types/types.go index 7e85c5d3b23..b912d3941b1 100644 --- a/service/connectcampaigns/types/types.go +++ b/service/connectcampaigns/types/types.go @@ -24,6 +24,9 @@ type AnswerMachineDetectionConfig struct { // This member is required. EnableAnswerMachineDetection *bool + // Enable or disable await answer machine prompt + AwaitAnswerMachinePrompt *bool + noSmithyDocumentSerde } diff --git a/service/marketplaceentitlementservice/endpoints.go b/service/marketplaceentitlementservice/endpoints.go index d4c68053a6f..d7e10f99088 100644 --- a/service/marketplaceentitlementservice/endpoints.go +++ b/service/marketplaceentitlementservice/endpoints.go @@ -418,6 +418,19 @@ func (r *resolver) ResolveEndpoint( } return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") } + if _Region == "cn-northwest-1" { + uriString := "https://entitlement-marketplace.cn-northwest-1.amazonaws.com.cn" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } if "aws" == _PartitionResult.Name { uriString := func() string { var out strings.Builder diff --git a/service/marketplaceentitlementservice/endpoints_test.go b/service/marketplaceentitlementservice/endpoints_test.go index 271572c03a5..5fb4e13c67e 100644 --- a/service/marketplaceentitlementservice/endpoints_test.go +++ b/service/marketplaceentitlementservice/endpoints_test.go @@ -162,8 +162,45 @@ func TestEndpointCase3(t *testing.T) { } } -// For region cn-north-1 with FIPS enabled and DualStack enabled +// For region cn-northwest-1 with FIPS disabled and DualStack disabled func TestEndpointCase4(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("cn-northwest-1"), + UseFIPS: ptr.Bool(false), + UseDualStack: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + 
t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://entitlement-marketplace.cn-northwest-1.amazonaws.com.cn") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: smithy.Properties{}, + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) + } + + if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) + } +} + +// For region cn-north-1 with FIPS enabled and DualStack enabled +func TestEndpointCase5(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("cn-north-1"), UseFIPS: ptr.Bool(true), @@ -200,7 +237,7 @@ func TestEndpointCase4(t *testing.T) { } // For region cn-north-1 with FIPS enabled and DualStack disabled -func TestEndpointCase5(t *testing.T) { +func TestEndpointCase6(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("cn-north-1"), UseFIPS: ptr.Bool(true), @@ -237,7 +274,7 @@ func TestEndpointCase5(t *testing.T) { } // For region cn-north-1 with FIPS disabled and DualStack enabled -func TestEndpointCase6(t *testing.T) { +func TestEndpointCase7(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("cn-north-1"), UseFIPS: ptr.Bool(false), @@ -274,7 +311,7 @@ func TestEndpointCase6(t *testing.T) { } // For region cn-north-1 with FIPS disabled and DualStack disabled -func TestEndpointCase7(t *testing.T) { +func TestEndpointCase8(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("cn-north-1"), UseFIPS: ptr.Bool(false), @@ -311,7 +348,7 @@ func TestEndpointCase7(t *testing.T) { } // For region us-gov-east-1 with FIPS enabled and DualStack enabled -func TestEndpointCase8(t *testing.T) { +func TestEndpointCase9(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(true), @@ -348,7 +385,7 @@ func TestEndpointCase8(t *testing.T) { } // For region us-gov-east-1 with FIPS enabled and DualStack disabled -func TestEndpointCase9(t *testing.T) { +func TestEndpointCase10(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(true), @@ -385,7 +422,7 @@ func TestEndpointCase9(t *testing.T) { } // For region us-gov-east-1 with FIPS disabled and DualStack enabled -func TestEndpointCase10(t *testing.T) { +func TestEndpointCase11(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(false), @@ -422,7 +459,7 @@ func TestEndpointCase10(t *testing.T) { } // For region us-gov-east-1 with FIPS disabled and DualStack disabled -func TestEndpointCase11(t *testing.T) { +func TestEndpointCase12(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(false), @@ -459,7 +496,7 @@ func TestEndpointCase11(t *testing.T) { } // For region us-iso-east-1 with FIPS enabled and DualStack enabled -func TestEndpointCase12(t *testing.T) { +func TestEndpointCase13(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: ptr.Bool(true), @@ -479,7 +516,7 @@ func TestEndpointCase12(t *testing.T) { } // For region us-iso-east-1 with FIPS enabled and DualStack disabled -func TestEndpointCase13(t *testing.T) { +func TestEndpointCase14(t *testing.T) { var 
params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: ptr.Bool(true), @@ -516,7 +553,7 @@ func TestEndpointCase13(t *testing.T) { } // For region us-iso-east-1 with FIPS disabled and DualStack enabled -func TestEndpointCase14(t *testing.T) { +func TestEndpointCase15(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: ptr.Bool(false), @@ -536,7 +573,7 @@ func TestEndpointCase14(t *testing.T) { } // For region us-iso-east-1 with FIPS disabled and DualStack disabled -func TestEndpointCase15(t *testing.T) { +func TestEndpointCase16(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: ptr.Bool(false), @@ -573,7 +610,7 @@ func TestEndpointCase15(t *testing.T) { } // For region us-isob-east-1 with FIPS enabled and DualStack enabled -func TestEndpointCase16(t *testing.T) { +func TestEndpointCase17(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(true), @@ -593,7 +630,7 @@ func TestEndpointCase16(t *testing.T) { } // For region us-isob-east-1 with FIPS enabled and DualStack disabled -func TestEndpointCase17(t *testing.T) { +func TestEndpointCase18(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(true), @@ -630,7 +667,7 @@ func TestEndpointCase17(t *testing.T) { } // For region us-isob-east-1 with FIPS disabled and DualStack enabled -func TestEndpointCase18(t *testing.T) { +func TestEndpointCase19(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(false), @@ -650,7 +687,7 @@ func TestEndpointCase18(t *testing.T) { } // For region us-isob-east-1 with FIPS disabled and DualStack disabled -func TestEndpointCase19(t *testing.T) { +func TestEndpointCase20(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(false), @@ -687,7 +724,7 @@ func TestEndpointCase19(t *testing.T) { } // For custom endpoint with region set and fips disabled and dualstack disabled -func TestEndpointCase20(t *testing.T) { +func TestEndpointCase21(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-east-1"), UseFIPS: ptr.Bool(false), @@ -725,7 +762,7 @@ func TestEndpointCase20(t *testing.T) { } // For custom endpoint with region not set and fips disabled and dualstack disabled -func TestEndpointCase21(t *testing.T) { +func TestEndpointCase22(t *testing.T) { var params = EndpointParameters{ UseFIPS: ptr.Bool(false), UseDualStack: ptr.Bool(false), @@ -762,7 +799,7 @@ func TestEndpointCase21(t *testing.T) { } // For custom endpoint with fips enabled and dualstack disabled -func TestEndpointCase22(t *testing.T) { +func TestEndpointCase23(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-east-1"), UseFIPS: ptr.Bool(true), @@ -783,7 +820,7 @@ func TestEndpointCase22(t *testing.T) { } // For custom endpoint with fips disabled and dualstack enabled -func TestEndpointCase23(t *testing.T) { +func TestEndpointCase24(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-east-1"), UseFIPS: ptr.Bool(false), @@ -804,7 +841,7 @@ func TestEndpointCase23(t *testing.T) { } // Missing region -func TestEndpointCase24(t *testing.T) { +func TestEndpointCase25(t *testing.T) { var params = EndpointParameters{} resolver := NewDefaultEndpointResolverV2() diff --git a/service/marketplaceentitlementservice/internal/endpoints/endpoints.go 
b/service/marketplaceentitlementservice/internal/endpoints/endpoints.go index 2b86541c6ac..e663ec4af29 100644 --- a/service/marketplaceentitlementservice/internal/endpoints/endpoints.go +++ b/service/marketplaceentitlementservice/internal/endpoints/endpoints.go @@ -195,7 +195,7 @@ var defaultPartitions = endpoints.Partitions{ Region: "cn-northwest-1", }: endpoints.Endpoint{ Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", - Protocols: []string{"HTTPS"}, + Protocols: []string{"https"}, CredentialScope: endpoints.CredentialScope{ Region: "cn-northwest-1", }, diff --git a/service/oam/api_op_CreateLink.go b/service/oam/api_op_CreateLink.go index d7a9b7141c6..0a3469de4a4 100644 --- a/service/oam/api_op_CreateLink.go +++ b/service/oam/api_op_CreateLink.go @@ -12,11 +12,14 @@ import ( ) // Creates a link between a source account and a sink that you have created in a -// monitoring account. Before you create a link, you must create a sink in the -// monitoring account and create a sink policy in that account. The sink policy -// must permit the source account to link to it. You can grant permission to source -// accounts by granting permission to an entire organization or to individual -// accounts. For more information, see CreateSink (https://docs.aws.amazon.com/OAM/latest/APIReference/API_CreateSink.html) +// monitoring account. After the link is created, data is sent from the source +// account to the monitoring account. When you create a link, you can optionally +// specify filters that specify which metric namespaces and which log groups are +// shared from the source account to the monitoring account. Before you create a +// link, you must create a sink in the monitoring account and create a sink policy +// in that account. The sink policy must permit the source account to link to it. +// You can grant permission to source accounts by granting permission to an entire +// organization or to individual accounts. For more information, see CreateSink (https://docs.aws.amazon.com/OAM/latest/APIReference/API_CreateSink.html) // and PutSinkPolicy (https://docs.aws.amazon.com/OAM/latest/APIReference/API_PutSinkPolicy.html) // . Each monitoring account can be linked to as many as 100,000 source accounts. // Each source account can be linked to as many as five monitoring accounts. @@ -61,6 +64,11 @@ type CreateLinkInput struct { // This member is required. SinkIdentifier *string + // Use this structure to optionally create filters that specify that only some + // metric namespaces or log groups are to be shared from the source account to the + // monitoring account. + LinkConfiguration *types.LinkConfiguration + // Assigns one or more tags (key-value pairs) to the link. Tags can help you // organize and categorize your resources. You can also use them to scope user // permissions by granting a user permission to access or change only resources @@ -87,6 +95,10 @@ type CreateLinkOutput struct { // The exact label template that you specified, with the variables not resolved. LabelTemplate *string + // This structure includes filters that specify which metric namespaces and which + // log groups are shared from the source account to the monitoring account. + LinkConfiguration *types.LinkConfiguration + // The resource types supported by this link. 
ResourceTypes []string diff --git a/service/oam/api_op_CreateSink.go b/service/oam/api_op_CreateSink.go index 8539b24e837..24d11d63ebd 100644 --- a/service/oam/api_op_CreateSink.go +++ b/service/oam/api_op_CreateSink.go @@ -16,8 +16,8 @@ import ( // accounts can link to the sink to send observability data. After you create a // sink, you must create a sink policy that allows source accounts to attach to it. // For more information, see PutSinkPolicy (https://docs.aws.amazon.com/OAM/latest/APIReference/API_PutSinkPolicy.html) -// . Each account can contain one sink. If you delete a sink, you can then create a -// new one in that account. +// . Each account can contain one sink per Region. If you delete a sink, you can +// then create a new one in that Region. func (c *Client) CreateSink(ctx context.Context, params *CreateSinkInput, optFns ...func(*Options)) (*CreateSinkOutput, error) { if params == nil { params = &CreateSinkInput{} diff --git a/service/oam/api_op_GetLink.go b/service/oam/api_op_GetLink.go index 6a7ace86ca9..4498af3d0c8 100644 --- a/service/oam/api_op_GetLink.go +++ b/service/oam/api_op_GetLink.go @@ -6,6 +6,7 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/oam/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -54,6 +55,10 @@ type GetLinkOutput struct { // template variables not resolved. LabelTemplate *string + // This structure includes filters that specify which metric namespaces and which + // log groups are shared from the source account to the monitoring account. + LinkConfiguration *types.LinkConfiguration + // The resource types supported by this link. ResourceTypes []string diff --git a/service/oam/api_op_UpdateLink.go b/service/oam/api_op_UpdateLink.go index d063a77a971..63376dca7d0 100644 --- a/service/oam/api_op_UpdateLink.go +++ b/service/oam/api_op_UpdateLink.go @@ -13,8 +13,10 @@ import ( // Use this operation to change what types of data are shared from a source // account to its linked monitoring account sink. You can't change the sink or -// change the monitoring account with this operation. To update the list of tags -// associated with the sink, use TagResource (https://docs.aws.amazon.com/OAM/latest/APIReference/API_TagResource.html) +// change the monitoring account with this operation. When you update a link, you +// can optionally specify filters that specify which metric namespaces and which +// log groups are shared from the source account to the monitoring account. To +// update the list of tags associated with the sink, use TagResource (https://docs.aws.amazon.com/OAM/latest/APIReference/API_TagResource.html) // . func (c *Client) UpdateLink(ctx context.Context, params *UpdateLinkInput, optFns ...func(*Options)) (*UpdateLinkOutput, error) { if params == nil { @@ -45,6 +47,10 @@ type UpdateLinkInput struct { // This member is required. ResourceTypes []types.ResourceType + // Use this structure to filter which metric namespaces and which log groups are + // to be shared from the source account to the monitoring account. + LinkConfiguration *types.LinkConfiguration + noSmithyDocumentSerde } @@ -64,6 +70,10 @@ type UpdateLinkOutput struct { // template variables not resolved. LabelTemplate *string + // This structure includes filters that specify which metric namespaces and which + // log groups are shared from the source account to the monitoring account. 
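// The LinkConfiguration fields added above are optional pointers on the input and
// output shapes. A minimal illustrative sketch of narrowing an existing link to
// CloudWatch metrics in custom namespaces only; the link ARN is hypothetical and
// the filter string follows the Namespace syntax documented for MetricConfiguration:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/oam"
	"github.com/aws/aws-sdk-go-v2/service/oam/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := oam.NewFromConfig(cfg)

	_, err = client.UpdateLink(ctx, &oam.UpdateLinkInput{
		Identifier: aws.String("arn:aws:oam:us-east-1:111122223333:link/EXAMPLE"), // hypothetical link ARN
		ResourceTypes: []types.ResourceType{
			// Literal enum value for CloudWatch metrics.
			types.ResourceType("AWS::CloudWatch::Metric"),
		},
		LinkConfiguration: &types.LinkConfiguration{
			MetricConfiguration: &types.MetricConfiguration{
				// Example filter taken from the MetricConfiguration documentation:
				// share only namespaces that don't start with AWS/.
				Filter: aws.String("Namespace NOT LIKE 'AWS/%'"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}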
+ LinkConfiguration *types.LinkConfiguration + // The resource types now supported by this link. ResourceTypes []string diff --git a/service/oam/deserializers.go b/service/oam/deserializers.go index fda4f19b960..b1e9af98437 100644 --- a/service/oam/deserializers.go +++ b/service/oam/deserializers.go @@ -199,6 +199,11 @@ func awsRestjson1_deserializeOpDocumentCreateLinkOutput(v **CreateLinkOutput, va sv.LabelTemplate = ptr.String(jtv) } + case "LinkConfiguration": + if err := awsRestjson1_deserializeDocumentLinkConfiguration(&sv.LinkConfiguration, value); err != nil { + return err + } + case "ResourceTypes": if err := awsRestjson1_deserializeDocumentResourceTypesOutput(&sv.ResourceTypes, value); err != nil { return err @@ -778,6 +783,11 @@ func awsRestjson1_deserializeOpDocumentGetLinkOutput(v **GetLinkOutput, value in sv.LabelTemplate = ptr.String(jtv) } + case "LinkConfiguration": + if err := awsRestjson1_deserializeDocumentLinkConfiguration(&sv.LinkConfiguration, value); err != nil { + return err + } + case "ResourceTypes": if err := awsRestjson1_deserializeDocumentResourceTypesOutput(&sv.ResourceTypes, value); err != nil { return err @@ -2337,6 +2347,11 @@ func awsRestjson1_deserializeOpDocumentUpdateLinkOutput(v **UpdateLinkOutput, va sv.LabelTemplate = ptr.String(jtv) } + case "LinkConfiguration": + if err := awsRestjson1_deserializeDocumentLinkConfiguration(&sv.LinkConfiguration, value); err != nil { + return err + } + case "ResourceTypes": if err := awsRestjson1_deserializeDocumentResourceTypesOutput(&sv.ResourceTypes, value); err != nil { return err @@ -2896,6 +2911,47 @@ func awsRestjson1_deserializeDocumentInvalidParameterException(v **types.Invalid return nil } +func awsRestjson1_deserializeDocumentLinkConfiguration(v **types.LinkConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LinkConfiguration + if *v == nil { + sv = &types.LinkConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LogGroupConfiguration": + if err := awsRestjson1_deserializeDocumentLogGroupConfiguration(&sv.LogGroupConfiguration, value); err != nil { + return err + } + + case "MetricConfiguration": + if err := awsRestjson1_deserializeDocumentMetricConfiguration(&sv.MetricConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentListAttachedLinksItem(v **types.ListAttachedLinksItem, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3182,6 +3238,86 @@ func awsRestjson1_deserializeDocumentListSinksItems(v *[]types.ListSinksItem, va return nil } +func awsRestjson1_deserializeDocumentLogGroupConfiguration(v **types.LogGroupConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LogGroupConfiguration + if *v == nil { + sv = &types.LogGroupConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Filter": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected LogsFilter to be of type string, got 
%T instead", value) + } + sv.Filter = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentMetricConfiguration(v **types.MetricConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MetricConfiguration + if *v == nil { + sv = &types.MetricConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Filter": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MetricsFilter to be of type string, got %T instead", value) + } + sv.Filter = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentMissingRequiredParameterException(v **types.MissingRequiredParameterException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/oam/serializers.go b/service/oam/serializers.go index a8a5c28d253..6bcaec3c6b5 100644 --- a/service/oam/serializers.go +++ b/service/oam/serializers.go @@ -86,6 +86,13 @@ func awsRestjson1_serializeOpDocumentCreateLinkInput(v *CreateLinkInput, value s ok.String(*v.LabelTemplate) } + if v.LinkConfiguration != nil { + ok := object.Key("LinkConfiguration") + if err := awsRestjson1_serializeDocumentLinkConfiguration(v.LinkConfiguration, ok); err != nil { + return err + } + } + if v.ResourceTypes != nil { ok := object.Key("ResourceTypes") if err := awsRestjson1_serializeDocumentResourceTypesInput(v.ResourceTypes, ok); err != nil { @@ -1188,6 +1195,13 @@ func awsRestjson1_serializeOpDocumentUpdateLinkInput(v *UpdateLinkInput, value s ok.String(*v.Identifier) } + if v.LinkConfiguration != nil { + ok := object.Key("LinkConfiguration") + if err := awsRestjson1_serializeDocumentLinkConfiguration(v.LinkConfiguration, ok); err != nil { + return err + } + } + if v.ResourceTypes != nil { ok := object.Key("ResourceTypes") if err := awsRestjson1_serializeDocumentResourceTypesInput(v.ResourceTypes, ok); err != nil { @@ -1198,6 +1212,51 @@ func awsRestjson1_serializeOpDocumentUpdateLinkInput(v *UpdateLinkInput, value s return nil } +func awsRestjson1_serializeDocumentLinkConfiguration(v *types.LinkConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.LogGroupConfiguration != nil { + ok := object.Key("LogGroupConfiguration") + if err := awsRestjson1_serializeDocumentLogGroupConfiguration(v.LogGroupConfiguration, ok); err != nil { + return err + } + } + + if v.MetricConfiguration != nil { + ok := object.Key("MetricConfiguration") + if err := awsRestjson1_serializeDocumentMetricConfiguration(v.MetricConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentLogGroupConfiguration(v *types.LogGroupConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("Filter") + ok.String(*v.Filter) + } + + return nil +} + +func awsRestjson1_serializeDocumentMetricConfiguration(v *types.MetricConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filter != nil { + ok := object.Key("Filter") + ok.String(*v.Filter) + } + + return nil +} + func 
awsRestjson1_serializeDocumentResourceTypesInput(v []types.ResourceType, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/service/oam/types/types.go b/service/oam/types/types.go index e8f8ee77b10..3f68fd67eb9 100644 --- a/service/oam/types/types.go +++ b/service/oam/types/types.go @@ -6,6 +6,22 @@ import ( smithydocument "github.com/aws/smithy-go/document" ) +// Use this structure to optionally create filters that specify that only some +// metric namespaces or log groups are to be shared from the source account to the +// monitoring account. +type LinkConfiguration struct { + + // Use this structure to filter which log groups are to send log events from the + // source account to the monitoring account. + LogGroupConfiguration *LogGroupConfiguration + + // Use this structure to filter which metric namespaces are to be shared from the + // source account to the monitoring account. + MetricConfiguration *MetricConfiguration + + noSmithyDocumentSerde +} + // A structure that contains information about one link attached to this // monitoring account sink. type ListAttachedLinksItem struct { @@ -62,4 +78,71 @@ type ListSinksItem struct { noSmithyDocumentSerde } +// This structure contains the Filter parameter which you can use to specify which +// log groups are to share log events from this source account to the monitoring +// account. +type LogGroupConfiguration struct { + + // Use this field to specify which log groups are to share their log events with + // the monitoring account. Use the term LogGroupName and one or more of the + // following operands. Use single quotation marks (') around log group names. The + // matching of log group names is case sensitive. Each filter has a limit of five + // conditional operands. Conditional operands are AND and OR . + // - = and != + // - AND + // - OR + // - LIKE and NOT LIKE . These can be used only as prefix searches. Include a % + // at the end of the string that you want to search for and include. + // - IN and NOT IN , using parentheses ( ) + // Examples: + // - LogGroupName IN ('This-Log-Group', 'Other-Log-Group') includes only the log + // groups with names This-Log-Group and Other-Log-Group . + // - LogGroupName NOT IN ('Private-Log-Group', 'Private-Log-Group-2') includes + // all log groups except the log groups with names Private-Log-Group and + // Private-Log-Group-2 . + // - LogGroupName LIKE 'aws/lambda/%' OR LogGroupName LIKE 'AWSLogs%' includes + // all log groups that have names that start with aws/lambda/ or AWSLogs . + // If you are updating a link that uses filters, you can specify * as the only + // value for the filter parameter to delete the filter and share all log groups + // with the monitoring account. + // + // This member is required. + Filter *string + + noSmithyDocumentSerde +} + +// This structure contains the Filter parameter which you can use to specify which +// metric namespaces are to be shared from this source account to the monitoring +// account. +type MetricConfiguration struct { + + // Use this field to specify which metrics are to be shared with the monitoring + // account. Use the term Namespace and one or more of the following operands. Use + // single quotation marks (') around namespace names. The matching of namespace + // names is case sensitive. Each filter has a limit of five conditional operands. + // Conditional operands are AND and OR . + // - = and != + // - AND + // - OR + // - LIKE and NOT LIKE . These can be used only as prefix searches. 
Include a % + // at the end of the string that you want to search for and include. + // - IN and NOT IN , using parentheses ( ) + // Examples: + // - Namespace NOT LIKE 'AWS/%' includes only namespaces that don't start with + // AWS/ , such as custom namespaces. + // - Namespace IN ('AWS/EC2', 'AWS/ELB', 'AWS/S3') includes only the metrics in + // the EC2, Elastic Load Balancing, and Amazon S3 namespaces. + // - Namespace = 'AWS/EC2' OR Namespace NOT LIKE 'AWS/%' includes only the EC2 + // namespace and your custom namespaces. + // If you are updating a link that uses filters, you can specify * as the only + // value for the filter parameter to delete the filter and share all metric + // namespaces with the monitoring account. + // + // This member is required. + Filter *string + + noSmithyDocumentSerde +} + type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/oam/validators.go b/service/oam/validators.go index c8b932637bc..bed60b64b1e 100644 --- a/service/oam/validators.go +++ b/service/oam/validators.go @@ -5,6 +5,7 @@ package oam import ( "context" "fmt" + "github.com/aws/aws-sdk-go-v2/service/oam/types" smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/middleware" ) @@ -321,6 +322,58 @@ func addOpUpdateLinkValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateLink{}, middleware.After) } +func validateLinkConfiguration(v *types.LinkConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LinkConfiguration"} + if v.LogGroupConfiguration != nil { + if err := validateLogGroupConfiguration(v.LogGroupConfiguration); err != nil { + invalidParams.AddNested("LogGroupConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.MetricConfiguration != nil { + if err := validateMetricConfiguration(v.MetricConfiguration); err != nil { + invalidParams.AddNested("MetricConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLogGroupConfiguration(v *types.LogGroupConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogGroupConfiguration"} + if v.Filter == nil { + invalidParams.Add(smithy.NewErrParamRequired("Filter")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateMetricConfiguration(v *types.MetricConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetricConfiguration"} + if v.Filter == nil { + invalidParams.Add(smithy.NewErrParamRequired("Filter")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateLinkInput(v *CreateLinkInput) error { if v == nil { return nil @@ -335,6 +388,11 @@ func validateOpCreateLinkInput(v *CreateLinkInput) error { if v.SinkIdentifier == nil { invalidParams.Add(smithy.NewErrParamRequired("SinkIdentifier")) } + if v.LinkConfiguration != nil { + if err := validateLinkConfiguration(v.LinkConfiguration); err != nil { + invalidParams.AddNested("LinkConfiguration", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -527,6 +585,11 @@ func validateOpUpdateLinkInput(v *UpdateLinkInput) error { if v.ResourceTypes == nil { invalidParams.Add(smithy.NewErrParamRequired("ResourceTypes")) } + if v.LinkConfiguration != nil { + if err := 
validateLinkConfiguration(v.LinkConfiguration); err != nil { + invalidParams.AddNested("LinkConfiguration", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { diff --git a/service/rds/api_op_CreateCustomDBEngineVersion.go b/service/rds/api_op_CreateCustomDBEngineVersion.go index ea3829e30fd..1aa7ced921c 100644 --- a/service/rds/api_op_CreateCustomDBEngineVersion.go +++ b/service/rds/api_op_CreateCustomDBEngineVersion.go @@ -222,6 +222,9 @@ type CreateCustomDBEngineVersionOutput struct { // Amazon Redshift. SupportsIntegrations *bool + // Indicates whether the DB engine version supports Aurora Limitless Database. + SupportsLimitlessDatabase *bool + // Indicates whether the DB engine version supports forwarding write operations // from reader DB instances to the writer DB instance in the DB cluster. By // default, write operations aren't allowed on reader DB instances. Valid for: diff --git a/service/rds/api_op_DeleteCustomDBEngineVersion.go b/service/rds/api_op_DeleteCustomDBEngineVersion.go index 3ced09e8463..aebc27f9749 100644 --- a/service/rds/api_op_DeleteCustomDBEngineVersion.go +++ b/service/rds/api_op_DeleteCustomDBEngineVersion.go @@ -175,6 +175,9 @@ type DeleteCustomDBEngineVersionOutput struct { // Amazon Redshift. SupportsIntegrations *bool + // Indicates whether the DB engine version supports Aurora Limitless Database. + SupportsLimitlessDatabase *bool + // Indicates whether the DB engine version supports forwarding write operations // from reader DB instances to the writer DB instance in the DB cluster. By // default, write operations aren't allowed on reader DB instances. Valid for: diff --git a/service/rds/api_op_ModifyCustomDBEngineVersion.go b/service/rds/api_op_ModifyCustomDBEngineVersion.go index 853435473f8..9be45a97f61 100644 --- a/service/rds/api_op_ModifyCustomDBEngineVersion.go +++ b/service/rds/api_op_ModifyCustomDBEngineVersion.go @@ -185,6 +185,9 @@ type ModifyCustomDBEngineVersionOutput struct { // Amazon Redshift. SupportsIntegrations *bool + // Indicates whether the DB engine version supports Aurora Limitless Database. + SupportsLimitlessDatabase *bool + // Indicates whether the DB engine version supports forwarding write operations // from reader DB instances to the writer DB instance in the DB cluster. By // default, write operations aren't allowed on reader DB instances. 
Valid for: diff --git a/service/rds/deserializers.go b/service/rds/deserializers.go index d9f589ae23e..be67fc785e9 100644 --- a/service/rds/deserializers.go +++ b/service/rds/deserializers.go @@ -31904,6 +31904,22 @@ func awsAwsquery_deserializeDocumentDBEngineVersion(v **types.DBEngineVersion, d sv.SupportsIntegrations = ptr.Bool(xtv) } + case strings.EqualFold("SupportsLimitlessDatabase", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.SupportsLimitlessDatabase = ptr.Bool(xtv) + } + case strings.EqualFold("SupportsLocalWriteForwarding", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -54129,6 +54145,22 @@ func awsAwsquery_deserializeDocumentUpgradeTarget(v **types.UpgradeTarget, decod sv.SupportsIntegrations = ptr.Bool(xtv) } + case strings.EqualFold("SupportsLimitlessDatabase", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.SupportsLimitlessDatabase = ptr.Bool(xtv) + } + case strings.EqualFold("SupportsLocalWriteForwarding", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -55869,6 +55901,22 @@ func awsAwsquery_deserializeOpDocumentCreateCustomDBEngineVersionOutput(v **Crea sv.SupportsIntegrations = ptr.Bool(xtv) } + case strings.EqualFold("SupportsLimitlessDatabase", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.SupportsLimitlessDatabase = ptr.Bool(xtv) + } + case strings.EqualFold("SupportsLocalWriteForwarding", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -57435,6 +57483,22 @@ func awsAwsquery_deserializeOpDocumentDeleteCustomDBEngineVersionOutput(v **Dele sv.SupportsIntegrations = ptr.Bool(xtv) } + case strings.EqualFold("SupportsLimitlessDatabase", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.SupportsLimitlessDatabase = ptr.Bool(xtv) + } + case strings.EqualFold("SupportsLocalWriteForwarding", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -61830,6 +61894,22 @@ func awsAwsquery_deserializeOpDocumentModifyCustomDBEngineVersionOutput(v **Modi sv.SupportsIntegrations = ptr.Bool(xtv) } + case strings.EqualFold("SupportsLimitlessDatabase", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.SupportsLimitlessDatabase = ptr.Bool(xtv) + } + case strings.EqualFold("SupportsLocalWriteForwarding", t.Name.Local): val, err := decoder.Value() if err != nil { diff --git a/service/rds/types/types.go b/service/rds/types/types.go index 70bb4919584..e9749eddb6a 100644 --- a/service/rds/types/types.go +++ b/service/rds/types/types.go @@ -1342,6 +1342,9 @@ type 
DBEngineVersion struct { // Amazon Redshift. SupportsIntegrations *bool + // Indicates whether the DB engine version supports Aurora Limitless Database. + SupportsLimitlessDatabase *bool + // Indicates whether the DB engine version supports forwarding write operations // from reader DB instances to the writer DB instance in the DB cluster. By // default, write operations aren't allowed on reader DB instances. Valid for: @@ -4549,6 +4552,9 @@ type UpgradeTarget struct { // Amazon Redshift. SupportsIntegrations *bool + // Indicates whether the DB engine version supports Aurora Limitless Database. + SupportsLimitlessDatabase *bool + // Indicates whether the target engine version supports forwarding write // operations from reader DB instances to the writer DB instance in the DB cluster. // By default, write operations aren't allowed on reader DB instances. Valid for: diff --git a/service/support/endpoints.go b/service/support/endpoints.go index 002c3a05a83..c121aea649b 100644 --- a/service/support/endpoints.go +++ b/service/support/endpoints.go @@ -448,6 +448,40 @@ func (r *resolver) ResolveEndpoint( } } } + if _PartitionResult.Name == "aws-us-gov" { + if _UseFIPS == true { + if _UseDualStack == false { + uriString := "https://support.us-gov-west-1.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "support") + smithyhttp.SetSigV4ASigningName(&sp, "support") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-gov-west-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } if _PartitionResult.Name == "aws-iso" { if _UseFIPS == false { if _UseDualStack == false { @@ -544,7 +578,7 @@ func (r *resolver) ResolveEndpoint( } } if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { + if _PartitionResult.SupportsFIPS == true { uriString := func() string { var out strings.Builder out.WriteString("https://support-fips.") diff --git a/service/support/endpoints_test.go b/service/support/endpoints_test.go index c93e6bcc58a..7cc88deb104 100644 --- a/service/support/endpoints_test.go +++ b/service/support/endpoints_test.go @@ -503,8 +503,61 @@ func TestEndpointCase10(t *testing.T) { } } -// For region us-gov-east-1 with FIPS enabled and DualStack enabled +// For region aws-us-gov-global with FIPS enabled and DualStack disabled func TestEndpointCase11(t *testing.T) { + var params = EndpointParameters{ + Region: ptr.String("aws-us-gov-global"), + UseFIPS: ptr.Bool(true), + UseDualStack: ptr.Bool(false), + } + + resolver := NewDefaultEndpointResolverV2() + result, err := resolver.ResolveEndpoint(context.Background(), params) + _, _ = result, err + + if err != nil { + t.Fatalf("expect no error, got %v", err) + } + + uri, _ := url.Parse("https://support.us-gov-west-1.amazonaws.com") + + expectEndpoint := smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "support") + 
smithyhttp.SetSigV4ASigningName(&sp, "support") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-gov-west-1") + return sp + }(), + }, + }) + return out + }(), + } + + if e, a := expectEndpoint.URI, result.URI; e != a { + t.Errorf("expect %v URI, got %v", e, a) + } + + if !reflect.DeepEqual(expectEndpoint.Headers, result.Headers) { + t.Errorf("expect headers to match\n%v != %v", expectEndpoint.Headers, result.Headers) + } + + if !reflect.DeepEqual(expectEndpoint.Properties, result.Properties) { + t.Errorf("expect properties to match\n%v != %v", expectEndpoint.Properties, result.Properties) + } +} + +// For region us-gov-east-1 with FIPS enabled and DualStack enabled +func TestEndpointCase12(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(true), @@ -541,7 +594,7 @@ func TestEndpointCase11(t *testing.T) { } // For region us-gov-east-1 with FIPS enabled and DualStack disabled -func TestEndpointCase12(t *testing.T) { +func TestEndpointCase13(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(true), @@ -556,12 +609,28 @@ func TestEndpointCase12(t *testing.T) { t.Fatalf("expect no error, got %v", err) } - uri, _ := url.Parse("https://support-fips.us-gov-east-1.amazonaws.com") + uri, _ := url.Parse("https://support.us-gov-west-1.amazonaws.com") expectEndpoint := smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: smithy.Properties{}, + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "support") + smithyhttp.SetSigV4ASigningName(&sp, "support") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-gov-west-1") + return sp + }(), + }, + }) + return out + }(), } if e, a := expectEndpoint.URI, result.URI; e != a { @@ -578,7 +647,7 @@ func TestEndpointCase12(t *testing.T) { } // For region us-gov-east-1 with FIPS disabled and DualStack enabled -func TestEndpointCase13(t *testing.T) { +func TestEndpointCase14(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(false), @@ -615,7 +684,7 @@ func TestEndpointCase13(t *testing.T) { } // For region us-gov-east-1 with FIPS disabled and DualStack disabled -func TestEndpointCase14(t *testing.T) { +func TestEndpointCase15(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-gov-east-1"), UseFIPS: ptr.Bool(false), @@ -668,7 +737,7 @@ func TestEndpointCase14(t *testing.T) { } // For region aws-iso-global with FIPS disabled and DualStack disabled -func TestEndpointCase15(t *testing.T) { +func TestEndpointCase16(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("aws-iso-global"), UseFIPS: ptr.Bool(false), @@ -721,7 +790,7 @@ func TestEndpointCase15(t *testing.T) { } // For region us-iso-east-1 with FIPS enabled and DualStack enabled -func TestEndpointCase16(t *testing.T) { +func TestEndpointCase17(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: ptr.Bool(true), @@ -741,7 +810,7 @@ func TestEndpointCase16(t *testing.T) { } // For region us-iso-east-1 with FIPS enabled and DualStack disabled -func TestEndpointCase17(t *testing.T) { +func TestEndpointCase18(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: 
ptr.Bool(true), @@ -778,7 +847,7 @@ func TestEndpointCase17(t *testing.T) { } // For region us-iso-east-1 with FIPS disabled and DualStack enabled -func TestEndpointCase18(t *testing.T) { +func TestEndpointCase19(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: ptr.Bool(false), @@ -798,7 +867,7 @@ func TestEndpointCase18(t *testing.T) { } // For region us-iso-east-1 with FIPS disabled and DualStack disabled -func TestEndpointCase19(t *testing.T) { +func TestEndpointCase20(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-iso-east-1"), UseFIPS: ptr.Bool(false), @@ -851,7 +920,7 @@ func TestEndpointCase19(t *testing.T) { } // For region aws-iso-b-global with FIPS disabled and DualStack disabled -func TestEndpointCase20(t *testing.T) { +func TestEndpointCase21(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("aws-iso-b-global"), UseFIPS: ptr.Bool(false), @@ -904,7 +973,7 @@ func TestEndpointCase20(t *testing.T) { } // For region us-isob-east-1 with FIPS enabled and DualStack enabled -func TestEndpointCase21(t *testing.T) { +func TestEndpointCase22(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(true), @@ -924,7 +993,7 @@ func TestEndpointCase21(t *testing.T) { } // For region us-isob-east-1 with FIPS enabled and DualStack disabled -func TestEndpointCase22(t *testing.T) { +func TestEndpointCase23(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(true), @@ -961,7 +1030,7 @@ func TestEndpointCase22(t *testing.T) { } // For region us-isob-east-1 with FIPS disabled and DualStack enabled -func TestEndpointCase23(t *testing.T) { +func TestEndpointCase24(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(false), @@ -981,7 +1050,7 @@ func TestEndpointCase23(t *testing.T) { } // For region us-isob-east-1 with FIPS disabled and DualStack disabled -func TestEndpointCase24(t *testing.T) { +func TestEndpointCase25(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-isob-east-1"), UseFIPS: ptr.Bool(false), @@ -1034,7 +1103,7 @@ func TestEndpointCase24(t *testing.T) { } // For custom endpoint with region set and fips disabled and dualstack disabled -func TestEndpointCase25(t *testing.T) { +func TestEndpointCase26(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-east-1"), UseFIPS: ptr.Bool(false), @@ -1072,7 +1141,7 @@ func TestEndpointCase25(t *testing.T) { } // For custom endpoint with region not set and fips disabled and dualstack disabled -func TestEndpointCase26(t *testing.T) { +func TestEndpointCase27(t *testing.T) { var params = EndpointParameters{ UseFIPS: ptr.Bool(false), UseDualStack: ptr.Bool(false), @@ -1109,7 +1178,7 @@ func TestEndpointCase26(t *testing.T) { } // For custom endpoint with fips enabled and dualstack disabled -func TestEndpointCase27(t *testing.T) { +func TestEndpointCase28(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-east-1"), UseFIPS: ptr.Bool(true), @@ -1130,7 +1199,7 @@ func TestEndpointCase27(t *testing.T) { } // For custom endpoint with fips disabled and dualstack enabled -func TestEndpointCase28(t *testing.T) { +func TestEndpointCase29(t *testing.T) { var params = EndpointParameters{ Region: ptr.String("us-east-1"), UseFIPS: ptr.Bool(false), @@ -1151,7 +1220,7 @@ func TestEndpointCase28(t *testing.T) { } // Missing region -func TestEndpointCase29(t 
*testing.T) { +func TestEndpointCase30(t *testing.T) { var params = EndpointParameters{} resolver := NewDefaultEndpointResolverV2()
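// The tests above call the generated resolver directly. A minimal illustrative
// sketch of the same call pattern outside the test file, exercising the new
// aws-us-gov FIPS rule; the parameter values mirror TestEndpointCase11 and the
// resolver is normally invoked by the client middleware rather than by hand:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/support"
)

func main() {
	resolver := support.NewDefaultEndpointResolverV2()
	ep, err := resolver.ResolveEndpoint(context.Background(), support.EndpointParameters{
		Region:       aws.String("aws-us-gov-global"),
		UseFIPS:      aws.Bool(true),
		UseDualStack: aws.Bool(false),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Per the rule added above, this should print
	// https://support.us-gov-west-1.amazonaws.com
	fmt.Println(ep.URI.String())
}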