From 3e7ebbdf445379ca1f84c6207cec39c056c9b035 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 12 Sep 2024 13:48:08 -0400 Subject: [PATCH 1/6] 09/11/2024 CloudFormation schemas in us-east-1; Refresh existing schemas. --- .../schemas/AWS_Amplify_App.json | 17 + ...licationSignals_ServiceLevelObjective.json | 97 +- .../AWS_Backup_RestoreTestingPlan.json | 12 + .../schemas/AWS_DataBrew_Ruleset.json | 3 +- .../schemas/AWS_EC2_Subnet.json | 6 +- .../cloudformation/schemas/AWS_EC2_VPC.json | 4 + .../schemas/AWS_EC2_VPCEndpoint.json | 232 +-- .../schemas/AWS_EC2_VPCPeeringConnection.json | 4 + .../schemas/AWS_ECS_Service.json | 1067 ++++++------ .../schemas/AWS_ECS_TaskDefinition.json | 77 +- .../cloudformation/schemas/AWS_EKS_Addon.json | 6 +- .../schemas/AWS_EKS_Cluster.json | 4 + .../schemas/AWS_EKS_FargateProfile.json | 6 +- .../AWS_EKS_PodIdentityAssociation.json | 2 +- .../AWS_ElasticLoadBalancingV2_Listener.json | 32 +- ...AWS_ElasticLoadBalancingV2_TrustStore.json | 7 +- .../AWS_IoTWireless_WirelessDevice.json | 5 +- .../schemas/AWS_MediaConnect_Flow.json | 24 +- .../schemas/AWS_NetworkFirewall_Firewall.json | 11 +- .../AWS_NetworkFirewall_FirewallPolicy.json | 11 +- .../AWS_NetworkFirewall_RuleGroup.json | 11 +- ...rkFirewall_TLSInspectionConfiguration.json | 11 +- .../schemas/AWS_PaymentCryptography_Key.json | 16 +- .../schemas/AWS_Pipes_Pipe.json | 11 +- .../schemas/AWS_QuickSight_DataSource.json | 1439 +++++++++-------- .../schemas/AWS_RDS_DBCluster.json | 9 +- .../AWS_RDS_DBClusterParameterGroup.json | 11 + .../schemas/AWS_RDS_DBInstance.json | 14 +- .../schemas/AWS_RDS_DBParameterGroup.json | 11 + .../schemas/AWS_RDS_EventSubscription.json | 11 + .../schemas/AWS_RDS_OptionGroup.json | 11 + .../schemas/AWS_RefactorSpaces_Route.json | 20 +- .../AWS_Route53Resolver_ResolverRule.json | 260 +-- .../schemas/AWS_SageMaker_Cluster.json | 758 ++++----- .../schemas/AWS_SageMaker_Domain.json | 59 + .../schemas/AWS_SageMaker_Space.json | 27 + .../schemas/AWS_SageMaker_UserProfile.json | 59 + .../AWS_VpcLattice_AccessLogSubscription.json | 8 +- .../schemas/AWS_VpcLattice_Listener.json | 7 +- .../schemas/AWS_VpcLattice_Rule.json | 13 +- .../schemas/AWS_VpcLattice_Service.json | 10 +- .../AWS_VpcLattice_ServiceNetwork.json | 13 +- ...tice_ServiceNetworkServiceAssociation.json | 10 +- ...cLattice_ServiceNetworkVpcAssociation.json | 10 +- .../schemas/AWS_VpcLattice_TargetGroup.json | 7 +- 45 files changed, 2494 insertions(+), 1949 deletions(-) diff --git a/internal/service/cloudformation/schemas/AWS_Amplify_App.json b/internal/service/cloudformation/schemas/AWS_Amplify_App.json index 4d5f576d24..1811076224 100644 --- a/internal/service/cloudformation/schemas/AWS_Amplify_App.json +++ b/internal/service/cloudformation/schemas/AWS_Amplify_App.json @@ -1,6 +1,7 @@ { "typeName": "AWS::Amplify::App", "description": "The AWS::Amplify::App resource creates Apps in the Amplify Console. 
An App is a collection of branches.", + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-amplify", "additionalProperties": false, "properties": { "AccessToken": { @@ -37,6 +38,9 @@ "maxLength": 25000, "pattern": "(?s).+" }, + "CacheConfig": { + "$ref": "#/definitions/CacheConfig" + }, "CustomHeaders": { "type": "string", "minLength": 0, @@ -189,6 +193,19 @@ } } }, + "CacheConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "Type": { + "type": "string", + "enum": [ + "AMPLIFY_MANAGED", + "AMPLIFY_MANAGED_NO_COOKIES" + ] + } + } + }, "CustomRule": { "type": "object", "additionalProperties": false, diff --git a/internal/service/cloudformation/schemas/AWS_ApplicationSignals_ServiceLevelObjective.json b/internal/service/cloudformation/schemas/AWS_ApplicationSignals_ServiceLevelObjective.json index b7a8222fbb..6c4dc4a9b7 100644 --- a/internal/service/cloudformation/schemas/AWS_ApplicationSignals_ServiceLevelObjective.json +++ b/internal/service/cloudformation/schemas/AWS_ApplicationSignals_ServiceLevelObjective.json @@ -34,6 +34,17 @@ "Sli": { "$ref": "#/definitions/Sli" }, + "RequestBasedSli": { + "$ref": "#/definitions/RequestBasedSli" + }, + "EvaluationType": { + "description": "Displays whether this is a period-based SLO or a request-based SLO.", + "type": "string", + "enum": [ + "PeriodBased", + "RequestBased" + ] + }, "Goal": { "$ref": "#/definitions/Goal" }, @@ -42,13 +53,13 @@ } }, "required": [ - "Name", - "Sli" + "Name" ], "readOnlyProperties": [ "/properties/Arn", "/properties/CreatedTime", - "/properties/LastUpdatedTime" + "/properties/LastUpdatedTime", + "/properties/EvaluationType" ], "primaryIdentifier": [ "/properties/Arn" @@ -86,6 +97,33 @@ "ComparisonOperator" ] }, + "RequestBasedSli": { + "description": "This structure contains information about the performance metric that a request-based SLO monitors.", + "type": "object", + "additionalProperties": false, + "properties": { + "RequestBasedSliMetric": { + "$ref": "#/definitions/RequestBasedSliMetric" + }, + "MetricThreshold": { + "description": "The value that the SLI metric is compared to.", + "type": "number" + }, + "ComparisonOperator": { + "description": "The arithmetic operation used when comparing the specified metric to the threshold.", + "type": "string", + "enum": [ + "GreaterThanOrEqualTo", + "LessThanOrEqualTo", + "LessThan", + "GreaterThan" + ] + } + }, + "required": [ + "RequestBasedSliMetric" + ] + }, "Goal": { "description": "A structure that contains the attributes that determine the goal of the SLO. 
This includes the time period for evaluation and the attainment threshold.", "type": "object", @@ -143,6 +181,52 @@ } } }, + "RequestBasedSliMetric": { + "description": "This structure contains the information about the metric that is used for a request-based SLO.", + "type": "object", + "additionalProperties": false, + "properties": { + "KeyAttributes": { + "$ref": "#/definitions/KeyAttributes" + }, + "OperationName": { + "description": "If the SLO monitors a specific operation of the service, this field displays that operation name.", + "type": "string", + "minLength": 1, + "maxLength": 255 + }, + "MetricType": { + "description": "If the SLO monitors either the LATENCY or AVAILABILITY metric that Application Signals collects, this field displays which of those metrics is used.", + "type": "string", + "enum": [ + "LATENCY", + "AVAILABILITY" + ] + }, + "TotalRequestCountMetric": { + "description": "This structure defines the metric that is used as the \"total requests\" number for a request-based SLO. The number observed for this metric is divided by the number of \"good requests\" or \"bad requests\" that is observed for the metric defined in `MonitoredRequestCountMetric`.", + "$ref": "#/definitions/MetricDataQueries" + }, + "MonitoredRequestCountMetric": { + "$ref": "#/definitions/MonitoredRequestCountMetric" + } + } + }, + "MonitoredRequestCountMetric": { + "description": "This structure defines the metric that is used as the \"good request\" or \"bad request\" value for a request-based SLO. This value observed for the metric defined in `TotalRequestCountMetric` is divided by the number found for `MonitoredRequestCountMetric` to determine the percentage of successful requests that this SLO tracks.", + "type": "object", + "additionalProperties": false, + "properties": { + "GoodCountMetric": { + "description": "If you want to count \"good requests\" to determine the percentage of successful requests for this request-based SLO, specify the metric to use as \"good requests\" in this structure.", + "$ref": "#/definitions/MetricDataQueries" + }, + "BadCountMetric": { + "description": "If you want to count \"bad requests\" to determine the percentage of successful requests for this request-based SLO, specify the metric to use as \"bad requests\" in this structure.", + "$ref": "#/definitions/MetricDataQueries" + } + } + }, "KeyAttributes": { "description": "This is a string-to-string map that contains information about the type of object that this SLO is related to.", "patternProperties": { @@ -409,6 +493,11 @@ "tagOnCreate": true, "tagUpdatable": true, "cloudFormationSystemTags": true, - "tagProperty": "/properties/Tags" + "tagProperty": "/properties/Tags", + "permissions": [ + "application-signals:ListTagsForResource", + "application-signals:TagResource", + "application-signals:UntagResource" + ] } } diff --git a/internal/service/cloudformation/schemas/AWS_Backup_RestoreTestingPlan.json b/internal/service/cloudformation/schemas/AWS_Backup_RestoreTestingPlan.json index 18ff1bd88f..9a2a4d5c51 100644 --- a/internal/service/cloudformation/schemas/AWS_Backup_RestoreTestingPlan.json +++ b/internal/service/cloudformation/schemas/AWS_Backup_RestoreTestingPlan.json @@ -54,6 +54,13 @@ "CONTINUOUS" ] }, + "RestoreTestingScheduleStatus": { + "type": "string", + "enum": [ + "ACTIVE", + "SUSPENDED" + ] + }, "Tag": { "type": "object", "additionalProperties": false, @@ -93,6 +100,9 @@ "ScheduleExpressionTimezone": { "type": "string" }, + "ScheduleStatus": { + "$ref": 
"#/definitions/RestoreTestingScheduleStatus" + }, "StartWindowHours": { "type": "integer" }, @@ -131,6 +141,7 @@ "create": { "permissions": [ "backup:CreateRestoreTestingPlan", + "backup:UpdateRestoreTestingPlanScheduleStatus", "backup:TagResource", "backup:GetRestoreTestingPlan", "backup:ListTags" @@ -147,6 +158,7 @@ "update": { "permissions": [ "backup:UpdateRestoreTestingPlan", + "backup:UpdateRestoreTestingPlanScheduleStatus", "backup:TagResource", "backup:UntagResource", "backup:GetRestoreTestingPlan", diff --git a/internal/service/cloudformation/schemas/AWS_DataBrew_Ruleset.json b/internal/service/cloudformation/schemas/AWS_DataBrew_Ruleset.json index f8f3a05670..fbd3b4ee25 100644 --- a/internal/service/cloudformation/schemas/AWS_DataBrew_Ruleset.json +++ b/internal/service/cloudformation/schemas/AWS_DataBrew_Ruleset.json @@ -211,7 +211,8 @@ ], "createOnlyProperties": [ "/properties/Name", - "/properties/TargetArn" + "/properties/TargetArn", + "/properties/Tags" ], "handlers": { "create": { diff --git a/internal/service/cloudformation/schemas/AWS_EC2_Subnet.json b/internal/service/cloudformation/schemas/AWS_EC2_Subnet.json index 167cbd4bb4..fe936f7490 100644 --- a/internal/service/cloudformation/schemas/AWS_EC2_Subnet.json +++ b/internal/service/cloudformation/schemas/AWS_EC2_Subnet.json @@ -109,7 +109,11 @@ "tagOnCreate": true, "tagUpdatable": true, "cloudFormationSystemTags": true, - "tagProperty": "/properties/Tags" + "tagProperty": "/properties/Tags", + "permissions": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ] }, "definitions": { "Tag": { diff --git a/internal/service/cloudformation/schemas/AWS_EC2_VPC.json b/internal/service/cloudformation/schemas/AWS_EC2_VPC.json index b684beac38..4646d3c7ec 100644 --- a/internal/service/cloudformation/schemas/AWS_EC2_VPC.json +++ b/internal/service/cloudformation/schemas/AWS_EC2_VPC.json @@ -1,5 +1,9 @@ { "tagging": { + "permissions": [ + "ec2:DeleteTags", + "ec2:CreateTags" + ], "taggable": true, "tagOnCreate": true, "tagUpdatable": true, diff --git a/internal/service/cloudformation/schemas/AWS_EC2_VPCEndpoint.json b/internal/service/cloudformation/schemas/AWS_EC2_VPCEndpoint.json index 0f42c84cbc..5f992e4b45 100644 --- a/internal/service/cloudformation/schemas/AWS_EC2_VPCEndpoint.json +++ b/internal/service/cloudformation/schemas/AWS_EC2_VPCEndpoint.json @@ -1,145 +1,173 @@ { + "tagging": { + "taggable": false, + "tagOnCreate": false, + "tagUpdatable": false, + "cloudFormationSystemTags": false + }, + "handlers": { + "read": { + "permissions": [ + "ec2:DescribeVpcEndpoints" + ] + }, + "create": { + "permissions": [ + "ec2:CreateVpcEndpoint", + "ec2:DescribeVpcEndpoints" + ], + "timeoutInMinutes": 210 + }, + "update": { + "permissions": [ + "ec2:ModifyVpcEndpoint", + "ec2:DescribeVpcEndpoints" + ], + "timeoutInMinutes": 210 + }, + "list": { + "permissions": [ + "ec2:DescribeVpcEndpoints" + ] + }, + "delete": { + "permissions": [ + "ec2:DeleteVpcEndpoints", + "ec2:DescribeVpcEndpoints" + ], + "timeoutInMinutes": 210 + } + }, "typeName": "AWS::EC2::VPCEndpoint", - "description": "Specifies a VPC endpoint. A VPC endpoint provides a private connection between your VPC and an endpoint service. You can use an endpoint service provided by AWS, an MKT Partner, or another AWS accounts in your organization. 
For more information, see the [User Guide](https://docs.aws.amazon.com/vpc/latest/privatelink/).\n An endpoint of type ``Interface`` establishes connections between the subnets in your VPC and an AWS-service, your own service, or a service hosted by another AWS-account. With an interface VPC endpoint, you specify the subnets in which to create the endpoint and the security groups to associate with the endpoint network interfaces.\n An endpoint of type ``gateway`` serves as a target for a route in your route table for traffic destined for S3 or DDB. You can specify an endpoint policy for the endpoint, which controls access to the service from your VPC. You can also specify the VPC route tables that use the endpoint. For more information about connectivity to S3, see [W", + "readOnlyProperties": [ + "/properties/NetworkInterfaceIds", + "/properties/CreationTimestamp", + "/properties/DnsEntries", + "/properties/Id" + ], + "description": "Specifies a VPC endpoint. A VPC endpoint provides a private connection between your VPC and an endpoint service. You can use an endpoint service provided by AWS, an MKT Partner, or another AWS accounts in your organization. For more information, see the [User Guide](https://docs.aws.amazon.com/vpc/latest/privatelink/).\n An endpoint of type ``Interface`` establishes connections between the subnets in your VPC and an AWS-service, your own service, or a service hosted by another AWS-account. With an interface VPC endpoint, you specify the subnets in which to create the endpoint and the security groups to associate with the endpoint network interfaces.\n An endpoint of type ``gateway`` serves as a target for a route in your route table for traffic destined for S3 or DDB. You can specify an endpoint policy for the endpoint, which controls access to the service from your VPC. You can also specify the VPC route tables that use the endpoint. For more information about connectivity to S3, see [Why can't I connect to an S3 bucket using a gateway VPC endpoint?](https://docs.aws.amazon.com/premiumsupport/knowledge-center/connect-s3-vpc-endpoint) \n An endpoint of type ``GatewayLoadBalancer`` provides private connectivity between your VPC and virtual appliances from a service provider.", + "createOnlyProperties": [ + "/properties/ServiceName", + "/properties/VpcEndpointType", + "/properties/VpcId" + ], "additionalProperties": false, + "primaryIdentifier": [ + "/properties/Id" + ], "properties": { - "Id": { - "type": "string", - "description": "" + "PrivateDnsEnabled": { + "description": "Indicate whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, ``kinesis.us-east-1.amazonaws.com``), which resolves to the private IP addresses of the endpoint network interfaces in the VPC. 
This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.\n To use a private hosted zone, you must set the following VPC attributes to ``true``: ``enableDnsHostnames`` and ``enableDnsSupport``.\n This property is supported only for interface endpoints.\n Default: ``false``", + "type": "boolean" }, "CreationTimestamp": { - "type": "string", - "description": "" + "description": "", + "type": "string" }, - "DnsEntries": { - "type": "array", - "uniqueItems": false, - "insertionOrder": false, - "items": { - "type": "string" - }, - "description": "" + "VpcId": { + "description": "The ID of the VPC.", + "type": "string" }, - "NetworkInterfaceIds": { - "type": "array", - "uniqueItems": false, + "RouteTableIds": { + "uniqueItems": true, + "description": "The IDs of the route tables. Routing is supported only for gateway endpoints.", "insertionOrder": false, + "type": "array", "items": { + "relationshipRef": { + "typeName": "AWS::EC2::RouteTable", + "propertyPath": "/properties/RouteTableId" + }, "type": "string" - }, - "description": "" + } + }, + "ServiceName": { + "description": "The name of the endpoint service.", + "type": "string" }, "PolicyDocument": { + "description": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n For CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. CFNlong converts YAML policies to JSON format before calling the API to create or modify the VPC endpoint.", "type": [ "string", "object" - ], - "description": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n For CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. CFNlong converts YAML policies to JSON format before calling the API to create or modify the VPC endpoint." + ] }, - "PrivateDnsEnabled": { - "type": "boolean", - "description": "Indicate whether to associate a private hosted zone with the specified VPC. The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, ``kinesis.us-east-1.amazonaws.com``), which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.\n To use a private hosted zone, you must set the following VPC attributes to ``true``: ``enableDnsHostnames`` and ``enableDnsSupport``.\n This property is supported only for interface endpoints.\n Default: ``false``" + "VpcEndpointType": { + "description": "The type of endpoint.\n Default: Gateway", + "type": "string", + "enum": [ + "Interface", + "Gateway", + "GatewayLoadBalancer" + ] }, - "RouteTableIds": { + "NetworkInterfaceIds": { + "uniqueItems": false, + "description": "", + "insertionOrder": false, "type": "array", - "description": "The IDs of the route tables. 
Routing is supported only for gateway endpoints.", - "uniqueItems": true, + "items": { + "type": "string" + } + }, + "Id": { + "description": "", + "type": "string" + }, + "DnsEntries": { + "uniqueItems": false, + "description": "", "insertionOrder": false, + "type": "array", "items": { "type": "string" } }, "SecurityGroupIds": { - "type": "array", - "description": "The IDs of the security groups to associate with the endpoint network interfaces. If this parameter is not specified, we use the default security group for the VPC. Security groups are supported only for interface endpoints.", "uniqueItems": true, + "description": "The IDs of the security groups to associate with the endpoint network interfaces. If this parameter is not specified, we use the default security group for the VPC. Security groups are supported only for interface endpoints.", "insertionOrder": false, + "type": "array", "items": { + "anyOf": [ + { + "relationshipRef": { + "typeName": "AWS::EC2::SecurityGroup", + "propertyPath": "/properties/GroupId" + } + }, + { + "relationshipRef": { + "typeName": "AWS::EC2::SecurityGroup", + "propertyPath": "/properties/Id" + } + }, + { + "relationshipRef": { + "typeName": "AWS::EC2::VPC", + "propertyPath": "/properties/DefaultSecurityGroup" + } + } + ], "type": "string" } }, - "ServiceName": { - "type": "string", - "description": "The name of the endpoint service." - }, "SubnetIds": { - "type": "array", - "description": "The IDs of the subnets in which to create endpoint network interfaces. You must specify this property for an interface endpoint or a Gateway Load Balancer endpoint. You can't specify this property for a gateway endpoint. For a Gateway Load Balancer endpoint, you can specify only one subnet.", "uniqueItems": true, + "description": "The IDs of the subnets in which to create endpoint network interfaces. You must specify this property for an interface endpoint or a Gateway Load Balancer endpoint. You can't specify this property for a gateway endpoint. For a Gateway Load Balancer endpoint, you can specify only one subnet.", "insertionOrder": false, + "type": "array", "items": { + "relationshipRef": { + "typeName": "AWS::EC2::Subnet", + "propertyPath": "/properties/SubnetId" + }, "type": "string" } - }, - "VpcEndpointType": { - "type": "string", - "enum": [ - "Interface", - "Gateway", - "GatewayLoadBalancer" - ], - "description": "The type of endpoint.\n Default: Gateway" - }, - "VpcId": { - "type": "string", - "description": "The ID of the VPC." 
} }, "required": [ "VpcId", "ServiceName" - ], - "readOnlyProperties": [ - "/properties/NetworkInterfaceIds", - "/properties/CreationTimestamp", - "/properties/DnsEntries", - "/properties/Id" - ], - "createOnlyProperties": [ - "/properties/ServiceName", - "/properties/VpcEndpointType", - "/properties/VpcId" - ], - "primaryIdentifier": [ - "/properties/Id" - ], - "tagging": { - "taggable": false, - "tagOnCreate": false, - "tagUpdatable": false, - "cloudFormationSystemTags": false - }, - "handlers": { - "create": { - "permissions": [ - "ec2:CreateVpcEndpoint", - "ec2:DescribeVpcEndpoints" - ], - "timeoutInMinutes": 210 - }, - "read": { - "permissions": [ - "ec2:DescribeVpcEndpoints" - ] - }, - "update": { - "permissions": [ - "ec2:ModifyVpcEndpoint", - "ec2:DescribeVpcEndpoints" - ], - "timeoutInMinutes": 210 - }, - "delete": { - "permissions": [ - "ec2:DeleteVpcEndpoints", - "ec2:DescribeVpcEndpoints" - ], - "timeoutInMinutes": 210 - }, - "list": { - "permissions": [ - "ec2:DescribeVpcEndpoints" - ] - } - } + ] } diff --git a/internal/service/cloudformation/schemas/AWS_EC2_VPCPeeringConnection.json b/internal/service/cloudformation/schemas/AWS_EC2_VPCPeeringConnection.json index 34ffa2331a..0a780368d0 100644 --- a/internal/service/cloudformation/schemas/AWS_EC2_VPCPeeringConnection.json +++ b/internal/service/cloudformation/schemas/AWS_EC2_VPCPeeringConnection.json @@ -1,5 +1,9 @@ { "tagging": { + "permissions": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], "taggable": true, "tagOnCreate": true, "tagUpdatable": true, diff --git a/internal/service/cloudformation/schemas/AWS_ECS_Service.json b/internal/service/cloudformation/schemas/AWS_ECS_Service.json index 9cf9e7cda8..87c2d871c3 100644 --- a/internal/service/cloudformation/schemas/AWS_ECS_Service.json +++ b/internal/service/cloudformation/schemas/AWS_ECS_Service.json @@ -1,738 +1,741 @@ { + "tagging": { + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "tagProperty": "/properties/Tags", + "cloudFormationSystemTags": true + }, + "handlers": { + "read": { + "permissions": [ + "ecs:DescribeServices" + ] + }, + "create": { + "permissions": [ + "ecs:CreateService", + "ecs:DescribeServices", + "iam:PassRole", + "ecs:TagResource" + ], + "timeoutInMinutes": 180 + }, + "update": { + "permissions": [ + "ecs:DescribeServices", + "ecs:ListTagsForResource", + "ecs:TagResource", + "ecs:UntagResource", + "ecs:UpdateService" + ], + "timeoutInMinutes": 180 + }, + "list": { + "permissions": [ + "ecs:DescribeServices", + "ecs:ListClusters", + "ecs:ListServices" + ] + }, + "delete": { + "permissions": [ + "ecs:DeleteService", + "ecs:DescribeServices" + ], + "timeoutInMinutes": 30 + } + }, "typeName": "AWS::ECS::Service", - "description": "The ``AWS::ECS::Service`` resource creates an Amazon Elastic Container Service (Amazon ECS) service that runs and maintains the requested number of tasks and associated load balancers.\n The stack update fails if you change any properties that require replacement and at least one Amazon ECS Service Connect ``ServiceConnectService`` is configured. This is because AWS CloudFormation creates the replacement service first, but each ``ServiceConnectService`` must have a name that is unique in the namespace.\n Starting April 15, 2023, AWS; will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. 
After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, ECS, or EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "readOnlyProperties": [ + "/properties/ServiceArn", + "/properties/Name" + ], + "description": "The ``AWS::ECS::Service`` resource creates an Amazon Elastic Container Service (Amazon ECS) service that runs and maintains the requested number of tasks and associated load balancers.\n The stack update fails if you change any properties that require replacement and at least one ECS Service Connect ``ServiceConnectConfiguration`` property the is configured. This is because AWS CloudFormation creates the replacement service first, but each ``ServiceConnectService`` must have a name that is unique in the namespace.\n Starting April 15, 2023, AWS; will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, ECS, or EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "writeOnlyProperties": [ + "/properties/ServiceConnectConfiguration", + "/properties/VolumeConfigurations" + ], + "createOnlyProperties": [ + "/properties/Cluster", + "/properties/DeploymentController", + "/properties/LaunchType", + "/properties/Role", + "/properties/SchedulingStrategy", + "/properties/ServiceName" + ], + "additionalProperties": false, + "primaryIdentifier": [ + "/properties/ServiceArn", + "/properties/Cluster" + ], "definitions": { - "AwsVpcConfiguration": { + "CapacityProviderStrategyItem": { + "description": "The details of a capacity provider strategy. A capacity provider strategy can be set when using the ``RunTask`` or ``CreateService`` APIs or as the default capacity provider strategy for a cluster with the ``CreateCluster`` API.\n Only capacity providers that are already associated with a cluster and have an ``ACTIVE`` or ``UPDATING`` status can be used in a capacity provider strategy. The ``PutClusterCapacityProviders`` API is used to associate a capacity provider with a cluster.\n If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New Auto Scaling group capacity providers can be created with the ``CreateCapacityProvider`` API operation.\n To use an FARGATElong capacity provider, specify either the ``FARGATE`` or ``FARGATE_SPOT`` capacity providers. The FARGATElong capacity providers are available to all accounts and only need to be associated with a cluster to be used in a capacity provider strategy.", + "additionalProperties": false, "type": "object", "properties": { - "AssignPublicIp": { - "type": "string", - "enum": [ - "DISABLED", - "ENABLED" - ], - "description": "Whether the task's elastic network interface receives a public IP address. The default value is ``DISABLED``." + "CapacityProvider": { + "description": "The short name of the capacity provider.", + "type": "string" }, - "SecurityGroups": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The IDs of the security groups associated with the task or service. 
If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per ``AwsVpcConfiguration``.\n All specified security groups must be from the same VPC." + "Base": { + "description": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of ``0`` is used.", + "type": "integer" }, - "Subnets": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per ``AwsVpcConfiguration``.\n All specified subnets must be from the same VPC." + "Weight": { + "description": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The ``weight`` value is taken into consideration after the ``base`` value, if defined, is satisfied.\n If no ``weight`` value is specified, the default value of ``0`` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of ``0`` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of ``0``, any ``RunTask`` or ``CreateService`` actions using the capacity provider strategy will fail.\n An example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of ``1``, then when the ``base`` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of ``1`` for *capacityProviderA* and a weight of ``4`` for *capacityProviderB*, then for every one task that's run using *capacityProviderA*, four tasks would use *capacityProviderB*.", + "type": "integer" } - }, - "additionalProperties": false, - "description": "An object representing the networking details for a task or service. For example ``awsvpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}``" + } }, - "CapacityProviderStrategyItem": { + "TimeoutConfiguration": { + "description": "An object that represents the timeout configurations for Service Connect.\n If ``idleTimeout`` is set to a time that is less than ``perRequestTimeout``, the connection will close when the ``idleTimeout`` is reached and not the ``perRequestTimeout``.", + "additionalProperties": false, "type": "object", "properties": { - "Base": { - "type": "integer", - "description": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of ``0`` is used." - }, - "CapacityProvider": { - "type": "string", - "description": "The short name of the capacity provider." + "PerRequestTimeoutSeconds": { + "description": "The amount of time waiting for the upstream to respond with a complete response per request. A value of ``0`` can be set to disable ``perRequestTimeout``. ``perRequestTimeout`` can only be set if Service Connect ``appProtocol`` isn't ``TCP``. 
Only ``idleTimeout`` is allowed for ``TCP`` ``appProtocol``.", + "type": "integer" }, - "Weight": { - "type": "integer", - "description": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The ``weight`` value is taken into consideration after the ``base`` value, if defined, is satisfied.\n If no ``weight`` value is specified, the default value of ``0`` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of ``0`` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of ``0``, any ``RunTask`` or ``CreateService`` actions using the capacity provider strategy will fail.\n An example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of ``1``, then when the ``base`` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of ``1`` for *capacityProviderA* and a weight of ``4`` for *capacityProviderB*, then for every one task that's run using *capacityProviderA*, four tasks would use *capacityProviderB*." + "IdleTimeoutSeconds": { + "description": "The amount of time in seconds a connection will stay active while idle. A value of ``0`` can be set to disable ``idleTimeout``.\n The ``idleTimeout`` default for ``HTTP``/``HTTP2``/``GRPC`` is 5 minutes.\n The ``idleTimeout`` default for ``TCP`` is 1 hour.", + "type": "integer" } - }, - "additionalProperties": false, - "description": "The details of a capacity provider strategy. A capacity provider strategy can be set when using the ``RunTask`` or ``CreateService`` APIs or as the default capacity provider strategy for a cluster with the ``CreateCluster`` API.\n Only capacity providers that are already associated with a cluster and have an ``ACTIVE`` or ``UPDATING`` status can be used in a capacity provider strategy. The ``PutClusterCapacityProviders`` API is used to associate a capacity provider with a cluster.\n If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must already be created. New Auto Scaling group capacity providers can be created with the ``CreateCapacityProvider`` API operation.\n To use an FARGATElong capacity provider, specify either the ``FARGATE`` or ``FARGATE_SPOT`` capacity providers. The FARGATElong capacity providers are available to all accounts and only need to be associated with a cluster to be used in a capacity provider strategy." + } }, "DeploymentAlarms": { + "description": "One of the methods which provide a way for you to quickly identify when a deployment has failed, and then to optionally roll back the failure to the last working deployment.\n When the alarms are generated, Amazon ECS sets the service deployment to failed. 
Set the rollback parameter to have Amazon ECS to roll back your service to the last completed deployment after a failure.\n You can only use the ``DeploymentAlarms`` method to detect failures when the ``DeploymentController`` is set to ``ECS`` (rolling update).\n For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*.", + "additionalProperties": false, "type": "object", "properties": { "AlarmNames": { + "description": "One or more CloudWatch alarm names. Use a \",\" to separate the alarms.", "type": "array", "items": { "type": "string" - }, - "description": "One or more CloudWatch alarm names. Use a \",\" to separate the alarms." - }, - "Rollback": { - "type": "boolean", - "description": "Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is used, when a service deployment fails, the service is rolled back to the last deployment that completed successfully." + } }, "Enable": { - "type": "boolean", - "description": "Determines whether to use the CloudWatch alarm option in the service deployment process." + "description": "Determines whether to use the CloudWatch alarm option in the service deployment process.", + "type": "boolean" + }, + "Rollback": { + "description": "Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is used, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.", + "type": "boolean" } }, "required": [ "AlarmNames", "Rollback", "Enable" - ], - "additionalProperties": false, - "description": "One of the methods which provide a way for you to quickly identify when a deployment has failed, and then to optionally roll back the failure to the last working deployment.\n When the alarms are generated, Amazon ECS sets the service deployment to failed. Set the rollback parameter to have Amazon ECS to roll back your service to the last completed deployment after a failure.\n You can only use the ``DeploymentAlarms`` method to detect failures when the ``DeploymentController`` is set to ``ECS`` (rolling update).\n For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*." + ] }, - "DeploymentCircuitBreaker": { + "ServiceConnectTlsCertificateAuthority": { + "description": "The certificate root authority that secures your service.", + "additionalProperties": false, "type": "object", "properties": { - "Enable": { - "type": "boolean", - "description": "Determines whether to use the deployment circuit breaker logic for the service." - }, - "Rollback": { - "type": "boolean", - "description": "Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is on, when a service deployment fails, the service is rolled back to the last deployment that completed successfully." 
+ "AwsPcaAuthorityArn": { + "description": "The ARN of the AWS Private Certificate Authority certificate.", + "type": "string" } - }, - "required": [ - "Enable", - "Rollback" - ], - "additionalProperties": false, - "description": "The deployment circuit breaker can only be used for services using the rolling update (``ECS``) deployment type.\n The *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If it is turned on, a service deployment will transition to a failed state and stop launching new tasks. You can also configure Amazon ECS to roll back your service to the last completed deployment after a failure. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*.\n For more information about API failure reasons, see [API failure reasons](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html) in the *Amazon Elastic Container Service Developer Guide*." + } }, - "DeploymentConfiguration": { + "LoadBalancer": { + "description": "The ``LoadBalancer`` property specifies details on a load balancer that is used with a service.\n If the service is using the ``CODE_DEPLOY`` deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When you are creating an ACDlong deployment group, you specify two target groups (referred to as a ``targetGroupPair``). Each target group binds to a separate task set in the deployment. The load balancer can also have up to two listeners, a required listener for production traffic and an optional listener that allows you to test new revisions of the service before routing production traffic to it.\n Services with tasks that use the ``awsvpc`` network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ``ip`` as the target type, not ``instance``. Tasks that use the ``awsvpc`` network mode are associated with an elastic network interface, not an Amazon EC2 instance.", + "additionalProperties": false, "type": "object", "properties": { - "DeploymentCircuitBreaker": { - "$ref": "#/definitions/DeploymentCircuitBreaker", - "description": "The deployment circuit breaker can only be used for services using the rolling update (``ECS``) deployment type.\n The *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*" + "TargetGroupArn": { + "description": "The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.\n A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. \n For services using the ``ECS`` deployment controller, you can specify one or multiple target groups. 
For more information, see [Registering multiple target groups with a service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/register-multiple-targetgroups.html) in the *Amazon Elastic Container Service Developer Guide*.\n For services using the ``CODE_DEPLOY`` deployment controller, you're required to define two target groups for the load balancer. For more information, see [Blue/green deployment with CodeDeploy](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-bluegreen.html) in the *Amazon Elastic Container Service Developer Guide*.\n If your service's task definition uses the ``awsvpc`` network mode, you must choose ``ip`` as the target type, not ``instance``. Do this when creating your target groups because tasks that use the ``awsvpc`` network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type.", + "type": "string" }, - "MaximumPercent": { - "type": "integer", - "description": "If a service is using the rolling update (``ECS``) deployment type, the ``maximumPercent`` parameter represents an upper limit on the number of your service's tasks that are allowed in the ``RUNNING`` or ``PENDING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the ``REPLICA`` service scheduler and has a ``desiredCount`` of four tasks and a ``maximumPercent`` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default ``maximumPercent`` value for a service using the ``REPLICA`` service scheduler is 200%.\n If a service is using either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service." + "LoadBalancerName": { + "description": "The name of the load balancer to associate with the Amazon ECS service or task set.\n If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted.", + "type": "string" }, - "MinimumHealthyPercent": { - "type": "integer", - "description": "If a service is using the rolling update (``ECS``) deployment type, the ``minimumHealthyPercent`` represents a lower limit on the number of your service's tasks that must remain in the ``RUNNING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a ``desiredCount`` of four tasks and a ``minimumHealthyPercent`` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks. 
\n For services that *do not* use a load balancer, the following should be noted:\n + A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n + If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a ``RUNNING`` state before the task is counted towards the minimum healthy percent total.\n + If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings. \n \n For services that *do* use a load balancer, the following should be noted:\n + If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n + If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n \n If a service is using either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state. If a service is using either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." + "ContainerName": { + "description": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n You need to specify the container name when configuring the target group for an Amazon ECS load balancer.", + "type": "string" }, - "Alarms": { - "$ref": "#/definitions/DeploymentAlarms", - "description": "Information about the CloudWatch alarms." + "ContainerPort": { + "description": "The port on the container to associate with the load balancer. This port must correspond to a ``containerPort`` in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they're launched on must allow ingress traffic on the ``hostPort`` of the port mapping.", + "type": "integer" } - }, - "additionalProperties": false, - "description": "The ``DeploymentConfiguration`` property specifies optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks." + } }, - "DeploymentController": { + "PlacementStrategy": { + "description": "The task placement strategy for a task or service. 
For more information, see [Task placement strategies](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html) in the *Amazon Elastic Container Service Developer Guide*.", + "additionalProperties": false, "type": "object", "properties": { + "Field": { + "description": "The field to apply the placement strategy against. For the ``spread`` placement strategy, valid values are ``instanceId`` (or ``host``, which has the same effect), or any platform or custom attribute that's applied to a container instance, such as ``attribute:ecs.availability-zone``. For the ``binpack`` placement strategy, valid values are ``cpu`` and ``memory``. For the ``random`` placement strategy, this field is not used.", + "type": "string" + }, "Type": { + "description": "The type of placement strategy. The ``random`` placement strategy randomly places tasks on available candidates. The ``spread`` placement strategy spreads placement across available candidates evenly based on the ``field`` parameter. The ``binpack`` strategy places tasks on available candidates that have the least available amount of the resource that's specified with the ``field`` parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory but still enough to run the task.", "type": "string", "enum": [ - "CODE_DEPLOY", - "ECS", - "EXTERNAL" - ], - "description": "The deployment controller type to use. There are three deployment controller types available:\n + ECS The rolling update (ECS) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the DeploymentConfiguration. + CODE_DEPLOY The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment model powered by , which allows you to verify a new deployment of a service before sending production traffic to it. + EXTERNAL The external (EXTERNAL) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service." + "binpack", + "random", + "spread" + ] } }, - "additionalProperties": false, - "description": "The deployment controller to use for the service. For more information, see [Amazon ECS deployment types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) in the *Amazon Elastic Container Service Developer Guide*." + "required": [ + "Type" + ] }, - "EBSTagSpecification": { + "ServiceConnectConfiguration": { + "description": "The Service Connect configuration of your Amazon ECS service. The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.\n Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. 
For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*.", + "additionalProperties": false, "type": "object", - "required": [ - "ResourceType" - ], "properties": { - "ResourceType": { - "type": "string", - "description": "The type of volume resource." - }, - "Tags": { + "Services": { + "description": "The list of Service Connect service objects. These are names and aliases (also known as endpoints) that are used by other Amazon ECS services to connect to this service. \n This field is not required for a \"client\" Amazon ECS service that's a member of a namespace only to connect to other services within the namespace. An example of this would be a frontend application that accepts incoming requests from either a load balancer that's attached to the service or by other means.\n An object selects a port from the task definition, assigns a name for the CMAPlong service, and a list of aliases (endpoints) and ports for client applications to refer to this service.", "type": "array", "items": { - "$ref": "#/definitions/Tag" - }, - "description": "The tags applied to this Amazon EBS volume. ``AmazonECSCreated`` and ``AmazonECSManaged`` are reserved tags that can't be used." + "$ref": "#/definitions/ServiceConnectService" + } }, - "PropagateTags": { - "type": "string", - "enum": [ - "SERVICE", - "TASK_DEFINITION" - ], - "description": "Determines whether to propagate the tags from the task definition to ?the Amazon EBS volume. Tags can only propagate to a ``SERVICE`` specified in ?``ServiceVolumeConfiguration``. If no value is specified, the tags aren't ?propagated." + "Enabled": { + "description": "Specifies whether to use Service Connect with this service.", + "type": "boolean" + }, + "LogConfiguration": { + "description": "The log configuration for the container. This parameter maps to ``LogConfig`` in the docker container create command and the ``--log-driver`` option to docker run.\n By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n Understand the following when specifying a log configuration for your containers.\n + Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n For tasks on FARGATElong, the supported log drivers are ``awslogs``, ``splunk``, and ``awsfirelens``.\n For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs``, ``fluentd``, ``gelf``, ``json-file``, ``journald``,``syslog``, ``splunk``, and ``awsfirelens``.\n + This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n + For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ``ECS_AVAILABLE_LOGGING_DRIVERS`` environment variable before containers placed on that instance can use these log configuration options. 
For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide*.\n + For tasks that are on FARGATElong, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", + "$ref": "#/definitions/LogConfiguration" + }, + "Namespace": { + "description": "The namespace name or full Amazon Resource Name (ARN) of the CMAPlong namespace for use with Service Connect. The namespace must be in the same AWS Region as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For more information about CMAPlong, see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *Developer Guide*.", + "type": "string" } }, - "description": "The tag specifications of an Amazon EBS volume." + "required": [ + "Enabled" + ] }, - "LoadBalancer": { + "ServiceConnectTlsConfiguration": { + "description": "The key that encrypts and decrypts your resources for Service Connect TLS.", + "additionalProperties": false, "type": "object", "properties": { - "ContainerName": { - "type": "string", - "description": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n You need to specify the container name when configuring the target group for an Amazon ECS load balancer." - }, - "ContainerPort": { - "type": "integer", - "description": "The port on the container to associate with the load balancer. This port must correspond to a ``containerPort`` in the task definition the tasks in the service are using. For tasks that use the EC2 launch type, the container instance they're launched on must allow ingress traffic on the ``hostPort`` of the port mapping." + "IssuerCertificateAuthority": { + "description": "The signer certificate authority.", + "$ref": "#/definitions/ServiceConnectTlsCertificateAuthority" }, - "LoadBalancerName": { - "type": "string", - "description": "The name of the load balancer to associate with the Amazon ECS service or task set.\n If you are using an Application Load Balancer or a Network Load Balancer the load balancer name parameter should be omitted." + "KmsKey": { + "description": "The AWS Key Management Service key.", + "type": "string" }, - "TargetGroupArn": { - "type": "string", - "description": "The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set.\n A target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. \n For services using the ``ECS`` deployment controller, you can specify one or multiple target groups. For more information, see [Registering multiple target groups with a service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/register-multiple-targetgroups.html) in the *Amazon Elastic Container Service Developer Guide*.\n For services using the ``CODE_DEPLOY`` deployment controller, you're required to define two target groups for the load balancer. 
For more information, see [Blue/green deployment with CodeDeploy](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-bluegreen.html) in the *Amazon Elastic Container Service Developer Guide*.\n If your service's task definition uses the ``awsvpc`` network mode, you must choose ``ip`` as the target type, not ``instance``. Do this when creating your target groups because tasks that use the ``awsvpc`` network mode are associated with an elastic network interface, not an Amazon EC2 instance. This network mode is required for the Fargate launch type." + "RoleArn": { + "description": "The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS.", + "type": "string" } }, + "required": [ + "IssuerCertificateAuthority" + ] + }, + "DeploymentController": { + "description": "The deployment controller to use for the service. For more information, see [Amazon ECS deployment types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) in the *Amazon Elastic Container Service Developer Guide*.", "additionalProperties": false, - "description": "The ``LoadBalancer`` property specifies details on a load balancer that is used with a service.\n If the service is using the ``CODE_DEPLOY`` deployment controller, the service is required to use either an Application Load Balancer or Network Load Balancer. When you are creating an ACDlong deployment group, you specify two target groups (referred to as a ``targetGroupPair``). Each target group binds to a separate task set in the deployment. The load balancer can also have up to two listeners, a required listener for production traffic and an optional listener that allows you to test new revisions of the service before routing production traffic to it.\n Services with tasks that use the ``awsvpc`` network mode (for example, those with the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers are not supported. Also, when you create any target groups for these services, you must choose ``ip`` as the target type, not ``instance``. Tasks that use the ``awsvpc`` network mode are associated with an elastic network interface, not an Amazon EC2 instance." + "type": "object", + "properties": { + "Type": { + "description": "The deployment controller type to use. There are three deployment controller types available:\n + ECS The rolling update (ECS) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the DeploymentConfiguration. + CODE_DEPLOY The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment model powered by , which allows you to verify a new deployment of a service before sending production traffic to it. + EXTERNAL The external (EXTERNAL) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service.", + "type": "string", + "enum": [ + "CODE_DEPLOY", + "ECS", + "EXTERNAL" + ] + } + } }, "LogConfiguration": { + "description": "The log configuration for the container. 
This parameter maps to ``LogConfig`` in the docker container create command and the ``--log-driver`` option to docker run.\n By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n Understand the following when specifying a log configuration for your containers.\n + Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n For tasks on FARGATElong, the supported log drivers are ``awslogs``, ``splunk``, and ``awsfirelens``.\n For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs``, ``fluentd``, ``gelf``, ``json-file``, ``journald``,``syslog``, ``splunk``, and ``awsfirelens``.\n + This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n + For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ``ECS_AVAILABLE_LOGGING_DRIVERS`` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide*.\n + For tasks that are on FARGATElong, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", + "additionalProperties": false, "type": "object", "properties": { - "LogDriver": { - "type": "string", - "description": "The log driver to use for the container.\n For tasks on FARGATElong, the supported log drivers are ``awslogs``, ``splunk``, and ``awsfirelens``.\n For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs``, ``fluentd``, ``gelf``, ``json-file``, ``journald``, ``logentries``,``syslog``, ``splunk``, and ``awsfirelens``.\n For more information about using the ``awslogs`` log driver, see [Using the awslogs log driver](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide*.\n For more information about using the ``awsfirelens`` log driver, see [Custom log routing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) in the *Amazon Elastic Container Service Developer Guide*.\n If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software." + "SecretOptions": { + "description": "The secrets to pass to the log configuration. 
For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide*.", + "insertionOrder": false, + "type": "array", + "items": { + "$ref": "#/definitions/Secret" + } }, "Options": { - "type": "object", "patternProperties": { "": { "type": "string" } }, + "description": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``", "additionalProperties": false, - "description": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``" + "type": "object" }, - "SecretOptions": { - "type": "array", - "insertionOrder": false, - "items": { - "$ref": "#/definitions/Secret" - }, - "description": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide*." + "LogDriver": { + "description": "The log driver to use for the container.\n For tasks on FARGATElong, the supported log drivers are ``awslogs``, ``splunk``, and ``awsfirelens``.\n For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs``, ``fluentd``, ``gelf``, ``json-file``, ``journald``, ``syslog``, ``splunk``, and ``awsfirelens``.\n For more information about using the ``awslogs`` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide*.\n For more information about using the ``awsfirelens`` log driver, see [Send Amazon ECS logs to an service or Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html).\n If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.", + "type": "string" } - }, - "additionalProperties": false, - "description": "The log configuration for the container. This parameter maps to ``LogConfig`` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the ``--log-driver`` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/).\n By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. 
For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n Understand the following when specifying a log configuration for your containers.\n + Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n For tasks on FARGATElong, the supported log drivers are ``awslogs``, ``splunk``, and ``awsfirelens``.\n For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs``, ``fluentd``, ``gelf``, ``json-file``, ``journald``, ``logentries``,``syslog``, ``splunk``, and ``awsfirelens``.\n + This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n + For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ``ECS_AVAILABLE_LOGGING_DRIVERS`` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide*.\n + For tasks that are on FARGATElong, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to." + } }, - "NetworkConfiguration": { + "Secret": { + "description": "An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:\n + To inject sensitive data into your containers as environment variables, use the ``secrets`` container definition parameter.\n + To reference sensitive information in the log configuration of a container, use the ``secretOptions`` container definition parameter.\n \n For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide*.", + "additionalProperties": false, "type": "object", + "required": [ + "Name", + "ValueFrom" + ], "properties": { - "AwsvpcConfiguration": { - "$ref": "#/definitions/AwsVpcConfiguration", - "description": "The VPC subnets and security groups that are associated with a task.\n All specified subnets and security groups must be from the same VPC." + "ValueFrom": { + "description": "The secret to expose to the container. 
The supported values are either the full ARN of the ASMlong secret or the full ARN of the parameter in the SSM Parameter Store.\n For information about the require IAMlong permissions, see [Required IAM permissions for Amazon ECS secrets](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-secrets.html#secrets-iam) (for Secrets Manager) or [Required IAM permissions for Amazon ECS secrets](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-parameters.html) (for Systems Manager Parameter store) in the *Amazon Elastic Container Service Developer Guide*.\n If the SSM Parameter Store parameter exists in the same Region as the task you're launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.", + "type": "string" + }, + "Name": { + "description": "The name of the secret.", + "type": "string" } - }, - "additionalProperties": false, - "description": "The ``NetworkConfiguration`` property specifies an object representing the network configuration for a task or service." + } }, - "PlacementConstraint": { + "AwsVpcConfiguration": { + "description": "An object representing the networking details for a task or service. For example ``awsVpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}``.", + "additionalProperties": false, "type": "object", "properties": { - "Expression": { - "type": "string", - "description": "A cluster query language expression to apply to the constraint. The expression can have a maximum length of 2000 characters. You can't specify an expression if the constraint type is ``distinctInstance``. For more information, see [Cluster query language](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) in the *Amazon Elastic Container Service Developer Guide*." + "SecurityGroups": { + "description": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per ``awsvpcConfiguration``.\n All specified security groups must be from the same VPC.", + "type": "array", + "items": { + "type": "string" + } }, - "Type": { + "Subnets": { + "description": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per ``awsvpcConfiguration``.\n All specified subnets must be from the same VPC.", + "type": "array", + "items": { + "type": "string" + } + }, + "AssignPublicIp": { + "description": "Whether the task's elastic network interface receives a public IP address. The default value is ``DISABLED``.", "type": "string", "enum": [ - "distinctInstance", - "memberOf" - ], - "description": "The type of constraint. Use ``distinctInstance`` to ensure that each task in a particular group is running on a different container instance. Use ``memberOf`` to restrict the selection to a group of valid candidates." + "DISABLED", + "ENABLED" + ] } - }, - "required": [ - "Type" - ], - "additionalProperties": false, - "description": "The ``PlacementConstraint`` property specifies an object representing a constraint on task placement in the task definition. For more information, see [Task Placement Constraints](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) in the *Amazon Elastic Container Service Developer Guide*." 
+ } }, - "PlacementStrategy": { + "PlacementConstraint": { + "description": "An object representing a constraint on task placement. For more information, see [Task placement constraints](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html) in the *Amazon Elastic Container Service Developer Guide*.\n If you're using the Fargate launch type, task placement constraints aren't supported.", + "additionalProperties": false, "type": "object", "properties": { - "Field": { - "type": "string", - "description": "The field to apply the placement strategy against. For the ``spread`` placement strategy, valid values are ``instanceId`` (or ``host``, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as ``attribute:ecs.availability-zone``. For the ``binpack`` placement strategy, valid values are ``CPU`` and ``MEMORY``. For the ``random`` placement strategy, this field is not used." - }, "Type": { + "description": "The type of constraint. Use ``distinctInstance`` to ensure that each task in a particular group is running on a different container instance. Use ``memberOf`` to restrict the selection to a group of valid candidates.", "type": "string", "enum": [ - "binpack", - "random", - "spread" - ], - "description": "The type of placement strategy. The ``random`` placement strategy randomly places tasks on available candidates. The ``spread`` placement strategy spreads placement across available candidates evenly based on the ``field`` parameter. The ``binpack`` strategy places tasks on available candidates that have the least available amount of the resource that's specified with the ``field`` parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory but still enough to run the task." + "distinctInstance", + "memberOf" + ] + }, + "Expression": { + "description": "A cluster query language expression to apply to the constraint. The expression can have a maximum length of 2000 characters. You can't specify an expression if the constraint type is ``distinctInstance``. For more information, see [Cluster query language](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html) in the *Amazon Elastic Container Service Developer Guide*.", + "type": "string" } }, "required": [ "Type" - ], - "additionalProperties": false, - "description": "The ``PlacementStrategy`` property specifies the task placement strategy for a task or service. For more information, see [Task Placement Strategies](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html) in the *Amazon Elastic Container Service Developer Guide*." + ] }, - "Secret": { - "type": "object", - "required": [ - "Name", - "ValueFrom" - ], - "properties": { - "Name": { - "type": "string", - "description": "The name of the secret." - }, - "ValueFrom": { - "type": "string", - "description": "The secret to expose to the container. 
The supported values are either the full ARN of the ASMlong secret or the full ARN of the parameter in the SSM Parameter Store.\n For information about the require IAMlong permissions, see [Required IAM permissions for Amazon ECS secrets](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-secrets.html#secrets-iam) (for Secrets Manager) or [Required IAM permissions for Amazon ECS secrets](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-parameters.html) (for Systems Manager Parameter store) in the *Amazon Elastic Container Service Developer Guide*.\n If the SSM Parameter Store parameter exists in the same Region as the task you're launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified." - } - }, + "ServiceManagedEBSVolumeConfiguration": { + "description": "The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service.\n Many of these parameters map 1:1 with the Amazon EBS ``CreateVolume`` API request parameters.", "additionalProperties": false, - "description": "An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:\n + To inject sensitive data into your containers as environment variables, use the ``secrets`` container definition parameter.\n + To reference sensitive information in the log configuration of a container, use the ``secretOptions`` container definition parameter.\n \n For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide*." - }, - "ServiceConnectClientAlias": { "type": "object", - "properties": { - "Port": { - "type": "integer", - "description": "The listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace.\n To avoid changing your applications in client Amazon ECS services, set this to the same port that the client application uses by default. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*." - }, - "DnsName": { - "type": "string", - "description": "The ``dnsName`` is the name that you use in the applications of client tasks to connect to this service. The name must be a valid DNS name but doesn't need to be fully-qualified. The name can include up to 127 characters. The name can include lowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name can't start with a hyphen.\n If this parameter isn't specified, the default value of ``discoveryName.namespace`` is used. If the ``discoveryName`` isn't specified, the port mapping name from the task definition is used in ``portName.namespace``.\n To avoid changing your applications in client Amazon ECS services, set this to the same name that the client application uses by default. For example, a few common names are ``database``, ``db``, or the lowercase name of a database, such as ``mysql`` or ``redis``. 
For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*." - } - }, "required": [ - "Port" + "RoleArn" ], - "additionalProperties": false, - "description": "Each alias (\"endpoint\") is a fully-qualified name and port number that other tasks (\"clients\") can use to connect to this service.\n Each name and port mapping must be unique within the namespace.\n Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*." - }, - "ServiceConnectConfiguration": { - "type": "object", "properties": { - "Enabled": { - "type": "boolean", - "description": "Specifies whether to use Service Connect with this service." + "SnapshotId": { + "description": "The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the ``SnapshotId`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.", + "type": "string" }, - "Namespace": { - "type": "string", - "description": "The namespace name or full Amazon Resource Name (ARN) of the CMAPlong namespace for use with Service Connect. The namespace must be in the same AWS Region as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For more information about CMAPlong, see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *Developer Guide*." + "VolumeType": { + "description": "The volume type. This parameter maps 1:1 with the ``VolumeType`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) in the *Amazon EC2 User Guide*.\n The following are the supported volume types.\n + General Purpose SSD: ``gp2``|``gp3`` \n + Provisioned IOPS SSD: ``io1``|``io2`` \n + Throughput Optimized HDD: ``st1`` \n + Cold HDD: ``sc1`` \n + Magnetic: ``standard`` \n The magnetic volume type is not supported on Fargate.", + "type": "string" }, - "Services": { + "KmsKeyId": { + "description": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the ``KmsKeyId`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.\n AWS authenticates the AWS Key Management Service key asynchronously. 
Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.", + "type": "string" + }, + "TagSpecifications": { + "description": "The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps 1:1 with the ``TagSpecifications.N`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.", "type": "array", "items": { - "$ref": "#/definitions/ServiceConnectService" - }, - "description": "The list of Service Connect service objects. These are names and aliases (also known as endpoints) that are used by other Amazon ECS services to connect to this service. \n This field is not required for a \"client\" Amazon ECS service that's a member of a namespace only to connect to other services within the namespace. An example of this would be a frontend application that accepts incoming requests from either a load balancer that's attached to the service or by other means.\n An object selects a port from the task definition, assigns a name for the CMAPlong service, and a list of aliases (endpoints) and ports for client applications to refer to this service." + "$ref": "#/definitions/EBSTagSpecification" + } }, - "LogConfiguration": { - "$ref": "#/definitions/LogConfiguration", - "description": "The log configuration for the container. This parameter maps to ``LogConfig`` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the ``--log-driver`` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/).\n By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n Understand the following when specifying a log configuration for your containers.\n + Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n For tasks on FARGATElong, the supported log drivers are ``awslogs``, ``splunk``, and ``awsfirelens``.\n For tasks hosted on Amazon EC2 instances, the supported log drivers are ``awslogs``, ``fluentd``, ``gelf``, ``json-file``, ``journald``, ``logentries``,``syslog``, ``splunk``, and ``awsfirelens``.\n + This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n + For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the ``ECS_AVAILABLE_LOGGING_DRIVERS`` environment variable before containers placed on that instance can use these log configuration options. 
For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide*.\n + For tasks that are on FARGATElong, because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to." - } - }, - "required": [ - "Enabled" - ], - "additionalProperties": false, - "description": "The Service Connect configuration of your Amazon ECS service. The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.\n Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*." - }, - "ServiceConnectService": { - "type": "object", - "properties": { - "PortName": { - "type": "string", - "description": "The ``portName`` must match the name of one of the ``portMappings`` from all the containers in the task definition of this Amazon ECS service." + "FilesystemType": { + "description": "The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n The available filesystem types are\u2028 ``ext3``, ``ext4``, and ``xfs``. If no value is specified, the ``xfs`` filesystem type is used by default.", + "type": "string" }, - "DiscoveryName": { - "type": "string", - "description": "The ``discoveryName`` is the name of the new CMAP service that Amazon ECS creates for this Amazon ECS service. This must be unique within the CMAP namespace. The name can contain up to 64 characters. The name can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.\n If the ``discoveryName`` isn't specified, the port mapping name from the task definition is used in ``portName.namespace``." + "Encrypted": { + "description": "Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the ``Encrypted`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.", + "type": "boolean" }, - "ClientAliases": { - "type": "array", - "items": { - "$ref": "#/definitions/ServiceConnectClientAlias" - }, - "description": "The list of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. 
The maximum number of client aliases that you can have in this list is 1.\n Each alias (\"endpoint\") is a fully-qualified name and port number that other Amazon ECS tasks (\"clients\") can use to connect to this service.\n Each name and port mapping must be unique within the namespace.\n For each ``ServiceConnectService``, you must provide at least one ``clientAlias`` with one ``port``." + "Throughput": { + "description": "The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter maps 1:1 with the ``Throughput`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.\n This parameter is only supported for the ``gp3`` volume type.", + "type": "integer" }, - "IngressPortOverride": { - "type": "integer", - "description": "The port number for the Service Connect proxy to listen on.\n Use the value of this field to bypass the proxy for traffic on the port number specified in the named ``portMapping`` in the task definition of this application, and then use it in your VPC security groups to allow traffic into the proxy for this Amazon ECS service.\n In ``awsvpc`` mode and Fargate, the default value is the container port number. The container port number is in the ``portMapping`` in the task definition. In bridge mode, the default value is the ephemeral port of the Service Connect proxy." + "Iops": { + "description": "The number of I/O operations per second (IOPS). For ``gp3``, ``io1``, and ``io2`` volumes, this represents the number of IOPS that are provisioned for the volume. For ``gp2`` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n The following are the supported values for each volume type.\n + ``gp3``: 3,000 - 16,000 IOPS\n + ``io1``: 100 - 64,000 IOPS\n + ``io2``: 100 - 256,000 IOPS\n \n This parameter is required for ``io1`` and ``io2`` volume types. The default for ``gp3`` volumes is ``3,000 IOPS``. This parameter is not supported for ``st1``, ``sc1``, or ``standard`` volume types.\n This parameter maps 1:1 with the ``Iops`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.", + "type": "integer" }, - "Tls": { - "$ref": "#/definitions/ServiceConnectTlsConfiguration", - "description": "A reference to an object that represents a Transport Layer Security (TLS) configuration." + "SizeInGiB": { + "description": "The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the ``Size`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.\n The following are the supported volume size values for each volume type.\n + ``gp2`` and ``gp3``: 1-16,384\n + ``io1`` and ``io2``: 4-16,384\n + ``st1`` and ``sc1``: 125-16,384\n + ``standard``: 1-1,024", + "type": "integer" }, - "Timeout": { - "$ref": "#/definitions/TimeoutConfiguration", - "description": "A reference to an object that represents the configured timeouts for Service Connect." + "RoleArn": { + "description": "The ARN of the IAM role to associate with this volume. 
This is the Amazon ECS infrastructure IAM role that is used to manage your AWS infrastructure. We recommend using the Amazon ECS-managed ``AmazonECSInfrastructureRolePolicyForVolumes`` IAM policy with this role. For more information, see [Amazon ECS infrastructure IAM role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/infrastructure_IAM_role.html) in the *Amazon ECS Developer Guide*.", + "type": "string" } - }, - "required": [ - "PortName" - ], - "additionalProperties": false, - "description": "The Service Connect service object configuration. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*." + } }, - "ServiceConnectTlsConfiguration": { + "ServiceConnectClientAlias": { + "description": "Each alias (\"endpoint\") is a fully-qualified name and port number that other tasks (\"clients\") can use to connect to this service.\n Each name and port mapping must be unique within the namespace.\n Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*.", + "additionalProperties": false, "type": "object", "properties": { - "IssuerCertificateAuthority": { - "$ref": "#/definitions/ServiceConnectTlsCertificateAuthority", - "description": "The signer certificate authority." - }, - "KmsKey": { - "type": "string", - "description": "The AWS Key Management Service key." + "DnsName": { + "description": "The ``dnsName`` is the name that you use in the applications of client tasks to connect to this service. The name must be a valid DNS name but doesn't need to be fully-qualified. The name can include up to 127 characters. The name can include lowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name can't start with a hyphen.\n If this parameter isn't specified, the default value of ``discoveryName.namespace`` is used. If the ``discoveryName`` isn't specified, the port mapping name from the task definition is used in ``portName.namespace``.\n To avoid changing your applications in client Amazon ECS services, set this to the same name that the client application uses by default. For example, a few common names are ``database``, ``db``, or the lowercase name of a database, such as ``mysql`` or ``redis``. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*.", + "type": "string" }, - "RoleArn": { - "type": "string", - "description": "The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS." + "Port": { + "description": "The listening port number for the Service Connect proxy. This port is available inside of all of the tasks within the same namespace.\n To avoid changing your applications in client Amazon ECS services, set this to the same port that the client application uses by default. 
For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*.", + "type": "integer" } }, "required": [ - "IssuerCertificateAuthority" - ], - "additionalProperties": false, - "description": "An object that represents the configuration for Service Connect TLS." + "Port" + ] }, - "ServiceConnectTlsCertificateAuthority": { - "type": "object", - "properties": { - "AwsPcaAuthorityArn": { - "type": "string", - "description": "The ARN of the AWS Private Certificate Authority certificate." - } - }, + "ServiceVolumeConfiguration": { + "description": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume.", "additionalProperties": false, - "description": "An object that represents the AWS Private Certificate Authority certificate." - }, - "ServiceManagedEBSVolumeConfiguration": { "type": "object", "required": [ - "RoleArn" + "Name" ], "properties": { - "Encrypted": { - "type": "boolean", - "description": "Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the ``Encrypted`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*." - }, - "KmsKeyId": { - "type": "string", - "description": "The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the ``KmsKeyId`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.\n AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails." - }, - "VolumeType": { - "type": "string", - "description": "The volume type. This parameter maps 1:1 with the ``VolumeType`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) in the *Amazon EC2 User Guide*.\n The following are the supported volume types.\n + General Purpose SSD: ``gp2``|``gp3`` \n + Provisioned IOPS SSD: ``io1``|``io2`` \n + Throughput Optimized HDD: ``st1`` \n + Cold HDD: ``sc1`` \n + Magnetic: ``standard`` \n The magnetic volume type is not supported on Fargate." - }, - "SizeInGiB": { - "type": "integer", - "description": "The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. 
This parameter maps 1:1 with the ``Size`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.\n The following are the supported volume size values for each volume type.\n + ``gp2`` and ``gp3``: 1-16,384\n + ``io1`` and ``io2``: 4-16,384\n + ``st1`` and ``sc1``: 125-16,384\n + ``standard``: 1-1,024" - }, - "SnapshotId": { - "type": "string", - "description": "The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the ``SnapshotId`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*." - }, - "Iops": { - "type": "integer", - "description": "The number of I/O operations per second (IOPS). For ``gp3``, ``io1``, and ``io2`` volumes, this represents the number of IOPS that are provisioned for the volume. For ``gp2`` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n The following are the supported values for each volume type.\n + ``gp3``: 3,000 - 16,000 IOPS\n + ``io1``: 100 - 64,000 IOPS\n + ``io2``: 100 - 256,000 IOPS\n \n This parameter is required for ``io1`` and ``io2`` volume types. The default for ``gp3`` volumes is ``3,000 IOPS``. This parameter is not supported for ``st1``, ``sc1``, or ``standard`` volume types.\n This parameter maps 1:1 with the ``Iops`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*." - }, - "Throughput": { - "type": "integer", - "description": "The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter maps 1:1 with the ``Throughput`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.\n This parameter is only supported for the ``gp3`` volume type." - }, - "TagSpecifications": { - "type": "array", - "items": { - "$ref": "#/definitions/EBSTagSpecification" - }, - "description": "The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps 1:1 with the ``TagSpecifications.N`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*." - }, - "RoleArn": { - "type": "string", - "description": "The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role that is used to manage your AWS infrastructure. We recommend using the Amazon ECS-managed ``AmazonECSInfrastructureRolePolicyForVolumes`` IAM policy with this role. For more information, see [Amazon ECS infrastructure IAM role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/infrastructure_IAM_role.html) in the *Amazon ECS Developer Guide*." + "ManagedEBSVolume": { + "description": "The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service. 
The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are created.", + "$ref": "#/definitions/ServiceManagedEBSVolumeConfiguration" }, - "FilesystemType": { - "type": "string", - "description": "The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.\n The available filesystem types are? ``ext3``, ``ext4``, and ``xfs``. If no value is specified, the ``xfs`` filesystem type is used by default." + "Name": { + "description": "The name of the volume. This value must match the volume name from the ``Volume`` object in the task definition.", + "type": "string" } - }, - "description": "The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service.\n Many of these parameters map 1:1 with the Amazon EBS ``CreateVolume`` API request parameters." + } }, - "TimeoutConfiguration": { + "NetworkConfiguration": { + "description": "The network configuration for a task or service.", + "additionalProperties": false, "type": "object", "properties": { - "IdleTimeoutSeconds": { - "type": "integer", - "description": "The amount of time in seconds a connection will stay active while idle. A value of ``0`` can be set to disable ``idleTimeout``.\n The ``idleTimeout`` default for ``HTTP``/``HTTP2``/``GRPC`` is 5 minutes.\n The ``idleTimeout`` default for ``TCP`` is 1 hour." - }, - "PerRequestTimeoutSeconds": { - "type": "integer", - "description": "The amount of time waiting for the upstream to respond with a complete response per request. A value of ``0`` can be set to disable ``perRequestTimeout``. ``perRequestTimeout`` can only be set if Service Connect ``appProtocol`` isn't ``TCP``. Only ``idleTimeout`` is allowed for ``TCP`` ``appProtocol``." + "AwsvpcConfiguration": { + "description": "The VPC subnets and security groups that are associated with a task.\n All specified subnets and security groups must be from the same VPC.", + "$ref": "#/definitions/AwsVpcConfiguration" } - }, - "additionalProperties": false, - "description": "An object that represents the timeout configurations for Service Connect.\n If ``idleTimeout`` is set to a time that is less than ``perRequestTimeout``, the connection will close when the ``idleTimeout`` is reached and not the ``perRequestTimeout``." + } }, "ServiceRegistry": { + "description": "The details for the service registry.\n Each service may be associated with one service registry. Multiple service registries for each service are not supported.\n When you add, update, or remove the service registries configuration, Amazon ECS starts a new deployment. New tasks are registered and deregistered to the updated service registry configuration.", + "additionalProperties": false, "type": "object", "properties": { "ContainerName": { - "type": "string", - "description": "The container name value to be used for your service discovery service. It's already specified in the task definition. If the task definition that your service task specifies uses the ``bridge`` or ``host`` network mode, you must specify a ``containerName`` and ``containerPort`` combination from the task definition. 
If the task definition that your service task specifies uses the ``awsvpc`` network mode and a type SRV DNS record is used, you must specify either a ``containerName`` and ``containerPort`` combination or a ``port`` value. However, you can't specify both." - }, - "ContainerPort": { - "type": "integer", - "description": "The port value to be used for your service discovery service. It's already specified in the task definition. If the task definition your service task specifies uses the ``bridge`` or ``host`` network mode, you must specify a ``containerName`` and ``containerPort`` combination from the task definition. If the task definition your service task specifies uses the ``awsvpc`` network mode and a type SRV DNS record is used, you must specify either a ``containerName`` and ``containerPort`` combination or a ``port`` value. However, you can't specify both." + "description": "The container name value to be used for your service discovery service. It's already specified in the task definition. If the task definition that your service task specifies uses the ``bridge`` or ``host`` network mode, you must specify a ``containerName`` and ``containerPort`` combination from the task definition. If the task definition that your service task specifies uses the ``awsvpc`` network mode and a type SRV DNS record is used, you must specify either a ``containerName`` and ``containerPort`` combination or a ``port`` value. However, you can't specify both.", + "type": "string" }, "Port": { - "type": "integer", - "description": "The port value used if your service discovery service specified an SRV record. This field might be used if both the ``awsvpc`` network mode and SRV records are used." + "description": "The port value used if your service discovery service specified an SRV record. This field might be used if both the ``awsvpc`` network mode and SRV records are used.", + "type": "integer" + }, + "ContainerPort": { + "description": "The port value to be used for your service discovery service. It's already specified in the task definition. If the task definition your service task specifies uses the ``bridge`` or ``host`` network mode, you must specify a ``containerName`` and ``containerPort`` combination from the task definition. If the task definition your service task specifies uses the ``awsvpc`` network mode and a type SRV DNS record is used, you must specify either a ``containerName`` and ``containerPort`` combination or a ``port`` value. However, you can't specify both.", + "type": "integer" }, "RegistryArn": { - "type": "string", - "description": "The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is CMAP. For more information, see [CreateService](https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html)." + "description": "The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is CMAP. For more information, see [CreateService](https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html).", + "type": "string" } - }, - "additionalProperties": false, - "description": "The ``ServiceRegistry`` property specifies details of the service registry. For more information, see [Service Discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) in the *Amazon Elastic Container Service Developer Guide*." + } }, "Tag": { + "description": "The metadata that you apply to a resource to help you categorize and organize them. 
Each tag consists of a key and an optional value. You define them.\n The following basic restrictions apply to tags:\n + Maximum number of tags per resource - 50\n + For each resource, each tag key must be unique, and each tag key can have only one value.\n + Maximum key length - 128 Unicode characters in UTF-8\n + Maximum value length - 256 Unicode characters in UTF-8\n + If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n + Tag keys and values are case-sensitive.\n + Do not use ``aws:``, ``AWS:``, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "additionalProperties": false, "type": "object", "properties": { + "Value": { + "description": "The optional part of a key-value pair that make up a tag. A ``value`` acts as a descriptor within a tag category (key).", + "type": "string" + }, "Key": { - "type": "string", - "description": "One part of a key-value pair that make up a tag. A ``key`` is a general label that acts like a category for more specific tag values." + "description": "One part of a key-value pair that make up a tag. A ``key`` is a general label that acts like a category for more specific tag values.", + "type": "string" + } + } + }, + "DeploymentCircuitBreaker": { + "description": "The deployment circuit breaker can only be used for services using the rolling update (``ECS``) deployment type.\n The *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If it is turned on, a service deployment will transition to a failed state and stop launching new tasks. You can also configure Amazon ECS to roll back your service to the last completed deployment after a failure. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*.\n For more information about API failure reasons, see [API failure reasons](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/api_failures_messages.html) in the *Amazon Elastic Container Service Developer Guide*.", + "additionalProperties": false, + "type": "object", + "properties": { + "Enable": { + "description": "Determines whether to use the deployment circuit breaker logic for the service.", + "type": "boolean" }, - "Value": { - "type": "string", - "description": "The optional part of a key-value pair that make up a tag. A ``value`` acts as a descriptor within a tag category (key)." + "Rollback": { + "description": "Determines whether to configure Amazon ECS to roll back the service if a service deployment fails. If rollback is on, when a service deployment fails, the service is rolled back to the last deployment that completed successfully.", + "type": "boolean" } }, + "required": [ + "Enable", + "Rollback" + ] + }, + "DeploymentConfiguration": { + "description": "Optional deployment parameters that control how many tasks run during a deployment and the ordering of stopping and starting tasks.", "additionalProperties": false, - "description": "The metadata that you apply to a resource to help you categorize and organize them. 
Each tag consists of a key and an optional value. You define them.\n The following basic restrictions apply to tags:\n + Maximum number of tags per resource - 50\n + For each resource, each tag key must be unique, and each tag key can have only one value.\n + Maximum key length - 128 Unicode characters in UTF-8\n + Maximum value length - 256 Unicode characters in UTF-8\n + If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n + Tag keys and values are case-sensitive.\n + Do not use ``aws:``, ``AWS:``, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit." + "type": "object", + "properties": { + "Alarms": { + "description": "Information about the CloudWatch alarms.", + "$ref": "#/definitions/DeploymentAlarms" + }, + "DeploymentCircuitBreaker": { + "description": "The deployment circuit breaker can only be used for services using the rolling update (``ECS``) deployment type.\n The *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*", + "$ref": "#/definitions/DeploymentCircuitBreaker" + }, + "MaximumPercent": { + "description": "If a service is using the rolling update (``ECS``) deployment type, the ``maximumPercent`` parameter represents an upper limit on the number of your service's tasks that are allowed in the ``RUNNING`` or ``PENDING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the ``REPLICA`` service scheduler and has a ``desiredCount`` of four tasks and a ``maximumPercent`` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default ``maximumPercent`` value for a service using the ``REPLICA`` service scheduler is 200%.\n If a service is using either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. 
The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state.\n You can't specify a custom ``maximumPercent`` value for a service that uses either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types and has tasks that use the EC2 launch type.\n If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "type": "integer" + }, + "MinimumHealthyPercent": { + "description": "If a service is using the rolling update (``ECS``) deployment type, the ``minimumHealthyPercent`` represents a lower limit on the number of your service's tasks that must remain in the ``RUNNING`` state during a deployment, as a percentage of the ``desiredCount`` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a ``desiredCount`` of four tasks and a ``minimumHealthyPercent`` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks. \n For services that *do not* use a load balancer, the following should be noted:\n + A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n + If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a ``RUNNING`` state before the task is counted towards the minimum healthy percent total.\n + If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings. \n \n For services that *do* use a load balancer, the following should be noted:\n + If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n + If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n \n The default value for a replica service for ``minimumHealthyPercent`` is 100%. The default ``minimumHealthyPercent`` value for a service using the ``DAEMON`` service schedule is 0% for the CLI, the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n The minimum number of healthy tasks during a deployment is the ``desiredCount`` multiplied by the ``minimumHealthyPercent``/100, rounded up to the nearest integer value.\n If a service is using either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. 
The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the ``RUNNING`` state while the container instances are in the ``DRAINING`` state.\n You can't specify a custom ``minimumHealthyPercent`` value for a service that uses either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types and has tasks that use the EC2 launch type.\n If a service is using either the blue/green (``CODE_DEPLOY``) or ``EXTERNAL`` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "type": "integer" + } + } }, - "ServiceVolumeConfiguration": { + "EBSTagSpecification": { + "description": "The tag specifications of an Amazon EBS volume.", + "additionalProperties": false, "type": "object", "required": [ - "Name" + "ResourceType" ], "properties": { - "Name": { + "PropagateTags": { + "description": "Determines whether to propagate the tags from the task definition to \u2028the Amazon EBS volume. Tags can only propagate to a ``SERVICE`` specified in \u2028``ServiceVolumeConfiguration``. If no value is specified, the tags aren't \u2028propagated.", "type": "string", - "description": "The name of the volume. This value must match the volume name from the ``Volume`` object in the task definition." + "enum": [ + "SERVICE", + "TASK_DEFINITION" + ] }, - "ManagedEBSVolume": { - "$ref": "#/definitions/ServiceManagedEBSVolumeConfiguration", - "description": "The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service. The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are created." + "ResourceType": { + "description": "The type of volume resource.", + "type": "string" + }, + "Tags": { + "description": "The tags applied to this Amazon EBS volume. ``AmazonECSCreated`` and ``AmazonECSManaged`` are reserved tags that can't be used.", + "type": "array", + "items": { + "$ref": "#/definitions/Tag" + } + } + } + }, + "ServiceConnectService": { + "description": "The Service Connect service object configuration. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*.", + "additionalProperties": false, + "type": "object", + "properties": { + "Timeout": { + "description": "A reference to an object that represents the configured timeouts for Service Connect.", + "$ref": "#/definitions/TimeoutConfiguration" + }, + "IngressPortOverride": { + "description": "The port number for the Service Connect proxy to listen on.\n Use the value of this field to bypass the proxy for traffic on the port number specified in the named ``portMapping`` in the task definition of this application, and then use it in your VPC security groups to allow traffic into the proxy for this Amazon ECS service.\n In ``awsvpc`` mode and Fargate, the default value is the container port number. The container port number is in the ``portMapping`` in the task definition. In bridge mode, the default value is the ephemeral port of the Service Connect proxy.", + "type": "integer" + }, + "ClientAliases": { + "description": "The list of client aliases for this Service Connect service. You use these to assign names that can be used by client applications. 
The maximum number of client aliases that you can have in this list is 1.\n Each alias (\"endpoint\") is a fully-qualified name and port number that other Amazon ECS tasks (\"clients\") can use to connect to this service.\n Each name and port mapping must be unique within the namespace.\n For each ``ServiceConnectService``, you must provide at least one ``clientAlias`` with one ``port``.", + "type": "array", + "items": { + "$ref": "#/definitions/ServiceConnectClientAlias" + } + }, + "Tls": { + "description": "A reference to an object that represents a Transport Layer Security (TLS) configuration.", + "$ref": "#/definitions/ServiceConnectTlsConfiguration" + }, + "DiscoveryName": { + "description": "The ``discoveryName`` is the name of the new CMAP service that Amazon ECS creates for this Amazon ECS service. This must be unique within the CMAP namespace. The name can contain up to 64 characters. The name can include lowercase letters, numbers, underscores (_), and hyphens (-). The name can't start with a hyphen.\n If the ``discoveryName`` isn't specified, the port mapping name from the task definition is used in ``portName.namespace``.", + "type": "string" + }, + "PortName": { + "description": "The ``portName`` must match the name of one of the ``portMappings`` from all the containers in the task definition of this Amazon ECS service.", + "type": "string" } }, - "description": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume." + "required": [ + "PortName" + ] } }, "properties": { - "ServiceArn": { - "type": "string", - "description": "" - }, - "CapacityProviderStrategy": { - "type": "array", - "items": { - "$ref": "#/definitions/CapacityProviderStrategyItem" - }, - "description": "The capacity provider strategy to use for the service.\n If a ``capacityProviderStrategy`` is specified, the ``launchType`` parameter must be omitted. If no ``capacityProviderStrategy`` or ``launchType`` is specified, the ``defaultCapacityProviderStrategy`` for the cluster is used.\n A capacity provider strategy may contain a maximum of 6 capacity providers." - }, - "Cluster": { - "type": "string", - "description": "The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed." - }, - "DeploymentConfiguration": { - "$ref": "#/definitions/DeploymentConfiguration", - "description": "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks." - }, - "DeploymentController": { - "$ref": "#/definitions/DeploymentController", - "description": "The deployment controller to use for the service. If no deployment controller is specified, the default value of ``ECS`` is used." - }, - "DesiredCount": { - "type": "integer", - "description": "The number of instantiations of the specified task definition to place and keep running in your service.\n For new services, if a desired count is not specified, a default value of ``1`` is used. When using the ``DAEMON`` scheduling strategy, the desired count is not required.\n For existing services, if a desired count is not specified, it is omitted from the operation." - }, - "EnableECSManagedTags": { - "type": "boolean", - "description": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. 
For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide*.\n When you use Amazon ECS managed tags, you need to set the ``propagateTags`` request parameter." - }, - "EnableExecuteCommand": { - "type": "boolean", - "description": "Determines whether the execute command functionality is turned on for the service. If ``true``, the execute command functionality is turned on for all containers in tasks as part of the service." - }, - "HealthCheckGracePeriodSeconds": { - "type": "integer", - "description": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of ``0`` is used.\n If you do not use an Elastic Load Balancing, we recommend that you use the ``startPeriod`` in the task definition health check parameters. For more information, see [Health check](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html).\n If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up." + "PlatformVersion": { + "default": "LATEST", + "description": "The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the ``LATEST`` platform version is used. For more information, see [platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide*.", + "type": "string" }, - "LaunchType": { + "PropagateTags": { + "description": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n You must set this to a value other than ``NONE`` when you use Cost Explorer. For more information, see [Amazon ECS usage reports](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/usage-reports.html) in the *Amazon Elastic Container Service Developer Guide*.\n The default is ``NONE``.", "type": "string", "enum": [ - "EC2", - "FARGATE", - "EXTERNAL" - ], - "description": "The launch type on which to run your service. For more information, see [Amazon ECS Launch Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide*." + "SERVICE", + "TASK_DEFINITION" + ] }, - "LoadBalancers": { + "ServiceArn": { + "description": "", + "type": "string" + }, + "PlacementStrategies": { + "description": "The placement strategy objects to use for tasks in your service. 
You can specify a maximum of 5 strategy rules for each service.", "type": "array", "items": { - "$ref": "#/definitions/LoadBalancer" - }, - "description": "A list of load balancer objects to associate with the service. If you specify the ``Role`` property, ``LoadBalancers`` must be specified as well. For information about the number of load balancers that you can specify per service, see [Service Load Balancing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) in the *Amazon Elastic Container Service Developer Guide*." - }, - "Name": { - "type": "string", - "description": "" - }, - "NetworkConfiguration": { - "$ref": "#/definitions/NetworkConfiguration", - "description": "The network configuration for the service. This parameter is required for task definitions that use the ``awsvpc`` network mode to receive their own elastic network interface, and it is not supported for other network modes. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide*." + "$ref": "#/definitions/PlacementStrategy" + } }, - "PlacementConstraints": { + "ServiceRegistries": { + "description": "The details of the service discovery registry to associate with this service. For more information, see [Service discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html).\n Each service may be associated with one service registry. Multiple service registries for each service isn't supported.", "type": "array", "items": { - "$ref": "#/definitions/PlacementConstraint" - }, - "description": "An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10 constraints for each task. This limit includes constraints in the task definition and those specified at runtime." + "$ref": "#/definitions/ServiceRegistry" + } }, - "PlacementStrategies": { + "VolumeConfigurations": { + "description": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume.", "type": "array", "items": { - "$ref": "#/definitions/PlacementStrategy" - }, - "description": "The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules for each service." + "$ref": "#/definitions/ServiceVolumeConfiguration" + } }, - "PlatformVersion": { - "type": "string", - "default": "LATEST", - "description": "The platform version that your tasks in the service are running on. A platform version is specified only for tasks using the Fargate launch type. If one isn't specified, the ``LATEST`` platform version is used. For more information, see [platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide*." + "CapacityProviderStrategy": { + "description": "The capacity provider strategy to use for the service.\n If a ``capacityProviderStrategy`` is specified, the ``launchType`` parameter must be omitted. 
If no ``capacityProviderStrategy`` or ``launchType`` is specified, the ``defaultCapacityProviderStrategy`` for the cluster is used.\n A capacity provider strategy may contain a maximum of 6 capacity providers.", + "type": "array", + "items": { + "$ref": "#/definitions/CapacityProviderStrategyItem" + } }, - "PropagateTags": { + "LaunchType": { + "description": "The launch type on which to run your service. For more information, see [Amazon ECS Launch Types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide*.", "type": "string", "enum": [ - "SERVICE", - "TASK_DEFINITION" - ], - "description": "Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the [TagResource](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TagResource.html) API action.\n The default is ``NONE``." + "EC2", + "FARGATE", + "EXTERNAL" + ] }, - "Role": { - "type": "string", - "description": "The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition doesn't use the ``awsvpc`` network mode. If you specify the ``role`` parameter, you must also specify a load balancer object with the ``loadBalancers`` parameter.\n If your account has already created the Amazon ECS service-linked role, that role is used for your service unless you specify a role here. The service-linked role is required if your task definition uses the ``awsvpc`` network mode or if the service is configured to use service discovery, an external deployment controller, multiple target groups, or Elastic Inference accelerators in which case you don't specify a role here. For more information, see [Using service-linked roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) in the *Amazon Elastic Container Service Developer Guide*.\n If your specified role has a path other than ``/``, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name ``bar`` has a path of ``/foo/`` then you would specify ``/foo/bar`` as the role name. For more information, see [Friendly names and paths](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) in the *IAM User Guide*." + "Name": { + "description": "", + "type": "string" }, "SchedulingStrategy": { + "description": "The scheduling strategy to use for the service. For more information, see [Services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html).\n There are two service scheduler strategies available:\n + ``REPLICA``-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. 
This scheduler strategy is required if the service uses the ``CODE_DEPLOY`` or ``EXTERNAL`` deployment controller types.\n + ``DAEMON``-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that don't meet the placement constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.\n Tasks using the Fargate launch type or the ``CODE_DEPLOY`` or ``EXTERNAL`` deployment controller types don't support the ``DAEMON`` scheduling strategy.", "type": "string", "enum": [ "DAEMON", "REPLICA" - ], - "description": "The scheduling strategy to use for the service. For more information, see [Services](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html).\n There are two service scheduler strategies available:\n + ``REPLICA``-The replica scheduling strategy places and maintains the desired number of tasks across your cluster. By default, the service scheduler spreads tasks across Availability Zones. You can use task placement strategies and constraints to customize task placement decisions. This scheduler strategy is required if the service uses the ``CODE_DEPLOY`` or ``EXTERNAL`` deployment controller types.\n + ``DAEMON``-The daemon scheduling strategy deploys exactly one task on each active container instance that meets all of the task placement constraints that you specify in your cluster. The service scheduler also evaluates the task placement constraints for running tasks and will stop tasks that don't meet the placement constraints. When you're using this strategy, you don't need to specify a desired number of tasks, a task placement strategy, or use Service Auto Scaling policies.\n Tasks using the Fargate launch type or the ``CODE_DEPLOY`` or ``EXTERNAL`` deployment controller types don't support the ``DAEMON`` scheduling strategy." - }, - "ServiceConnectConfiguration": { - "$ref": "#/definitions/ServiceConnectConfiguration", - "description": "The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.\n Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*." + ] }, - "ServiceName": { - "type": "string", - "description": "The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.\n The stack update fails if you change any properties that require replacement and the ``ServiceName`` is configured. This is because AWS CloudFormation creates the replacement service first, but each ``ServiceName`` must be unique in the cluster." 
+ "NetworkConfiguration": { + "description": "The network configuration for the service. This parameter is required for task definitions that use the ``awsvpc`` network mode to receive their own elastic network interface, and it is not supported for other network modes. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide*.", + "$ref": "#/definitions/NetworkConfiguration" }, - "ServiceRegistries": { + "Tags": { + "description": "The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.\n The following basic restrictions apply to tags:\n + Maximum number of tags per resource - 50\n + For each resource, each tag key must be unique, and each tag key can have only one value.\n + Maximum key length - 128 Unicode characters in UTF-8\n + Maximum value length - 256 Unicode characters in UTF-8\n + If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n + Tag keys and values are case-sensitive.\n + Do not use ``aws:``, ``AWS:``, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", "type": "array", "items": { - "$ref": "#/definitions/ServiceRegistry" - }, - "description": "The details of the service discovery registry to associate with this service. For more information, see [Service discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html).\n Each service may be associated with one service registry. Multiple service registries for each service isn't supported." + "$ref": "#/definitions/Tag" + } }, - "Tags": { + "HealthCheckGracePeriodSeconds": { + "description": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks after a task has first started. This is only used when your service is configured to use a load balancer. If your service has a load balancer defined and you don't specify a health check grace period value, the default value of ``0`` is used.\n If you do not use an Elastic Load Balancing, we recommend that you use the ``startPeriod`` in the task definition health check parameters. For more information, see [Health check](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_HealthCheck.html).\n If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you can specify a health check grace period of up to 2,147,483,647 seconds (about 69 years). During that time, the Amazon ECS service scheduler ignores health check status. This grace period can prevent the service scheduler from marking tasks as unhealthy and stopping them before they have time to come up.", + "type": "integer" + }, + "EnableECSManagedTags": { + "description": "Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. 
For more information, see [Tagging your Amazon ECS resources](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html) in the *Amazon Elastic Container Service Developer Guide*.\n When you use Amazon ECS managed tags, you need to set the ``propagateTags`` request parameter.", + "type": "boolean" + }, + "EnableExecuteCommand": { + "description": "Determines whether the execute command functionality is turned on for the service. If ``true``, the execute command functionality is turned on for all containers in tasks as part of the service.", + "type": "boolean" + }, + "PlacementConstraints": { + "description": "An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10 constraints for each task. This limit includes constraints in the task definition and those specified at runtime.", "type": "array", "items": { - "$ref": "#/definitions/Tag" - }, - "description": "The metadata that you apply to the service to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. When a service is deleted, the tags are deleted as well.\n The following basic restrictions apply to tags:\n + Maximum number of tags per resource - 50\n + For each resource, each tag key must be unique, and each tag key can have only one value.\n + Maximum key length - 128 Unicode characters in UTF-8\n + Maximum value length - 256 Unicode characters in UTF-8\n + If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n + Tag keys and values are case-sensitive.\n + Do not use ``aws:``, ``AWS:``, or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit." + "$ref": "#/definitions/PlacementConstraint" + } }, - "TaskDefinition": { - "type": "string", - "description": "The ``family`` and ``revision`` (``family:revision``) or full ARN of the task definition to run in your service. If a ``revision`` isn't specified, the latest ``ACTIVE`` revision is used.\n A task definition must be specified if the service uses either the ``ECS`` or ``CODE_DEPLOY`` deployment controllers.\n For more information about deployment types, see [Amazon ECS deployment types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html)." + "Cluster": { + "description": "The short name or full Amazon Resource Name (ARN) of the cluster that you run your service on. If you do not specify a cluster, the default cluster is assumed.", + "type": "string" }, - "VolumeConfigurations": { + "LoadBalancers": { + "description": "A list of load balancer objects to associate with the service. If you specify the ``Role`` property, ``LoadBalancers`` must be specified as well. 
For information about the number of load balancers that you can specify per service, see [Service Load Balancing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) in the *Amazon Elastic Container Service Developer Guide*.", "type": "array", "items": { - "$ref": "#/definitions/ServiceVolumeConfiguration" - }, - "description": "The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume." - } - }, - "primaryIdentifier": [ - "/properties/ServiceArn", - "/properties/Cluster" - ], - "handlers": { - "create": { - "permissions": [ - "ecs:CreateService", - "ecs:DescribeServices", - "iam:PassRole", - "ecs:TagResource" - ], - "timeoutInMinutes": 180 + "$ref": "#/definitions/LoadBalancer" + } }, - "read": { - "permissions": [ - "ecs:DescribeServices" - ] + "ServiceConnectConfiguration": { + "description": "The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.\n Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*.", + "$ref": "#/definitions/ServiceConnectConfiguration" }, - "update": { - "permissions": [ - "ecs:DescribeServices", - "ecs:ListTagsForResource", - "ecs:TagResource", - "ecs:UntagResource", - "ecs:UpdateService" - ], - "timeoutInMinutes": 180 + "DesiredCount": { + "description": "The number of instantiations of the specified task definition to place and keep running in your service.\n For new services, if a desired count is not specified, a default value of ``1`` is used. When using the ``DAEMON`` scheduling strategy, the desired count is not required.\n For existing services, if a desired count is not specified, it is omitted from the operation.", + "type": "integer" }, - "delete": { - "permissions": [ - "ecs:DeleteService", - "ecs:DescribeServices" - ], - "timeoutInMinutes": 30 + "DeploymentController": { + "description": "The deployment controller to use for the service. If no deployment controller is specified, the default value of ``ECS`` is used.", + "$ref": "#/definitions/DeploymentController" }, - "list": { - "permissions": [ - "ecs:DescribeServices", - "ecs:ListClusters", - "ecs:ListServices" - ] + "Role": { + "description": "The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is only permitted if you are using a load balancer with your service and your task definition doesn't use the ``awsvpc`` network mode. If you specify the ``role`` parameter, you must also specify a load balancer object with the ``loadBalancers`` parameter.\n If your account has already created the Amazon ECS service-linked role, that role is used for your service unless you specify a role here. 
The service-linked role is required if your task definition uses the ``awsvpc`` network mode or if the service is configured to use service discovery, an external deployment controller, multiple target groups, or Elastic Inference accelerators in which case you don't specify a role here. For more information, see [Using service-linked roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using-service-linked-roles.html) in the *Amazon Elastic Container Service Developer Guide*.\n If your specified role has a path other than ``/``, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name ``bar`` has a path of ``/foo/`` then you would specify ``/foo/bar`` as the role name. For more information, see [Friendly names and paths](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) in the *IAM User Guide*.", + "type": "string" + }, + "TaskDefinition": { + "description": "The ``family`` and ``revision`` (``family:revision``) or full ARN of the task definition to run in your service. If a ``revision`` isn't specified, the latest ``ACTIVE`` revision is used.\n A task definition must be specified if the service uses either the ``ECS`` or ``CODE_DEPLOY`` deployment controllers.\n For more information about deployment types, see [Amazon ECS deployment types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html).", + "type": "string" + }, + "ServiceName": { + "description": "The name of your service. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a Region or across multiple Regions.\n The stack update fails if you change any properties that require replacement and the ``ServiceName`` is configured. This is because AWS CloudFormation creates the replacement service first, but each ``ServiceName`` must be unique in the cluster.", + "type": "string" + }, + "DeploymentConfiguration": { + "description": "Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.", + "$ref": "#/definitions/DeploymentConfiguration" } - }, - "tagging": { - "taggable": true, - "tagOnCreate": true, - "tagUpdatable": true, - "cloudFormationSystemTags": true, - "tagProperty": "/properties/Tags" - }, - "readOnlyProperties": [ - "/properties/ServiceArn", - "/properties/Name" - ], - "createOnlyProperties": [ - "/properties/Cluster", - "/properties/DeploymentController", - "/properties/LaunchType", - "/properties/Role", - "/properties/SchedulingStrategy", - "/properties/ServiceName" - ], - "writeOnlyProperties": [ - "/properties/ServiceConnectConfiguration", - "/properties/VolumeConfigurations" - ], - "additionalProperties": false + } } diff --git a/internal/service/cloudformation/schemas/AWS_ECS_TaskDefinition.json b/internal/service/cloudformation/schemas/AWS_ECS_TaskDefinition.json index 6670bc574d..25037cbb1e 100644 --- a/internal/service/cloudformation/schemas/AWS_ECS_TaskDefinition.json +++ b/internal/service/cloudformation/schemas/AWS_ECS_TaskDefinition.json @@ -114,7 +114,7 @@ "type": "object", "properties": { "Command": { - "description": "A string array representing the command that the container runs to determine if it is healthy. 
The string array must start with ``CMD`` to run the command arguments directly, or ``CMD-SHELL`` to run the command with the container's default shell. \n When you use the AWS Management Console JSON panel, the CLIlong, or the APIs, enclose the list of commands in double quotes and brackets.\n ``[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`` \n You don't include the double quotes and brackets when you use the AWS Management Console.\n ``CMD-SHELL, curl -f http://localhost/ || exit 1`` \n An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see ``HealthCheck`` in tthe docker conainer create command", + "description": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with ``CMD`` to run the command arguments directly, or ``CMD-SHELL`` to run the command with the container's default shell. \n When you use the AWS Management Console JSON panel, the CLIlong, or the APIs, enclose the list of commands in double quotes and brackets.\n ``[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`` \n You don't include the double quotes and brackets when you use the AWS Management Console.\n ``CMD-SHELL, curl -f http://localhost/ || exit 1`` \n An exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see ``HealthCheck`` in tthe docker container create command", "insertionOrder": true, "type": "array", "items": { @@ -164,7 +164,7 @@ ], "properties": { "User": { - "description": "The user to use inside the container. This parameter maps to ``User`` in the docker conainer create command and the ``--user`` option to docker run.\n When running tasks using the ``host`` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.\n You can specify the ``user`` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n + ``user`` \n + ``user:group`` \n + ``uid`` \n + ``uid:gid`` \n + ``user:gid`` \n + ``uid:group`` \n \n This parameter is not supported for Windows containers.", + "description": "The user to use inside the container. This parameter maps to ``User`` in the docker container create command and the ``--user`` option to docker run.\n When running tasks using the ``host`` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.\n You can specify the ``user`` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n + ``user`` \n + ``user:group`` \n + ``uid`` \n + ``uid:gid`` \n + ``user:gid`` \n + ``uid:group`` \n \n This parameter is not supported for Windows containers.", "type": "string" }, "Secrets": { @@ -180,11 +180,11 @@ "type": "integer" }, "Privileged": { - "description": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the ``root`` user). This parameter maps to ``Privileged`` in the the docker conainer create command and the ``--privileged`` option to docker run\n This parameter is not supported for Windows containers or tasks run on FARGATElong.", + "description": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the ``root`` user). 
This parameter maps to ``Privileged`` in the docker container create command and the ``--privileged`` option to docker run\n This parameter is not supported for Windows containers or tasks run on FARGATElong.", "type": "boolean" }, "HealthCheck": { - "description": "The container health check command and associated configuration parameters for the container. This parameter maps to ``HealthCheck`` in the docker conainer create command and the ``HEALTHCHECK`` parameter of docker run.", + "description": "The container health check command and associated configuration parameters for the container. This parameter maps to ``HealthCheck`` in the docker container create command and the ``HEALTHCHECK`` parameter of docker run.", "$ref": "#/definitions/HealthCheck" }, "StartTimeout": { @@ -193,7 +193,7 @@ }, "VolumesFrom": { "uniqueItems": true, - "description": "Data volumes to mount from another container. This parameter maps to ``VolumesFrom`` in tthe docker conainer create command and the ``--volumes-from`` option to docker run.", + "description": "Data volumes to mount from another container. This parameter maps to ``VolumesFrom`` in tthe docker container create command and the ``--volumes-from`` option to docker run.", "insertionOrder": false, "type": "array", "items": { @@ -201,11 +201,11 @@ } }, "Cpu": { - "description": "The number of ``cpu`` units reserved for the container. This parameter maps to ``CpuShares`` in the docker conainer create commandand the ``--cpu-shares`` option to docker run.\n This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level ``cpu`` value.\n You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024.\n Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n + *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. 
CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n + *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n + *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n \n On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as ``0``, which Windows interprets as 1% of one CPU.", + "description": "The number of ``cpu`` units reserved for the container. This parameter maps to ``CpuShares`` in the docker container create commandand the ``--cpu-shares`` option to docker run.\n This field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level ``cpu`` value.\n You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024.\n Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n + *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n + *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n + *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n \n On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. 
A null or zero CPU value is passed to Docker as ``0``, which Windows interprets as 1% of one CPU.", "type": "integer" }, "EntryPoint": { - "description": "Early versions of the Amazon ECS container agent don't properly handle ``entryPoint`` parameters. If you have problems using ``entryPoint``, update your container agent or enter your commands and arguments as ``command`` array items instead.\n The entry point that's passed to the container. This parameter maps to ``Entrypoint`` in tthe docker conainer create command and the ``--entrypoint`` option to docker run.", + "description": "Early versions of the Amazon ECS container agent don't properly handle ``entryPoint`` parameters. If you have problems using ``entryPoint``, update your container agent or enter your commands and arguments as ``command`` array items instead.\n The entry point that's passed to the container. This parameter maps to ``Entrypoint`` in tthe docker container create command and the ``--entrypoint`` option to docker run.", "insertionOrder": true, "type": "array", "items": { @@ -213,7 +213,7 @@ } }, "DnsServers": { - "description": "A list of DNS servers that are presented to the container. This parameter maps to ``Dns`` in the the docker conainer create command and the ``--dns`` option to docker run.\n This parameter is not supported for Windows containers.", + "description": "A list of DNS servers that are presented to the container. This parameter maps to ``Dns`` in the docker container create command and the ``--dns`` option to docker run.\n This parameter is not supported for Windows containers.", "insertionOrder": false, "type": "array", "items": { @@ -221,11 +221,11 @@ } }, "ReadonlyRootFilesystem": { - "description": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ``ReadonlyRootfs`` in the docker conainer create command and the ``--read-only`` option to docker run.\n This parameter is not supported for Windows containers.", + "description": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ``ReadonlyRootfs`` in the docker container create command and the ``--read-only`` option to docker run.\n This parameter is not supported for Windows containers.", "type": "boolean" }, "Image": { - "description": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either ``repository-url/image:tag`` or ``repository-url/image@digest``. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to ``Image`` in the docker conainer create command and the ``IMAGE`` parameter of docker run.\n + When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n + Images in Amazon ECR repositories can be specified by either using the full ``registry/repository:tag`` or ``registry/repository@digest``. For example, ``012345678910.dkr.ecr..amazonaws.com/:latest`` or ``012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE``. 
\n + Images in official repositories on Docker Hub use a single name (for example, ``ubuntu`` or ``mongo``).\n + Images in other repositories on Docker Hub are qualified with an organization name (for example, ``amazon/amazon-ecs-agent``).\n + Images in other online repositories are qualified further by a domain name (for example, ``quay.io/assemblyline/ubuntu``).", + "description": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either ``repository-url/image:tag`` or ``repository-url/image@digest``. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to ``Image`` in the docker container create command and the ``IMAGE`` parameter of docker run.\n + When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n + Images in Amazon ECR repositories can be specified by either using the full ``registry/repository:tag`` or ``registry/repository@digest``. For example, ``012345678910.dkr.ecr..amazonaws.com/:latest`` or ``012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE``. \n + Images in official repositories on Docker Hub use a single name (for example, ``ubuntu`` or ``mongo``).\n + Images in other repositories on Docker Hub are qualified with an organization name (for example, ``amazon/amazon-ecs-agent``).\n + Images in other online repositories are qualified further by a domain name (for example, ``quay.io/assemblyline/ubuntu``).", "type": "string" }, "Essential": { @@ -253,7 +253,7 @@ } }, "Name": { - "description": "The name of a container. If you're linking multiple containers together in a task definition, the ``name`` of one container can be entered in the ``links`` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to ``name`` in tthe docker conainer create command and the ``--name`` option to docker run.", + "description": "The name of a container. If you're linking multiple containers together in a task definition, the ``name`` of one container can be entered in the ``links`` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to ``name`` in tthe docker container create command and the ``--name`` option to docker run.", "type": "string" }, "FirelensConfiguration": { @@ -261,7 +261,7 @@ "$ref": "#/definitions/FirelensConfiguration" }, "DockerSecurityOptions": { - "description": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. 
For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide*.\n This parameter maps to ``SecurityOpt`` in the docker conainer create command and the ``--security-opt`` option to docker run.\n The Amazon ECS container agent running on a container instance must register with the ``ECS_SELINUX_CAPABLE=true`` or ``ECS_APPARMOR_CAPABLE=true`` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide*.\n Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", + "description": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide*.\n This parameter maps to ``SecurityOpt`` in the docker container create command and the ``--security-opt`` option to docker run.\n The Amazon ECS container agent running on a container instance must register with the ``ECS_SELINUX_CAPABLE=true`` or ``ECS_APPARMOR_CAPABLE=true`` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide*.\n Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", "insertionOrder": false, "type": "array", "items": { @@ -269,7 +269,7 @@ } }, "SystemControls": { - "description": "A list of namespaced kernel parameters to set in the container. This parameter maps to ``Sysctls`` in tthe docker conainer create command and the ``--sysctl`` option to docker run. For example, you can configure ``net.ipv4.tcp_keepalive_time`` setting to maintain longer lived connections.", + "description": "A list of namespaced kernel parameters to set in the container. This parameter maps to ``Sysctls`` in tthe docker container create command and the ``--sysctl`` option to docker run. For example, you can configure ``net.ipv4.tcp_keepalive_time`` setting to maintain longer lived connections.", "insertionOrder": false, "type": "array", "items": { @@ -277,11 +277,11 @@ } }, "Interactive": { - "description": "When this parameter is ``true``, you can deploy containerized applications that require ``stdin`` or a ``tty`` to be allocated. 
This parameter maps to ``OpenStdin`` in the docker conainer create command and the ``--interactive`` option to docker run.", + "description": "When this parameter is ``true``, you can deploy containerized applications that require ``stdin`` or a ``tty`` to be allocated. This parameter maps to ``OpenStdin`` in the docker container create command and the ``--interactive`` option to docker run.", "type": "boolean" }, "DnsSearchDomains": { - "description": "A list of DNS search domains that are presented to the container. This parameter maps to ``DnsSearch`` in the docker conainer create command and the ``--dns-search`` option to docker run.\n This parameter is not supported for Windows containers.", + "description": "A list of DNS search domains that are presented to the container. This parameter maps to ``DnsSearch`` in the docker container create command and the ``--dns-search`` option to docker run.\n This parameter is not supported for Windows containers.", "insertionOrder": false, "type": "array", "items": { @@ -309,11 +309,11 @@ "type": "integer" }, "WorkingDirectory": { - "description": "The working directory to run commands inside the container in. This parameter maps to ``WorkingDir`` in the docker conainer create command and the ``--workdir`` option to docker run.", + "description": "The working directory to run commands inside the container in. This parameter maps to ``WorkingDir`` in the docker container create command and the ``--workdir`` option to docker run.", "type": "string" }, "MemoryReservation": { - "description": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the ``memory`` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to ``MemoryReservation`` in the the docker conainer create command and the ``--memory-reservation`` option to docker run.\n If a task-level memory value is not specified, you must specify a non-zero integer for one or both of ``memory`` or ``memoryReservation`` in a container definition. If you specify both, ``memory`` must be greater than ``memoryReservation``. If you specify ``memoryReservation``, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of ``memory`` is used.\n For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a ``memoryReservation`` of 128 MiB, and a ``memory`` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers. \n The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", + "description": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. 
However, your container can consume more memory when it needs to, up to either the hard limit specified with the ``memory`` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to ``MemoryReservation`` in the docker container create command and the ``--memory-reservation`` option to docker run.\n If a task-level memory value is not specified, you must specify a non-zero integer for one or both of ``memory`` or ``memoryReservation`` in a container definition. If you specify both, ``memory`` must be greater than ``memoryReservation``. If you specify ``memoryReservation``, then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of ``memory`` is used.\n For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a ``memoryReservation`` of 128 MiB, and a ``memory`` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers. \n The Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", "type": "integer" }, "RepositoryCredentials": { @@ -321,7 +321,7 @@ "$ref": "#/definitions/RepositoryCredentials" }, "ExtraHosts": { - "description": "A list of hostnames and IP address mappings to append to the ``/etc/hosts`` file on the container. This parameter maps to ``ExtraHosts`` in the docker conainer create command and the ``--add-host`` option to docker run.\n This parameter isn't supported for Windows containers or tasks that use the ``awsvpc`` network mode.", + "description": "A list of hostnames and IP address mappings to append to the ``/etc/hosts`` file on the container. This parameter maps to ``ExtraHosts`` in the docker container create command and the ``--add-host`` option to docker run.\n This parameter isn't supported for Windows containers or tasks that use the ``awsvpc`` network mode.", "insertionOrder": false, "type": "array", "items": { @@ -329,7 +329,7 @@ } }, "Hostname": { - "description": "The hostname to use for your container. This parameter maps to ``Hostname`` in thethe docker conainer create command and the ``--hostname`` option to docker run.\n The ``hostname`` parameter is not supported if you're using the ``awsvpc`` network mode.", + "description": "The hostname to use for your container. This parameter maps to ``Hostname`` in thethe docker container create command and the ``--hostname`` option to docker run.\n The ``hostname`` parameter is not supported if you're using the ``awsvpc`` network mode.", "type": "string" }, "LinuxParameters": { @@ -337,20 +337,20 @@ "$ref": "#/definitions/LinuxParameters" }, "RestartPolicy": { - "description": "", + "description": "The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container without needing to replace the task. 
For more information, see [Restart individual containers in Amazon ECS tasks with container restart policies](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-restart-policy.html) in the *Amazon Elastic Container Service Developer Guide*.", "$ref": "#/definitions/RestartPolicy" }, "DisableNetworking": { - "description": "When this parameter is true, networking is off within the container. This parameter maps to ``NetworkDisabled`` in the docker conainer create command.\n This parameter is not supported for Windows containers.", + "description": "When this parameter is true, networking is off within the container. This parameter maps to ``NetworkDisabled`` in the docker container create command.\n This parameter is not supported for Windows containers.", "type": "boolean" }, "PseudoTerminal": { - "description": "When this parameter is ``true``, a TTY is allocated. This parameter maps to ``Tty`` in tthe docker conainer create command and the ``--tty`` option to docker run.", + "description": "When this parameter is ``true``, a TTY is allocated. This parameter maps to ``Tty`` in tthe docker container create command and the ``--tty`` option to docker run.", "type": "boolean" }, "MountPoints": { "uniqueItems": true, - "description": "The mount points for data volumes in your container.\n This parameter maps to ``Volumes`` in the the docker conainer create command and the ``--volume`` option to docker run.\n Windows containers can mount whole directories on the same drive as ``$env:ProgramData``. Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "description": "The mount points for data volumes in your container.\n This parameter maps to ``Volumes`` in the docker container create command and the ``--volume`` option to docker run.\n Windows containers can mount whole directories on the same drive as ``$env:ProgramData``. Windows containers can't mount directories on a different drive, and mount point can't be across drives.", "insertionOrder": true, "type": "array", "items": { @@ -371,7 +371,7 @@ "type": "string" } }, - "description": "A key/value map of labels to add to the container. This parameter maps to ``Labels`` in the docker conainer create command and the ``--label`` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``", + "description": "A key/value map of labels to add to the container. This parameter maps to ``Labels`` in the docker container create command and the ``--label`` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: ``sudo docker version --format '{{.Server.APIVersion}}'``", "additionalProperties": false, "type": "object" }, @@ -385,7 +385,7 @@ } }, "Command": { - "description": "The command that's passed to the container. This parameter maps to ``Cmd`` in the docker conainer create command and the ``COMMAND`` parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.", + "description": "The command that's passed to the container. 
This parameter maps to ``Cmd`` in the docker container create command and the ``COMMAND`` parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.", "insertionOrder": true, "type": "array", "items": { @@ -394,7 +394,7 @@ }, "Environment": { "uniqueItems": true, - "description": "The environment variables to pass to a container. This parameter maps to ``Env`` in the docker conainer create command and the ``--env`` option to docker run.\n We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", + "description": "The environment variables to pass to a container. This parameter maps to ``Env`` in the docker container create command and the ``--env`` option to docker run.\n We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", "insertionOrder": false, "type": "array", "items": { @@ -403,7 +403,7 @@ }, "Links": { "uniqueItems": true, - "description": "The ``links`` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is ``bridge``. The ``name:internalName`` construct is analogous to ``name:alias`` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to ``Links`` in the docker conainer create command and the ``--link`` option to docker run.\n This parameter is not supported for Windows containers.\n Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", + "description": "The ``links`` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is ``bridge``. The ``name:internalName`` construct is analogous to ``name:alias`` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.. This parameter maps to ``Links`` in the docker container create command and the ``--link`` option to docker run.\n This parameter is not supported for Windows containers.\n Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", "insertionOrder": false, "type": "array", "items": { @@ -413,7 +413,7 @@ } }, "SystemControl": { - "description": "A list of namespaced kernel parameters to set in the container. This parameter maps to ``Sysctls`` in tthe docker conainer create command and the ``--sysctl`` option to docker run. For example, you can configure ``net.ipv4.tcp_keepalive_time`` setting to maintain longer lived connections.\n We don't recommend that you specify network-related ``systemControls`` parameters for multiple containers in a single task that also uses either the ``awsvpc`` or ``host`` network mode. Doing this has the following disadvantages:\n + For tasks that use the ``awsvpc`` network mode including Fargate, if you set ``systemControls`` for any container, it applies to all containers in the task. 
If you set different ``systemControls`` for multiple containers in a single task, the container that's started last determines which ``systemControls`` take effect.\n + For tasks that use the ``host`` network mode, the network namespace ``systemControls`` aren't supported.\n \n If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see [IPC mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_definition_ipcmode).\n + For tasks that use the ``host`` IPC mode, IPC namespace ``systemControls`` aren't supported.\n + For tasks that use the ``task`` IPC mode, IPC namespace ``systemControls`` values apply to all containers within a task.\n \n This parameter is not supported for Windows containers.\n This parameter is only supported for tasks that are hosted on FARGATElong if the tasks are using platform version ``1.4.0`` or later (Linux). This isn't supported for Windows containers on Fargate.", + "description": "A list of namespaced kernel parameters to set in the container. This parameter maps to ``Sysctls`` in tthe docker container create command and the ``--sysctl`` option to docker run. For example, you can configure ``net.ipv4.tcp_keepalive_time`` setting to maintain longer lived connections.\n We don't recommend that you specify network-related ``systemControls`` parameters for multiple containers in a single task that also uses either the ``awsvpc`` or ``host`` network mode. Doing this has the following disadvantages:\n + For tasks that use the ``awsvpc`` network mode including Fargate, if you set ``systemControls`` for any container, it applies to all containers in the task. If you set different ``systemControls`` for multiple containers in a single task, the container that's started last determines which ``systemControls`` take effect.\n + For tasks that use the ``host`` network mode, the network namespace ``systemControls`` aren't supported.\n \n If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see [IPC mode](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#task_definition_ipcmode).\n + For tasks that use the ``host`` IPC mode, IPC namespace ``systemControls`` aren't supported.\n + For tasks that use the ``task`` IPC mode, IPC namespace ``systemControls`` values apply to all containers within a task.\n \n This parameter is not supported for Windows containers.\n This parameter is only supported for tasks that are hosted on FARGATElong if the tasks are using platform version ``1.4.0`` or later (Linux). This isn't supported for Windows containers on Fargate.", "additionalProperties": false, "type": "object", "properties": { @@ -451,7 +451,7 @@ "type": "boolean" }, "Driver": { - "description": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use ``docker plugin ls`` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to ``Driver`` in the docker conainer create command and the ``xxdriver`` option to docker volume create.", + "description": "The Docker volume driver to use. 
The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use ``docker plugin ls`` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to ``Driver`` in the docker container create command and the ``xxdriver`` option to docker volume create.", "type": "string" }, "Labels": { @@ -460,7 +460,7 @@ "type": "string" } }, - "description": "Custom metadata to add to your Docker volume. This parameter maps to ``Labels`` in the docker conainer create command and the ``xxlabel`` option to docker volume create.", + "description": "Custom metadata to add to your Docker volume. This parameter maps to ``Labels`` in the docker container create command and the ``xxlabel`` option to docker volume create.", "additionalProperties": false, "type": "object" } @@ -540,7 +540,7 @@ "type": "object", "properties": { "Add": { - "description": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to ``CapAdd`` in the docker conainer create command and the ``--cap-add`` option to docker run.\n Tasks launched on FARGATElong only support adding the ``SYS_PTRACE`` kernel capability.\n Valid values: ``\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"``", + "description": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to ``CapAdd`` in the docker container create command and the ``--cap-add`` option to docker run.\n Tasks launched on FARGATElong only support adding the ``SYS_PTRACE`` kernel capability.\n Valid values: ``\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"``", "insertionOrder": false, "type": "array", "items": { @@ -548,7 +548,7 @@ } }, "Drop": { - "description": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. 
This parameter maps to ``CapDrop`` in the docker conainer create command and the ``--cap-drop`` option to docker run.\n Valid values: ``\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"``", + "description": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to ``CapDrop`` in the docker container create command and the ``--cap-drop`` option to docker run.\n Valid values: ``\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"``", "insertionOrder": false, "type": "array", "items": { @@ -657,7 +657,7 @@ } }, "FSxAuthorizationConfig": { - "description": "", + "description": "The authorization configuration details for Amazon FSx for Windows File Server file system. See [FSxWindowsFileServerVolumeConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_FSxWindowsFileServerVolumeConfiguration.html) in the *Amazon ECS API Reference*.\n For more information and the input format, see [Amazon FSx for Windows File Server Volumes](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/wfsx-volumes.html) in the *Amazon Elastic Container Service Developer Guide*.", "additionalProperties": false, "type": "object", "required": [ @@ -666,11 +666,11 @@ ], "properties": { "CredentialsParameter": { - "description": "", + "description": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an ASMlong secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.", "type": "string" }, "Domain": { - "description": "", + "description": "A fully qualified domain name hosted by an [](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.", "type": "string" } } @@ -785,7 +785,7 @@ "type": "integer" }, "Devices": { - "description": "Any host devices to expose to the container. This parameter maps to ``Devices`` in tthe docker conainer create command and the ``--device`` option to docker run.\n If you're using tasks that use the Fargate launch type, the ``devices`` parameter isn't supported.", + "description": "Any host devices to expose to the container. 
This parameter maps to ``Devices`` in tthe docker container create command and the ``--device`` option to docker run.\n If you're using tasks that use the Fargate launch type, the ``devices`` parameter isn't supported.", "insertionOrder": false, "type": "array", "items": { @@ -827,11 +827,12 @@ } }, "RestartPolicy": { - "description": "", + "description": "You can enable a restart policy for each container defined in your task definition, to overcome transient failures faster and maintain task availability. When you enable a restart policy for a container, Amazon ECS can restart the container if it exits, without needing to replace the task. For more information, see [Restart individual containers in Amazon ECS tasks with container restart policies](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-restart-policy.html) in the *Amazon Elastic Container Service Developer Guide*.", "additionalProperties": false, "type": "object", "properties": { "IgnoredExitCodes": { + "description": "A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum of 50 container exit codes. By default, Amazon ECS does not ignore any exit codes.", "insertionOrder": false, "type": "array", "items": { @@ -839,9 +840,11 @@ } }, "RestartAttemptPeriod": { + "description": "A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be restarted only once every ``restartAttemptPeriod`` seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum ``restartAttemptPeriod`` of 60 seconds and a maximum ``restartAttemptPeriod`` of 1800 seconds. By default, a container must run for 300 seconds before it can be restarted.", "type": "integer" }, "Enabled": { + "description": "Specifies whether a restart policy is enabled for the container.", "type": "boolean" } } diff --git a/internal/service/cloudformation/schemas/AWS_EKS_Addon.json b/internal/service/cloudformation/schemas/AWS_EKS_Addon.json index c2d98e8682..65e1aec590 100644 --- a/internal/service/cloudformation/schemas/AWS_EKS_Addon.json +++ b/internal/service/cloudformation/schemas/AWS_EKS_Addon.json @@ -115,7 +115,11 @@ "tagOnCreate": true, "tagUpdatable": true, "cloudFormationSystemTags": false, - "tagProperty": "/properties/Tags" + "tagProperty": "/properties/Tags", + "permissions": [ + "eks:TagResource", + "eks:UntagResource" + ] }, "additionalProperties": false, "required": [ diff --git a/internal/service/cloudformation/schemas/AWS_EKS_Cluster.json b/internal/service/cloudformation/schemas/AWS_EKS_Cluster.json index 4df55e55c7..70e045c703 100644 --- a/internal/service/cloudformation/schemas/AWS_EKS_Cluster.json +++ b/internal/service/cloudformation/schemas/AWS_EKS_Cluster.json @@ -1,5 +1,9 @@ { "tagging": { + "permissions": [ + "eks:TagResource", + "eks:UntagResource" + ], "taggable": true, "tagOnCreate": true, "tagUpdatable": true, diff --git a/internal/service/cloudformation/schemas/AWS_EKS_FargateProfile.json b/internal/service/cloudformation/schemas/AWS_EKS_FargateProfile.json index 08383db3ea..084f7b5e0e 100644 --- a/internal/service/cloudformation/schemas/AWS_EKS_FargateProfile.json +++ b/internal/service/cloudformation/schemas/AWS_EKS_FargateProfile.json @@ -115,7 +115,11 @@ "tagOnCreate": true, "tagUpdatable": true, "cloudFormationSystemTags": false, - "tagProperty": "/properties/Tags" + "tagProperty": "/properties/Tags", + "permissions": [ + "eks:TagResource", + 
"eks:UntagResource" + ] }, "additionalProperties": false, "required": [ diff --git a/internal/service/cloudformation/schemas/AWS_EKS_PodIdentityAssociation.json b/internal/service/cloudformation/schemas/AWS_EKS_PodIdentityAssociation.json index 8da70999f9..2d63a85fcf 100644 --- a/internal/service/cloudformation/schemas/AWS_EKS_PodIdentityAssociation.json +++ b/internal/service/cloudformation/schemas/AWS_EKS_PodIdentityAssociation.json @@ -72,7 +72,7 @@ "tagProperty": "/properties/Tags", "permissions": [ "eks:TagResource", - "sqs:UntagResource" + "eks:UntagResource" ] }, "additionalProperties": false, diff --git a/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_Listener.json b/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_Listener.json index 8d6ae4c07e..739706bdfc 100644 --- a/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_Listener.json +++ b/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_Listener.json @@ -24,21 +24,24 @@ "handlers": { "read": { "permissions": [ - "elasticloadbalancing:DescribeListeners" + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeListenerAttributes" ] }, "create": { "permissions": [ "elasticloadbalancing:CreateListener", "elasticloadbalancing:DescribeListeners", - "cognito-idp:DescribeUserPoolClient" + "cognito-idp:DescribeUserPoolClient", + "elasticloadbalancing:ModifyListenerAttributes" ] }, "update": { "permissions": [ "elasticloadbalancing:ModifyListener", "elasticloadbalancing:DescribeListeners", - "cognito-idp:DescribeUserPoolClient" + "cognito-idp:DescribeUserPoolClient", + "elasticloadbalancing:ModifyListenerAttributes" ] }, "list": { @@ -310,6 +313,19 @@ } } }, + "ListenerAttribute": { + "description": "", + "additionalProperties": false, + "type": "object", + "properties": { + "Value": { + "type": "string" + }, + "Key": { + "type": "string" + } + } + }, "ForwardConfig": { "description": "Information for creating an action that distributes requests among one or more target groups. For Network Load Balancers, you can specify a single target group. Specify only when ``Type`` is ``forward``. 
If you specify both ``ForwardConfig`` and ``TargetGroupArn``, you can specify only one target group using ``ForwardConfig`` and it must be the same target group specified in ``TargetGroupArn``.", "additionalProperties": false, @@ -431,6 +447,16 @@ "description": "The mutual authentication configuration information.", "$ref": "#/definitions/MutualAuthentication" }, + "ListenerAttributes": { + "arrayType": "AttributeList", + "uniqueItems": true, + "description": "", + "insertionOrder": false, + "type": "array", + "items": { + "$ref": "#/definitions/ListenerAttribute" + } + }, "AlpnPolicy": { "description": "[TLS listener] The name of the Application-Layer Protocol Negotiation (ALPN) policy.", "type": "array", diff --git a/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_TrustStore.json b/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_TrustStore.json index 6e592ea53d..adfb3dab37 100644 --- a/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_TrustStore.json +++ b/internal/service/cloudformation/schemas/AWS_ElasticLoadBalancingV2_TrustStore.json @@ -82,7 +82,12 @@ "tagOnCreate": true, "tagUpdatable": true, "cloudFormationSystemTags": true, - "tagProperty": "/properties/Tags" + "tagProperty": "/properties/Tags", + "permissions": [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:RemoveTags" + ] }, "handlers": { "create": { diff --git a/internal/service/cloudformation/schemas/AWS_IoTWireless_WirelessDevice.json b/internal/service/cloudformation/schemas/AWS_IoTWireless_WirelessDevice.json index e43748dd72..e9c81d5b62 100644 --- a/internal/service/cloudformation/schemas/AWS_IoTWireless_WirelessDevice.json +++ b/internal/service/cloudformation/schemas/AWS_IoTWireless_WirelessDevice.json @@ -230,7 +230,10 @@ "description": "Application type, which can be specified to obtain real-time position information of your LoRaWAN device.", "type": "string", "enum": [ - "SemtechGeolocation" + "SemtechGeolocation", + "SemtechGNSS", + "SemtechGNSSNG", + "SemtechWiFi" ] } }, diff --git a/internal/service/cloudformation/schemas/AWS_MediaConnect_Flow.json b/internal/service/cloudformation/schemas/AWS_MediaConnect_Flow.json index c26357d671..e0eaf15df9 100644 --- a/internal/service/cloudformation/schemas/AWS_MediaConnect_Flow.json +++ b/internal/service/cloudformation/schemas/AWS_MediaConnect_Flow.json @@ -46,8 +46,12 @@ } }, "Maintenance": { - "description": "The maintenance settings you want to use for the flow. 
", + "description": "The maintenance settings you want to use for the flow.", "$ref": "#/definitions/Maintenance" + }, + "SourceMonitoringConfig": { + "description": "The source monitoring config of the flow.", + "$ref": "#/definitions/SourceMonitoringConfig" } }, "definitions": { @@ -561,6 +565,24 @@ "required": [ "Name" ] + }, + "SourceMonitoringConfig": { + "type": "object", + "description": "The settings for source monitoring.", + "properties": { + "ThumbnailState": { + "type": "string", + "description": "The state of thumbnail monitoring.", + "enum": [ + "ENABLED", + "DISABLED" + ] + } + }, + "additionalProperties": false, + "required": [ + "ThumbnailState" + ] } }, "required": [ diff --git a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_Firewall.json b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_Firewall.json index 6d29602433..90c3adbe2a 100644 --- a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_Firewall.json +++ b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_Firewall.json @@ -118,7 +118,16 @@ } }, "tagging": { - "taggable": true + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags", + "permissions": [ + "network-firewall:TagResource", + "network-firewall:UntagResource", + "network-firewall:ListTagsForResource" + ] }, "required": [ "FirewallName", diff --git a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_FirewallPolicy.json b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_FirewallPolicy.json index dbc7ea5d11..d010928e25 100644 --- a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_FirewallPolicy.json +++ b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_FirewallPolicy.json @@ -311,7 +311,16 @@ } }, "tagging": { - "taggable": true + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags", + "permissions": [ + "network-firewall:TagResource", + "network-firewall:UntagResource", + "network-firewall:ListTagsForResource" + ] }, "required": [ "FirewallPolicyName", diff --git a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_RuleGroup.json b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_RuleGroup.json index 3b738d7a0b..85c1c2604a 100644 --- a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_RuleGroup.json +++ b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_RuleGroup.json @@ -655,7 +655,16 @@ } }, "tagging": { - "taggable": true + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags", + "permissions": [ + "network-firewall:TagResource", + "network-firewall:UntagResource", + "network-firewall:ListTagsForResource" + ] }, "required": [ "Type", diff --git a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_TLSInspectionConfiguration.json b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_TLSInspectionConfiguration.json index ada72ec7b6..edbcf90868 100644 --- a/internal/service/cloudformation/schemas/AWS_NetworkFirewall_TLSInspectionConfiguration.json +++ b/internal/service/cloudformation/schemas/AWS_NetworkFirewall_TLSInspectionConfiguration.json @@ -232,7 +232,16 @@ } }, "tagging": { - "taggable": true + "taggable": true, + "tagOnCreate": true, + "tagUpdatable": true, + "cloudFormationSystemTags": true, + "tagProperty": "/properties/Tags", + "permissions": [ + 
"network-firewall:TagResource", + "network-firewall:UntagResource", + "network-firewall:ListTagsForResource" + ] }, "required": [ "TLSInspectionConfigurationName", diff --git a/internal/service/cloudformation/schemas/AWS_PaymentCryptography_Key.json b/internal/service/cloudformation/schemas/AWS_PaymentCryptography_Key.json index 67abec88a3..84d9685e88 100644 --- a/internal/service/cloudformation/schemas/AWS_PaymentCryptography_Key.json +++ b/internal/service/cloudformation/schemas/AWS_PaymentCryptography_Key.json @@ -12,7 +12,9 @@ "AES_256", "RSA_2048", "RSA_3072", - "RSA_4096" + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384" ] }, "KeyAttributes": { @@ -154,11 +156,12 @@ "Value": { "type": "string", "maxLength": 256, - "minLength": 0 + "minLength": 1 } }, "required": [ - "Key" + "Key", + "Value" ], "additionalProperties": false } @@ -251,7 +254,12 @@ "tagOnCreate": true, "tagUpdatable": true, "cloudFormationSystemTags": true, - "tagProperty": "/properties/Tags" + "tagProperty": "/properties/Tags", + "permissions": [ + "payment-cryptography:ListTagsForResource", + "payment-cryptography:TagResource", + "payment-cryptography:UntagResource" + ] }, "additionalProperties": false } diff --git a/internal/service/cloudformation/schemas/AWS_Pipes_Pipe.json b/internal/service/cloudformation/schemas/AWS_Pipes_Pipe.json index 82e4d23ebd..fc0b10ff02 100644 --- a/internal/service/cloudformation/schemas/AWS_Pipes_Pipe.json +++ b/internal/service/cloudformation/schemas/AWS_Pipes_Pipe.json @@ -1746,6 +1746,11 @@ "EnrichmentParameters": { "$ref": "#/definitions/PipeEnrichmentParameters" }, + "KmsKeyIdentifier": { + "type": "string", + "maxLength": 2048, + "minLength": 0 + }, "LastModifiedTime": { "type": "string", "format": "date-time" @@ -1902,11 +1907,15 @@ "permissions": [ "pipes:DeletePipe", "pipes:DescribePipe", + "pipes:UntagResource", "logs:CreateLogDelivery", "logs:UpdateLogDelivery", "logs:DeleteLogDelivery", "logs:GetLogDelivery", - "logs:ListLogDeliveries" + "logs:ListLogDeliveries", + "kms:DescribeKey", + "kms:Decrypt", + "kms:GenerateDataKey" ] }, "list": { diff --git a/internal/service/cloudformation/schemas/AWS_QuickSight_DataSource.json b/internal/service/cloudformation/schemas/AWS_QuickSight_DataSource.json index 7c33ff8abf..71ea2dfd85 100644 --- a/internal/service/cloudformation/schemas/AWS_QuickSight_DataSource.json +++ b/internal/service/cloudformation/schemas/AWS_QuickSight_DataSource.json @@ -1,284 +1,268 @@ { + "sourceUrl": "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-quicksight", + "handlers": { + "read": { + "permissions": [ + "quicksight:DescribeDataSource", + "quicksight:DescribeDataSourcePermissions", + "quicksight:ListTagsForResource" + ] + }, + "create": { + "permissions": [ + "quicksight:CreateDataSource", + "quicksight:DescribeDataSource", + "quicksight:DescribeDataSourcePermissions", + "quicksight:TagResource", + "quicksight:ListTagsForResource" + ] + }, + "update": { + "permissions": [ + "quicksight:DescribeDataSource", + "quicksight:DescribeDataSourcePermissions", + "quicksight:UpdateDataSource", + "quicksight:UpdateDataSourcePermissions", + "quicksight:CreateFolderMembership", + "quicksight:DeleteFolderMembership", + "quicksight:ListFoldersForResource", + "quicksight:TagResource", + "quicksight:UntagResource", + "quicksight:ListTagsForResource" + ] + }, + "list": { + "permissions": [ + "quicksight:DescribeDataSource", + "quicksight:ListDataSources" + ] + }, + "delete": { + "permissions": [ + "quicksight:DescribeDataSource", + 
"quicksight:DescribeDataSourcePermissions", + "quicksight:DeleteDataSource", + "quicksight:ListTagsForResource" + ] + } + }, "typeName": "AWS::QuickSight::DataSource", + "readOnlyProperties": [ + "/properties/Arn", + "/properties/CreatedTime", + "/properties/LastUpdatedTime", + "/properties/Status" + ], "description": "Definition of the AWS::QuickSight::DataSource Resource Type.", + "writeOnlyProperties": [ + "/properties/Credentials" + ], + "createOnlyProperties": [ + "/properties/AwsAccountId", + "/properties/DataSourceId", + "/properties/Type" + ], + "additionalProperties": false, + "primaryIdentifier": [ + "/properties/AwsAccountId", + "/properties/DataSourceId" + ], "definitions": { - "AmazonElasticsearchParameters": { + "AuroraPostgreSqlParameters": { + "description": "
<p>
Parameters for Amazon Aurora PostgreSQL-Compatible Edition.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
The parameters for OpenSearch.
</p>
", "properties": { - "Domain": { + "Port": { + "default": 0, + "maximum": 65535, + "description": "
<p>
The port that Amazon Aurora PostgreSQL is listening on.<\/p>", + "type": "number", + "minimum": 1 + }, + "Database": { + "minLength": 1, + "description": "
<p>
The Amazon Aurora PostgreSQL database to connect to.<\/p>", "type": "string", - "maxLength": 64, + "maxLength": 128 + }, + "Host": { "minLength": 1, - "description": "
<p>
The OpenSearch domain.
</p>
" + "description": "
<p>
The Amazon Aurora PostgreSQL-Compatible host to connect to.<\/p>", + "type": "string", + "maxLength": 256 } }, "required": [ - "Domain" - ], - "additionalProperties": false + "Database", + "Host", + "Port" + ] }, - "AmazonOpenSearchParameters": { + "DataSourceCredentials": { + "description": "
<p>
Data source credentials. This is a variant type structure. For this structure to be\n valid, only one of the attributes can be non-null.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
The parameters for OpenSearch.
</p>
", "properties": { - "Domain": { - "type": "string", - "maxLength": 64, + "SecretArn": { "minLength": 1, - "description": "
<p>
The OpenSearch domain.
</p>
" + "pattern": "^arn:[-a-z0-9]*:secretsmanager:[-a-z0-9]*:[0-9]{12}:secret:.+$", + "description": "
<p>
The Amazon Resource Name (ARN) of the secret associated with the data source in Amazon Secrets Manager.<\/p>", + "type": "string", + "maxLength": 2048 + }, + "CopySourceArn": { + "pattern": "^arn:[-a-z0-9]*:quicksight:[-a-z0-9]*:[0-9]{12}:datasource/.+$", + "description": "
<p>
The Amazon Resource Name (ARN) of a data source that has the credential pair that you\n want to use. When CopySourceArn<\/code> is not null, the credential pair from the\n data source in the ARN is used as the credentials for the\n DataSourceCredentials<\/code> structure.<\/p>", + "type": "string" + }, + "CredentialPair": { + "$ref": "#/definitions/CredentialPair" } - }, - "required": [ - "Domain" - ], - "additionalProperties": false + } }, - "AthenaParameters": { + "ManifestFileLocation": { + "description": "
<p>
Amazon S3 manifest file location.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
Parameters for Amazon Athena.
</p>
", "properties": { - "WorkGroup": { - "type": "string", - "maxLength": 128, + "Bucket": { "minLength": 1, - "description": "
<p>
The workgroup that Amazon Athena uses.
</p>
" + "description": "
<p>
Amazon S3 bucket.<\/p>", + "type": "string", + "maxLength": 1024 }, - "RoleArn": { + "Key": { + "minLength": 1, + "description": "
<p>
Amazon S3 key that identifies an object.<\/p>", "type": "string", - "maxLength": 2048, - "minLength": 20, - "description": "
<p>
Use the RoleArn structure to override an account-wide role for a specific Athena data source. For example, say an account administrator has turned off all Athena access with an account-wide role. The administrator can then use RoleArn to bypass the account-wide role and allow Athena access for the single Athena data source that is specified in the structure, even if the account-wide role forbidding Athena access is still active.
</p>
" + "maxLength": 1024 } }, - "additionalProperties": false + "required": [ + "Bucket", + "Key" + ] }, - "AuroraParameters": { + "StarburstParameters": { + "description": "
<p>
The parameters that are required to connect to a Starburst data source.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
Parameters for Amazon Aurora.
</p>
", "properties": { - "Host": { - "type": "string", - "maxLength": 256, - "minLength": 1, - "description": "
<p>
Host.
</p>
" - }, "Port": { - "type": "number", "default": 0, "maximum": 65535, - "minimum": 1, - "description": "
<p>
Port.
</p>
" + "description": "
<p>
The port for the Starburst data source.<\/p>", + "type": "number", + "minimum": 1 }, - "Database": { - "type": "string", - "maxLength": 128, + "ProductType": { + "$ref": "#/definitions/StarburstProductType" + }, + "Host": { "minLength": 1, - "description": "
<p>
Database.
</p>
" + "description": "
<p>
The host name of the Starburst data source.<\/p>", + "type": "string", + "maxLength": 256 + }, + "Catalog": { + "minLength": 0, + "description": "
<p>
The catalog name for the Starburst data source.<\/p>", + "type": "string", + "maxLength": 128 } }, "required": [ - "Database", + "Catalog", "Host", "Port" - ], - "additionalProperties": false + ] }, - "AuroraPostgreSqlParameters": { + "RedshiftParameters": { + "description": "
<p>
The parameters for Amazon Redshift. The ClusterId<\/code> field can be blank if\n Host<\/code> and Port<\/code> are both set. The Host<\/code> and Port<\/code> fields can be blank if the ClusterId<\/code> field is set.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
Parameters for Amazon Aurora PostgreSQL-Compatible Edition.
</p>
", "properties": { - "Host": { - "type": "string", - "maxLength": 256, + "IAMParameters": { + "$ref": "#/definitions/RedshiftIAMParameters" + }, + "ClusterId": { "minLength": 1, - "description": "
<p>
The Amazon Aurora PostgreSQL-Compatible host to connect to.
</p>
" + "description": "
<p>
Cluster ID. This field can be blank if the Host<\/code> and Port<\/code> are\n provided.<\/p>", + "type": "string", + "maxLength": 64 }, "Port": { - "type": "number", "default": 0, "maximum": 65535, - "minimum": 1, - "description": "
<p>
The port that Amazon Aurora PostgreSQL is listening on.
</p>
" + "description": "
<p>
Port. This field can be blank if the ClusterId<\/code> is provided.<\/p>", + "type": "number", + "minimum": 0 }, "Database": { + "minLength": 1, + "description": "
<p>
Database.<\/p>", "type": "string", - "maxLength": 128, + "maxLength": 128 + }, + "Host": { "minLength": 1, - "description": "
<p>
The Amazon Aurora PostgreSQL database to connect to.
</p>
" + "description": "
<p>
Host. This field can be blank if ClusterId<\/code> is provided.<\/p>", + "type": "string", + "maxLength": 256 + }, + "IdentityCenterConfiguration": { + "$ref": "#/definitions/IdentityCenterConfiguration" } }, "required": [ - "Database", - "Host", - "Port" - ], - "additionalProperties": false + "Database" + ] }, - "AwsIotAnalyticsParameters": { + "VpcConnectionProperties": { + "description": "
<p>
VPC connection properties.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
The parameters for IoT Analytics.
</p>
", "properties": { - "DataSetName": { - "type": "string", - "maxLength": 128, - "minLength": 1, - "description": "
<p>
Dataset name.
</p>
" + "VpcConnectionArn": { + "description": "
<p>
The Amazon Resource Name (ARN) for the VPC connection.<\/p>", + "type": "string" } }, "required": [ - "DataSetName" - ], - "additionalProperties": false + "VpcConnectionArn" + ] }, - "CredentialPair": { + "SnowflakeParameters": { + "description": "
<p>
The parameters for Snowflake.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
The combination of user name and password that are used as credentials.
</p>
", "properties": { - "Username": { + "Warehouse": { + "minLength": 0, + "description": "
<p>
Warehouse.<\/p>", "type": "string", - "maxLength": 64, - "minLength": 1, - "description": "
<p>
User name.
</p>
" + "maxLength": 128 }, - "Password": { - "type": "string", - "maxLength": 1024, + "Database": { "minLength": 1, - "description": "
<p>
Password.
</p>
" - }, - "AlternateDataSourceParameters": { - "type": "array", - "items": { - "$ref": "#/definitions/DataSourceParameters" - }, - "maxItems": 50, - "minItems": 1, - "description": "
<p>
A set of alternate data source parameters that you want to share for these\n credentials. The credentials are applied in tandem with the data source parameters when\n you copy a data source by using a create or update request. The API operation compares\n the DataSourceParameters structure that's in the request with the\n structures in the AlternateDataSourceParameters allow list. If the\n structures are an exact match, the request is allowed to use the new data source with\n the existing credentials. If the AlternateDataSourceParameters list is\n null, the DataSourceParameters originally used with these\n Credentials is automatically allowed.
</p>
" - } - }, - "required": [ - "Password", - "Username" - ], - "additionalProperties": false - }, - "DataSourceCredentials": { - "type": "object", - "description": "
<p>
Data source credentials. This is a variant type structure. For this structure to be\n valid, only one of the attributes can be non-null.
</p>
", - "properties": { - "CredentialPair": { - "$ref": "#/definitions/CredentialPair" - }, - "CopySourceArn": { + "description": "
<p>
Database.<\/p>", "type": "string", - "pattern": "^arn:[-a-z0-9]*:quicksight:[-a-z0-9]*:[0-9]{12}:datasource/.+$", - "description": "
<p>
The Amazon Resource Name (ARN) of a data source that has the credential pair that you\n want to use. When CopySourceArn is not null, the credential pair from the\n data source in the ARN is used as the credentials for the\n DataSourceCredentials structure.
</p>
" + "maxLength": 128 }, - "SecretArn": { - "type": "string", - "maxLength": 2048, + "Host": { "minLength": 1, - "pattern": "^arn:[-a-z0-9]*:secretsmanager:[-a-z0-9]*:[0-9]{12}:secret:.+$", - "description": "
<p>
The Amazon Resource Name (ARN) of the secret associated with the data source in Amazon Secrets Manager.
</p>
" - } - }, - "additionalProperties": false - }, - "DataSourceErrorInfo": { - "type": "object", - "description": "
<p>
Error information for the data source creation or update.
</p>
", - "properties": { - "Type": { - "$ref": "#/definitions/DataSourceErrorInfoType" - }, - "Message": { + "description": "
<p>
Host.<\/p>", "type": "string", - "description": "
<p>
Error message.
</p>
" + "maxLength": 256 } }, - "additionalProperties": false - }, - "DataSourceErrorInfoType": { - "type": "string", - "enum": [ - "ACCESS_DENIED", - "COPY_SOURCE_NOT_FOUND", - "TIMEOUT", - "ENGINE_VERSION_NOT_SUPPORTED", - "UNKNOWN_HOST", - "GENERIC_SQL_FAILURE", - "CONFLICT", - "UNKNOWN" + "required": [ + "Database", + "Host", + "Warehouse" ] }, - "DataSourceParameters": { - "type": "object", - "description": "
<p>
The parameters that Amazon QuickSight uses to connect to your underlying data source.\n This is a variant type structure. For this structure to be valid, only one of the\n attributes can be non-null.
</p>
", - "properties": { - "AmazonElasticsearchParameters": { - "$ref": "#/definitions/AmazonElasticsearchParameters" - }, - "AthenaParameters": { - "$ref": "#/definitions/AthenaParameters" - }, - "AuroraParameters": { - "$ref": "#/definitions/AuroraParameters" - }, - "AuroraPostgreSqlParameters": { - "$ref": "#/definitions/AuroraPostgreSqlParameters" - }, - "MariaDbParameters": { - "$ref": "#/definitions/MariaDbParameters" - }, - "MySqlParameters": { - "$ref": "#/definitions/MySqlParameters" - }, - "OracleParameters": { - "$ref": "#/definitions/OracleParameters" - }, - "PostgreSqlParameters": { - "$ref": "#/definitions/PostgreSqlParameters" - }, - "PrestoParameters": { - "$ref": "#/definitions/PrestoParameters" - }, - "RdsParameters": { - "$ref": "#/definitions/RdsParameters" - }, - "RedshiftParameters": { - "$ref": "#/definitions/RedshiftParameters" - }, - "S3Parameters": { - "$ref": "#/definitions/S3Parameters" - }, - "SnowflakeParameters": { - "$ref": "#/definitions/SnowflakeParameters" - }, - "SparkParameters": { - "$ref": "#/definitions/SparkParameters" - }, - "SqlServerParameters": { - "$ref": "#/definitions/SqlServerParameters" - }, - "TeradataParameters": { - "$ref": "#/definitions/TeradataParameters" - }, - "AmazonOpenSearchParameters": { - "$ref": "#/definitions/AmazonOpenSearchParameters" - }, - "DatabricksParameters": { - "$ref": "#/definitions/DatabricksParameters" - }, - "StarburstParameters": { - "$ref": "#/definitions/StarburstParameters" - }, - "TrinoParameters": { - "$ref": "#/definitions/TrinoParameters" - } - }, - "additionalProperties": false - }, "DataSourceType": { "type": "string", "enum": [ @@ -326,768 +310,787 @@ "GLUE" ] }, - "DatabricksParameters": { + "AmazonElasticsearchParameters": { + "description": "
<p>
The parameters for OpenSearch.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
The parameters that are required to connect to a Databricks data source.
</p>
", "properties": { - "Host": { + "Domain": { + "minLength": 1, + "description": "
<p>
The OpenSearch domain.<\/p>", "type": "string", - "maxLength": 256, + "maxLength": 64 + } + }, + "required": [ + "Domain" + ] + }, + "AmazonOpenSearchParameters": { + "description": "
<p>
The parameters for OpenSearch.<\/p>", + "additionalProperties": false, + "type": "object", + "properties": { + "Domain": { "minLength": 1, - "description": "
<p>
The host name of the Databricks data source.
</p>
" - }, + "description": "
<p>
The OpenSearch domain.<\/p>", + "type": "string", + "maxLength": 64 + } + }, + "required": [ + "Domain" + ] + }, + "ResourceStatus": { + "type": "string", + "enum": [ + "CREATION_IN_PROGRESS", + "CREATION_SUCCESSFUL", + "CREATION_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_FAILED", + "DELETED" + ] + }, + "AuroraParameters": { + "description": "
<p>
Parameters for Amazon Aurora.<\/p>", + "additionalProperties": false, + "type": "object", + "properties": { "Port": { - "type": "number", "default": 0, "maximum": 65535, - "minimum": 1, - "description": "
<p>
The port for the Databricks data source.
</p>
" + "description": "
<p>
Port.<\/p>", + "type": "number", + "minimum": 1 }, - "SqlEndpointPath": { + "Database": { + "minLength": 1, + "description": "
<p>
Database.<\/p>", "type": "string", - "maxLength": 4096, + "maxLength": 128 + }, + "Host": { "minLength": 1, - "description": "
<p>
The HTTP path of the Databricks data source.
</p>
" + "description": "
<p>
Host.<\/p>", + "type": "string", + "maxLength": 256 } }, "required": [ + "Database", "Host", - "Port", - "SqlEndpointPath" - ], - "additionalProperties": false + "Port" + ] + }, + "S3Parameters": { + "description": "
<p>
The parameters for S3.<\/p>", + "additionalProperties": false, + "type": "object", + "properties": { + "ManifestFileLocation": { + "$ref": "#/definitions/ManifestFileLocation" + }, + "RoleArn": { + "minLength": 20, + "description": "
<p>
Use the RoleArn<\/code> structure to override an account-wide role for a specific S3 data source. For example, say an account administrator has turned off all S3 access with an account-wide role. The administrator can then use RoleArn<\/code> to bypass the account-wide role and allow S3 access for the single S3 data source that is specified in the structure, even if the account-wide role forbidding S3 access is still active.<\/p>", + "type": "string", + "maxLength": 2048 + } + }, + "required": [ + "ManifestFileLocation" + ] }, "IdentityCenterConfiguration": { + "description": "
<p>
The parameters for an IAM Identity Center configuration.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
The parameters for an IAM Identity Center configuration.
</p>
", "properties": { "EnableIdentityPropagation": { - "type": "boolean", "default": null, - "description": "
<p>
A Boolean option that controls whether Trusted Identity Propagation should be used.
</p>
" + "description": "
<p>
A Boolean option that controls whether Trusted Identity Propagation should be used.<\/p>", + "type": "boolean" } - }, - "additionalProperties": false + } }, - "ManifestFileLocation": { + "SslProperties": { + "description": "
<p>
Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your\n underlying data source.<\/p>", + "additionalProperties": false, "type": "object", - "description": "
<p>
Amazon S3 manifest file location.
</p>
", "properties": { - "Bucket": { - "type": "string", - "maxLength": 1024, - "minLength": 1, - "description": "
<p>
Amazon S3 bucket.
</p>
" + "DisableSsl": { + "default": false, + "description": "
<p>
A Boolean option to control whether SSL should be disabled.<\/p>", + "type": "boolean" + } + } + }, + "DataSourceErrorInfoType": { + "type": "string", + "enum": [ + "ACCESS_DENIED", + "COPY_SOURCE_NOT_FOUND", + "TIMEOUT", + "ENGINE_VERSION_NOT_SUPPORTED", + "UNKNOWN_HOST", + "GENERIC_SQL_FAILURE", + "CONFLICT", + "UNKNOWN" + ] + }, + "ResourcePermission": { + "description": "
<p>
Permission for the resource.<\/p>", + "additionalProperties": false, + "type": "object", + "properties": { + "Actions": { + "minItems": 1, + "maxItems": 20, + "description": "
<p>
The IAM action to grant or revoke permissions on.<\/p>", + "type": "array", + "items": { + "type": "string" + } }, - "Key": { - "type": "string", - "maxLength": 1024, + "Resource": { + "type": "string" + }, + "Principal": { "minLength": 1, - "description": "
<p>
Amazon S3 key that identifies an object.
</p>
" + "description": "
<p>
The Amazon Resource Name (ARN) of the principal. This can be one of the\n following:<\/p>\n