diff --git a/plugins/aws/resoto_plugin_aws/collector.py b/plugins/aws/resoto_plugin_aws/collector.py index b9b7833b7f..b68e71d44d 100644 --- a/plugins/aws/resoto_plugin_aws/collector.py +++ b/plugins/aws/resoto_plugin_aws/collector.py @@ -40,6 +40,9 @@ ssm, ecr, secretsmanager, + opensearch, + acm, + waf, ) from resoto_plugin_aws.resource.base import AwsAccount, AwsApiSpec, AwsRegion, AwsResource, GraphBuilder @@ -63,9 +66,11 @@ + route53.resources + s3.resources + service_quotas.resources + + waf.resources ) regional_resources: List[Type[AwsResource]] = ( sagemaker.resources # start with sagemaker, because it is very slow + + acm.resources + apigateway.resources + autoscaling.resources + athena.resources @@ -88,6 +93,7 @@ + kinesis.resources + kms.resources + lambda_.resources + + opensearch.resources + rds.resources + secretsmanager.resources + service_quotas.resources diff --git a/plugins/aws/resoto_plugin_aws/resource/acm.py b/plugins/aws/resoto_plugin_aws/resource/acm.py new file mode 100644 index 0000000000..3c9ae0fe09 --- /dev/null +++ b/plugins/aws/resoto_plugin_aws/resource/acm.py @@ -0,0 +1,155 @@ +import logging +from datetime import datetime +from typing import ClassVar, Dict, Optional, List, Type + +from attrs import define, field +from boto3.exceptions import Boto3Error + +from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder +from resoto_plugin_aws.utils import ToDict +from resotolib.json_bender import Bender, S, ForallBend, Bend + +log = logging.getLogger("resoto.plugins.aws") +service_name = "acm" + + +@define(eq=False, slots=False) +class AwsAcmResourceRecord: + kind: ClassVar[str] = "aws_acm_resource_record" + mapping: ClassVar[Dict[str, Bender]] = {"name": S("Name"), "type": S("Type"), "value": S("Value")} + name: Optional[str] = field(default=None, metadata={"description": "The name of the DNS record to create in your domain. 
This is supplied by ACM."}) # fmt: skip + type: Optional[str] = field(default=None, metadata={"description": "The type of DNS record. Currently this can be CNAME."}) # fmt: skip + value: Optional[str] = field(default=None, metadata={"description": "The value of the CNAME record to add to your DNS database. This is supplied by ACM."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsAcmDomainValidation: + kind: ClassVar[str] = "aws_acm_domain_validation" + mapping: ClassVar[Dict[str, Bender]] = { + "domain_name": S("DomainName"), + "validation_emails": S("ValidationEmails", default=[]), + "validation_domain": S("ValidationDomain"), + "validation_status": S("ValidationStatus"), + "resource_record": S("ResourceRecord") >> Bend(AwsAcmResourceRecord.mapping), + "validation_method": S("ValidationMethod"), + } + domain_name: Optional[str] = field(default=None, metadata={"description": "A fully qualified domain name (FQDN) in the certificate. For example, www.example.com or example.com."}) # fmt: skip + validation_emails: Optional[List[str]] = field(default=None, metadata={"description": "A list of email addresses that ACM used to send domain validation emails."}) # fmt: skip + validation_domain: Optional[str] = field(default=None, metadata={"description": "The domain name that ACM used to send domain validation emails."}) # fmt: skip + validation_status: Optional[str] = field(default=None, metadata={"description": "The validation status of the domain name."}) # fmt: skip + resource_record: Optional[AwsAcmResourceRecord] = field(default=None, metadata={"description": "Contains the CNAME record that you add to your DNS database for domain validation."}) # fmt: skip + validation_method: Optional[str] = field(default=None, metadata={"description": "Specifies the domain validation method."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsAcmRenewalSummary: + kind: ClassVar[str] = "aws_acm_renewal_summary" + mapping: ClassVar[Dict[str, Bender]] = { + 
"renewal_status": S("RenewalStatus"), + "domain_validation_options": S("DomainValidationOptions", default=[]) + >> ForallBend(AwsAcmDomainValidation.mapping), + "renewal_status_reason": S("RenewalStatusReason"), + "updated_at": S("UpdatedAt"), + } + renewal_status: Optional[str] = field(default=None, metadata={"description": "The status of ACM's managed renewal of the certificate."}) # fmt: skip + domain_validation_options: Optional[List[AwsAcmDomainValidation]] = field(factory=list, metadata={"description": "Contains information about the validation of each domain name in the certificate, as it pertains to ACM's managed renewal. This is different from the initial validation that occurs as a result of the RequestCertificate request. This field exists only when the certificate type is AMAZON_ISSUED."}) # fmt: skip + renewal_status_reason: Optional[str] = field(default=None, metadata={"description": "The reason that a renewal request was unsuccessful."}) # fmt: skip + updated_at: Optional[datetime] = field(default=None, metadata={"description": "The time at which the renewal summary was last updated."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsAcmExtendedKeyUsage: + kind: ClassVar[str] = "aws_acm_extended_key_usage" + mapping: ClassVar[Dict[str, Bender]] = {"name": S("Name"), "oid": S("OID")} + name: Optional[str] = field(default=None, metadata={"description": "The name of an Extended Key Usage value."}) # fmt: skip + oid: Optional[str] = field(default=None, metadata={"description": "An object identifier (OID) for the extension value. OIDs are strings of numbers separated by periods. The following OIDs are defined in RFC 3280 and RFC 5280. 
1.3.6.1.5.5.7.3.1 (TLS_WEB_SERVER_AUTHENTICATION) 1.3.6.1.5.5.7.3.2 (TLS_WEB_CLIENT_AUTHENTICATION) 1.3.6.1.5.5.7.3.3 (CODE_SIGNING) 1.3.6.1.5.5.7.3.4 (EMAIL_PROTECTION) 1.3.6.1.5.5.7.3.8 (TIME_STAMPING) 1.3.6.1.5.5.7.3.9 (OCSP_SIGNING) 1.3.6.1.5.5.7.3.5 (IPSEC_END_SYSTEM) 1.3.6.1.5.5.7.3.6 (IPSEC_TUNNEL) 1.3.6.1.5.5.7.3.7 (IPSEC_USER)"}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsAcmCertificate(AwsResource): + kind: ClassVar[str] = "aws_acm_certificate" + api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("acm", "describe-certificate", "Certificate") + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("DomainName"), + "tags": S("Tags", default=[]) >> ToDict(), + "name": S("DomainName"), + "ctime": S("CreatedAt"), + "arn": S("CertificateArn"), + "subject_alternative_names": S("SubjectAlternativeNames", default=[]), + "domain_validation_options": S("DomainValidationOptions", default=[]) + >> ForallBend(AwsAcmDomainValidation.mapping), + "serial": S("Serial"), + "subject": S("Subject"), + "issuer": S("Issuer"), + "issued_at": S("IssuedAt"), + "imported_at": S("ImportedAt"), + "status": S("Status"), + "revoked_at": S("RevokedAt"), + "revocation_reason": S("RevocationReason"), + "not_before": S("NotBefore"), + "not_after": S("NotAfter"), + "key_algorithm": S("KeyAlgorithm"), + "signature_algorithm": S("SignatureAlgorithm"), + "in_use_by": S("InUseBy", default=[]), + "failure_reason": S("FailureReason"), + "type": S("Type"), + "renewal_summary": S("RenewalSummary") >> Bend(AwsAcmRenewalSummary.mapping), + "key_usages": S("KeyUsages", default=[]) >> ForallBend(S("Name")), + "extended_key_usages": S("ExtendedKeyUsages", default=[]) >> ForallBend(AwsAcmExtendedKeyUsage.mapping), + "certificate_authority_arn": S("CertificateAuthorityArn"), + "renewal_eligibility": S("RenewalEligibility"), + "certificate_transparency_logging": S("Options", "CertificateTransparencyLoggingPreference"), + } + subject_alternative_names: Optional[List[str]] = field(factory=list, 
metadata={"description": "One or more domain names (subject alternative names) included in the certificate. This list contains the domain names that are bound to the public key that is contained in the certificate. The subject alternative names include the canonical domain name (CN) of the certificate and additional domain names that can be used to connect to the website."}) # fmt: skip + domain_validation_options: Optional[List[AwsAcmDomainValidation]] = field(factory=list, metadata={"description": "Contains information about the initial validation of each domain name that occurs as a result of the RequestCertificate request. This field exists only when the certificate type is AMAZON_ISSUED."}) # fmt: skip + serial: Optional[str] = field(default=None, metadata={"description": "The serial number of the certificate."}) # fmt: skip + subject: Optional[str] = field(default=None, metadata={"description": "The name of the entity that is associated with the public key contained in the certificate."}) # fmt: skip + issuer: Optional[str] = field(default=None, metadata={"description": "The name of the certificate authority that issued and signed the certificate."}) # fmt: skip + issued_at: Optional[datetime] = field(default=None, metadata={"description": "The time at which the certificate was issued. This value exists only when the certificate type is AMAZON_ISSUED."}) # fmt: skip + imported_at: Optional[datetime] = field(default=None, metadata={"description": "The date and time when the certificate was imported. This value exists only when the certificate type is IMPORTED."}) # fmt: skip + status: Optional[str] = field(default=None, metadata={"description": "The status of the certificate. A certificate enters status PENDING_VALIDATION upon being requested, unless it fails for any of the reasons given in the troubleshooting topic Certificate request fails. ACM makes repeated attempts to validate a certificate for 72 hours and then times out. 
If a certificate shows status FAILED or VALIDATION_TIMED_OUT, delete the request, correct the issue with DNS validation or Email validation, and try again. If validation succeeds, the certificate enters status ISSUED."}) # fmt: skip + revoked_at: Optional[datetime] = field(default=None, metadata={"description": "The time at which the certificate was revoked. This value exists only when the certificate status is REVOKED."}) # fmt: skip + revocation_reason: Optional[str] = field(default=None, metadata={"description": "The reason the certificate was revoked. This value exists only when the certificate status is REVOKED."}) # fmt: skip + not_before: Optional[datetime] = field(default=None, metadata={"description": "The time before which the certificate is not valid."}) # fmt: skip + not_after: Optional[datetime] = field(default=None, metadata={"description": "The time after which the certificate is not valid."}) # fmt: skip + key_algorithm: Optional[str] = field(default=None, metadata={"description": "The algorithm that was used to generate the public-private key pair."}) # fmt: skip + signature_algorithm: Optional[str] = field(default=None, metadata={"description": "The algorithm that was used to sign the certificate."}) # fmt: skip + in_use_by: Optional[List[str]] = field(factory=list, metadata={"description": "A list of ARNs for the Amazon Web Services resources that are using the certificate. A certificate can be used by multiple Amazon Web Services resources."}) # fmt: skip + failure_reason: Optional[str] = field(default=None, metadata={"description": "The reason the certificate request failed. This value exists only when the certificate status is FAILED. For more information, see Certificate Request Failed in the Certificate Manager User Guide."}) # fmt: skip + type: Optional[str] = field(default=None, metadata={"description": "The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED. 
For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Certificate Manager User Guide."}) # fmt: skip + renewal_summary: Optional[AwsAcmRenewalSummary] = field(default=None, metadata={"description": "Contains information about the status of ACM's managed renewal for the certificate. This field exists only when the certificate type is AMAZON_ISSUED."}) # fmt: skip + key_usages: Optional[List[str]] = field(factory=list, metadata={"description": "A list of Key Usage X.509 v3 extension objects. Each object is a string value that identifies the purpose of the public key contained in the certificate. Possible extension values include DIGITAL_SIGNATURE, KEY_ENCHIPHERMENT, NON_REPUDIATION, and more."}) # fmt: skip + extended_key_usages: Optional[List[AwsAcmExtendedKeyUsage]] = field(factory=list, metadata={"description": "Contains a list of Extended Key Usage X.509 v3 extension objects. Each object specifies a purpose for which the certificate public key can be used and consists of a name and an object identifier (OID)."}) # fmt: skip + certificate_authority_arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) of the private certificate authority (CA) that issued the certificate. This has the following format: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012"}) # fmt: skip + renewal_eligibility: Optional[str] = field(default=None, metadata={"description": "Specifies whether the certificate is eligible for renewal. 
At this time, only exported private certificates can be renewed with the RenewCertificate command."}) # fmt: skip + certificate_transparency_logging: Optional[str] = field(default=None, metadata={"description": "Value that specifies whether to add the certificate to a transparency log. Certificate transparency makes it possible to detect SSL certificates that have been mistakenly or maliciously issued. A browser might respond to certificate that has not been logged by showing an error message. The logs are cryptographically secure."}) # fmt: skip + + @classmethod + def collect_resources(cls: Type[AwsResource], builder: GraphBuilder) -> None: + def fetch_certificate(arn: str) -> None: + with builder.suppress(f"{service_name}.describe-certificate"): + if res := builder.client.get(service_name, "describe-certificate", "Certificate", CertificateArn=arn): + AwsAcmCertificate.collect([res], builder) + + # Default behavior: in case the class has an ApiSpec, call the api and call collect. + log.debug(f"Collecting {cls.__name__} in region {builder.region.name}") + try: + for item in builder.client.list( + aws_service=service_name, action="list-certificates", result_name="CertificateSummaryList" + ): + builder.submit_work(service_name, fetch_certificate, item["CertificateArn"]) + except Boto3Error as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.error(msg, log) + raise + except Exception as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.info(msg, log) + raise + + @classmethod + def called_collect_apis(cls) -> List[AwsApiSpec]: + return [AwsApiSpec(service_name, "list-certificates"), cls.api_spec] + + +resources: List[Type[AwsResource]] = [AwsAcmCertificate] diff --git a/plugins/aws/resoto_plugin_aws/resource/apigateway.py b/plugins/aws/resoto_plugin_aws/resource/apigateway.py index 411737d528..ca9a7fc386 100644 --- 
a/plugins/aws/resoto_plugin_aws/resource/apigateway.py +++ b/plugins/aws/resoto_plugin_aws/resource/apigateway.py @@ -62,7 +62,7 @@ def service_name(cls) -> str: @define(eq=False, slots=False) class AwsApiGatewayMethodResponse: - kind: ClassVar[str] = "aws_api_gateway_method_response" + kind: ClassVar[str] = "aws_apigateway_method_response" kind_display: ClassVar[str] = "AWS API Gateway Method Response" kind_description: ClassVar[str] = ( "API Gateway Method Response allows users to define the response parameters" @@ -81,7 +81,7 @@ class AwsApiGatewayMethodResponse: @define(eq=False, slots=False) class AwsApiGatewayIntegrationResponse: - kind: ClassVar[str] = "aws_api_gateway_integration_response" + kind: ClassVar[str] = "aws_apigateway_integration_response" kind_display: ClassVar[str] = "AWS API Gateway Integration Response" kind_description: ClassVar[str] = ( "API Gateway Integration Response is used to define the response structure" @@ -103,7 +103,7 @@ class AwsApiGatewayIntegrationResponse: @define(eq=False, slots=False) class AwsApiGatewayIntegration: - kind: ClassVar[str] = "aws_api_gateway_integration" + kind: ClassVar[str] = "aws_apigateway_integration" kind_display: ClassVar[str] = "AWS API Gateway Integration" kind_description: ClassVar[str] = ( "API Gateway Integration is a feature provided by AWS API Gateway that allows" @@ -146,7 +146,7 @@ class AwsApiGatewayIntegration: @define(eq=False, slots=False) class AwsApiGatewayMethod: - kind: ClassVar[str] = "aws_api_gateway_method" + kind: ClassVar[str] = "aws_apigateway_method" kind_display: ClassVar[str] = "AWS API Gateway Method" kind_description: ClassVar[str] = ( "AWS API Gateway Method allows users to define the individual methods that" @@ -182,13 +182,13 @@ class AwsApiGatewayMethod: @define(eq=False, slots=False) class AwsApiGatewayResource(AwsResource): # collection of resource resources happens in AwsApiGatewayRestApi.collect() - kind: ClassVar[str] = "aws_api_gateway_resource" + kind: 
ClassVar[str] = "aws_apigateway_resource" kind_display: ClassVar[str] = "AWS API Gateway Resource" kind_description: ClassVar[str] = ( "API Gateway Resource is a logical unit used in API Gateway to represent a" " part of an API's resource hierarchy." ) - reference_kinds: ClassVar[ModelReference] = {"successors": {"default": ["aws_api_gateway_authorizer"]}} + reference_kinds: ClassVar[ModelReference] = {"successors": {"default": ["aws_apigateway_authorizer"]}} mapping: ClassVar[Dict[str, Bender]] = { "id": S("id"), "resource_parent_id": S("parentId"), @@ -234,7 +234,7 @@ def service_name(cls) -> str: @define(eq=False, slots=False) class AwsApiGatewayAuthorizer(AwsResource): # collection of authorizer resources happens in AwsApiGatewayRestApi.collect() - kind: ClassVar[str] = "aws_api_gateway_authorizer" + kind: ClassVar[str] = "aws_apigateway_authorizer" kind_display: ClassVar[str] = "AWS API Gateway Authorizer" kind_description: ClassVar[str] = ( "API Gateway Authorizers are mechanisms that help control access to APIs" @@ -303,7 +303,7 @@ def service_name(cls) -> str: @define(eq=False, slots=False) class AwsApiGatewayCanarySetting: - kind: ClassVar[str] = "aws_api_gateway_canary_setting" + kind: ClassVar[str] = "aws_apigateway_canary_setting" kind_display: ClassVar[str] = "AWS API Gateway Canary Setting" kind_description: ClassVar[str] = ( "API Gateway Canary Setting is a feature in AWS API Gateway that allows you" @@ -325,7 +325,7 @@ class AwsApiGatewayCanarySetting: @define(eq=False, slots=False) class AwsApiGatewayStage(ApiGatewayTaggable, AwsResource): # collection of stage resources happens in AwsApiGatewayRestApi.collect() - kind: ClassVar[str] = "aws_api_gateway_stage" + kind: ClassVar[str] = "aws_apigateway_stage" kind_display: ClassVar[str] = "AWS API Gateway Stage" kind_description: ClassVar[str] = ( "API Gateway Stages are environment configurations for deploying and managing" @@ -386,14 +386,14 @@ def called_mutator_apis(cls) -> List[AwsApiSpec]: 
@define(eq=False, slots=False) class AwsApiGatewayDeployment(AwsResource): # collection of deployment resources happens in AwsApiGatewayRestApi.collect() - kind: ClassVar[str] = "aws_api_gateway_deployment" + kind: ClassVar[str] = "aws_apigateway_deployment" kind_display: ClassVar[str] = "AWS API Gateway Deployment" kind_description: ClassVar[str] = ( "API Gateway Deployments represents a deployment of an API to an API Gateway stage." " This allows the API to be invocable by end-users." ) - # edge to aws_api_gateway_stage is established in AwsApiGatewayRestApi.collect() - reference_kinds: ClassVar[ModelReference] = {"successors": {"default": ["aws_api_gateway_stage"]}} + # edge to aws_apigateway_stage is established in AwsApiGatewayRestApi.collect() + reference_kinds: ClassVar[ModelReference] = {"successors": {"default": ["aws_apigateway_stage"]}} mapping: ClassVar[Dict[str, Bender]] = { "id": S("id"), @@ -428,7 +428,7 @@ def service_name(cls) -> str: @define(eq=False, slots=False) class AwsApiGatewayEndpointConfiguration: - kind: ClassVar[str] = "aws_api_gateway_endpoint_configuration" + kind: ClassVar[str] = "aws_apigateway_endpoint_configuration" kind_display: ClassVar[str] = "AWS API Gateway Endpoint Configuration" kind_description: ClassVar[str] = ( "API Gateway Endpoint Configuration is a configuration that defines the" @@ -445,7 +445,7 @@ class AwsApiGatewayEndpointConfiguration: @define(eq=False, slots=False) class AwsApiGatewayRestApi(ApiGatewayTaggable, AwsResource): - kind: ClassVar[str] = "aws_api_gateway_rest_api" + kind: ClassVar[str] = "aws_apigateway_rest_api" kind_display: ClassVar[str] = "AWS API Gateway REST API" kind_description: ClassVar[str] = ( "API Gateway is a fully managed service that makes it easy for developers to" @@ -458,9 +458,9 @@ class AwsApiGatewayRestApi(ApiGatewayTaggable, AwsResource): "successors": { "default": [ "aws_vpc_endpoint", - "aws_api_gateway_deployment", - "aws_api_gateway_authorizer", - "aws_api_gateway_resource", + 
"aws_apigateway_deployment", + "aws_apigateway_authorizer", + "aws_apigateway_resource", ], "delete": ["aws_vpc_endpoint"], } @@ -581,7 +581,7 @@ def delete_resource(self, client: AwsClient, graph: Graph) -> bool: @define(eq=False, slots=False) class AwsApiGatewayMutualTlsAuthentication: - kind: ClassVar[str] = "aws_api_gateway_mutual_tls_authentication" + kind: ClassVar[str] = "aws_apigateway_mutual_tls_authentication" kind_display: ClassVar[str] = "AWS API Gateway Mutual TLS Authentication" kind_description: ClassVar[str] = ( "API Gateway Mutual TLS Authentication enables mutual TLS authentication for" @@ -600,7 +600,7 @@ class AwsApiGatewayMutualTlsAuthentication: @define(eq=False, slots=False) class AwsApiGatewayDomainName(ApiGatewayTaggable, AwsResource): - kind: ClassVar[str] = "aws_api_gateway_domain_name" + kind: ClassVar[str] = "aws_apigateway_domain_name" kind_display: ClassVar[str] = "AWS API Gateway Domain Name" kind_description: ClassVar[str] = ( "API Gateway Domain Name is a custom domain name that you can associate with" diff --git a/plugins/aws/resoto_plugin_aws/resource/autoscaling.py b/plugins/aws/resoto_plugin_aws/resource/autoscaling.py index a1839f15f6..693124bbff 100644 --- a/plugins/aws/resoto_plugin_aws/resource/autoscaling.py +++ b/plugins/aws/resoto_plugin_aws/resource/autoscaling.py @@ -3,7 +3,7 @@ from attrs import define, field from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder -from resoto_plugin_aws.resource.ec2 import AwsEc2Instance +from resoto_plugin_aws.resource.ec2 import AwsEc2Instance, AwsEc2LaunchTemplate from resoto_plugin_aws.utils import ToDict from resotolib.baseresources import BaseAutoScalingGroup, ModelReference from resotolib.graph import Graph @@ -284,7 +284,7 @@ class AwsAutoScalingGroup(AwsResource, BaseAutoScalingGroup): ) api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "describe-auto-scaling-groups", "AutoScalingGroups") reference_kinds: ClassVar[ModelReference] = { - 
"successors": {"default": ["aws_ec2_instance"]}, + "successors": {"default": ["aws_ec2_instance", "aws_ec2_launch_template"]}, "predecessors": {"delete": ["aws_ec2_instance"]}, } mapping: ClassVar[Dict[str, Bender]] = { @@ -357,6 +357,8 @@ class AwsAutoScalingGroup(AwsResource, BaseAutoScalingGroup): def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: for instance in self.autoscaling_instances: builder.dependant_node(self, clazz=AwsEc2Instance, id=instance.instance_id) + if (tpl := self.autoscaling_launch_template) and (tid := tpl.launch_template_id): + builder.add_edge(self, clazz=AwsEc2LaunchTemplate, id=tid) def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool: client.call( diff --git a/plugins/aws/resoto_plugin_aws/resource/base.py b/plugins/aws/resoto_plugin_aws/resource/base.py index acf0c0bf4a..cbffbfd3f6 100644 --- a/plugins/aws/resoto_plugin_aws/resource/base.py +++ b/plugins/aws/resoto_plugin_aws/resource/base.py @@ -27,7 +27,7 @@ ModelReference, ) from resotolib.config import Config, current_config -from resotolib.core.actions import CoreFeedback +from resotolib.core.actions import CoreFeedback, SuppressWithFeedback from resotolib.graph import ByNodeId, BySearchCriteria, EdgeKey, Graph, NodeSelector from resotolib.json import from_json, value_in_path from resotolib.json_bender import Bender, bend @@ -49,17 +49,21 @@ def get_client(config: Config, resource: BaseResource) -> AwsClient: T = TypeVar("T") -def parse_json(json: Json, clazz: Type[T], builder: GraphBuilder) -> Optional[T]: +def parse_json( + json: Json, clazz: Type[T], builder: GraphBuilder, mapping: Optional[Dict[str, Bender]] = None +) -> Optional[T]: """ Use this method to parse json into a class. If the json can not be parsed, the error is reported to the core. Based on configuration, either the exception is raised or None is returned. :param json: the json to parse. :param clazz: the class to parse into. - :param builder: the graph builder. 
+ :param builder: the graph builder. + :param mapping: the optional mapping to apply before parsing. :return: The parsed object or None. """ try: - return from_json(json, clazz) + mapped = bend(mapping, json) if mapping is not None else json + return from_json(mapped, clazz) except Exception as e: # report and log the error builder.core_feedback.error(f"Failed to parse json into {clazz.__name__}: {e}. Source: {json}", log) @@ -173,8 +177,7 @@ def id_from_arn(arn: str) -> str: @classmethod def from_api(cls: Type[AwsResourceType], json: Json, builder: GraphBuilder) -> Optional[AwsResourceType]: - mapped = bend(cls.mapping, json) - return parse_json(mapped, cls, builder) + return parse_json(json, cls, builder, cls.mapping) @classmethod def collect_resources(cls: Type[AwsResource], builder: GraphBuilder) -> None: @@ -408,6 +411,9 @@ def __init__( self.metrics_start = start self.metrics_delta = delta + def suppress(self, message: str) -> SuppressWithFeedback: + return SuppressWithFeedback(message, self.core_feedback, log) + def submit_work(self, service: str, fn: Callable[..., T], *args: Any, **kwargs: Any) -> Future[T]: """ Use this method for work that can be done in parallel. 
diff --git a/plugins/aws/resoto_plugin_aws/resource/cloudfront.py b/plugins/aws/resoto_plugin_aws/resource/cloudfront.py index 28966e20e3..7b5c514b8e 100644 --- a/plugins/aws/resoto_plugin_aws/resource/cloudfront.py +++ b/plugins/aws/resoto_plugin_aws/resource/cloudfront.py @@ -2,12 +2,15 @@ from typing import ClassVar, Dict, List, Optional, Type from attr import define, field +from boto3.exceptions import Boto3Error from resoto_plugin_aws.aws_client import AwsClient +from resoto_plugin_aws.resource.acm import AwsAcmCertificate from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder from resoto_plugin_aws.resource.iam import AwsIamServerCertificate from resoto_plugin_aws.resource.lambda_ import AwsLambdaFunction from resoto_plugin_aws.resource.s3 import AwsS3Bucket +from resoto_plugin_aws.resource.waf import AwsWafWebACL from resoto_plugin_aws.utils import ToDict from resotolib.baseresources import ModelReference from resotolib.graph import Graph @@ -33,8 +36,8 @@ def add_tags(res: AwsResource) -> None: builder.submit_work(service_name, add_tags, instance) @staticmethod - def delete_cloudfront_resource(client: AwsClient, resource: str, id: str) -> bool: - description = client.get(service_name, f"get-{resource}", None, None, Id=id) + def delete_cloudfront_resource(client: AwsClient, resource: str, rid: str) -> bool: + description = client.get(service_name, f"get-{resource}", None, None, Id=rid) if description: etag = description.get("ETag", None) if etag: @@ -42,7 +45,7 @@ def delete_cloudfront_resource(client: AwsClient, resource: str, id: str) -> boo aws_service=service_name, action=f"delete-{resource}", result_name=None, - Id=id, + Id=rid, IfMatch=etag, ) return True @@ -192,7 +195,7 @@ class AwsCloudFrontOriginGroupMembers: mapping: ClassVar[Dict[str, Bender]] = { "origin_id": S("OriginId"), } - members: Optional[str] = field(default=None) + origin_id: Optional[str] = field(default=None) @define(eq=False, slots=False) @@ -303,7 +306,7 
@@ class AwsCloudFrontDefaultCacheBehavior: "compress": S("Compress"), "lambda_function_associations": S("LambdaFunctionAssociations", "Items", default=[]) >> ForallBend(AwsCloudFrontLambdaFunctionAssociation.mapping), - "function_association": S("FunctionAssociations", "Items", default=[]) + "function_associations": S("FunctionAssociations", "Items", default=[]) >> ForallBend(AwsCloudFrontFunctionAssociation.mapping), "field_level_encryption_id": S("FieldLevelEncryptionId"), "realtime_log_config_arn": S("RealtimeLogConfigArn"), @@ -322,8 +325,8 @@ class AwsCloudFrontDefaultCacheBehavior: allowed_methods: List[str] = field(factory=list) smooth_streaming: Optional[bool] = field(default=None) compress: Optional[bool] = field(default=None) - lambda_function_association: List[AwsCloudFrontLambdaFunctionAssociation] = field(factory=list) - function_association: List[AwsCloudFrontFunctionAssociation] = field(factory=list) + lambda_function_associations: List[AwsCloudFrontLambdaFunctionAssociation] = field(factory=list) + function_associations: List[AwsCloudFrontFunctionAssociation] = field(factory=list) field_level_encryption_id: Optional[str] = field(default=None) realtime_log_config_arn: Optional[str] = field(default=None) cache_policy_id: Optional[str] = field(default=None) @@ -352,9 +355,9 @@ class AwsCloudFrontCacheBehavior: "allowed_methods": S("AllowedMethods", "Items", default=[]), "smooth_streaming": S("SmoothStreaming"), "compress": S("Compress"), - "lambda_function_association": S("LambdaFunctionAssociations", "Items", default=[]) + "lambda_function_associations": S("LambdaFunctionAssociations", "Items", default=[]) >> ForallBend(AwsCloudFrontLambdaFunctionAssociation.mapping), - "function_association": S("FunctionAssociations", "Items", default=[]) + "function_associations": S("FunctionAssociations", "Items", default=[]) >> ForallBend(AwsCloudFrontFunctionAssociation.mapping), "field_level_encryption_id": S("FieldLevelEncryptionId"), 
"realtime_log_config_arn": S("RealtimeLogConfigArn"), @@ -374,8 +377,8 @@ class AwsCloudFrontCacheBehavior: allowed_methods: List[str] = field(factory=list) smooth_streaming: Optional[bool] = field(default=None) compress: Optional[bool] = field(default=None) - lambda_function_association: List[AwsCloudFrontLambdaFunctionAssociation] = field(factory=list) - function_association: List[AwsCloudFrontFunctionAssociation] = field(factory=list) + lambda_function_associations: List[AwsCloudFrontLambdaFunctionAssociation] = field(factory=list) + function_associations: List[AwsCloudFrontFunctionAssociation] = field(factory=list) field_level_encryption_id: Optional[str] = field(default=None) realtime_log_config_arn: Optional[str] = field(default=None) cache_policy_id: Optional[str] = field(default=None) @@ -462,76 +465,213 @@ class AwsCloudFrontAliasICPRecordal: icp_recordal_status: Optional[str] = field(default=None) +@define(eq=False, slots=False) +class AwsCloudFrontSigner: + kind: ClassVar[str] = "aws_cloudfront_signer" + mapping: ClassVar[Dict[str, Bender]] = { + "aws_account_number": S("AwsAccountNumber"), + "key_pair_ids": S("KeyPairIds", "Items"), + } + aws_account_number: Optional[str] = field(default=None, metadata={"description": "An Amazon Web Services account number that contains active CloudFront key pairs that CloudFront can use to verify the signatures of signed URLs and signed cookies. 
If the Amazon Web Services account that owns the key pairs is the same account that owns the CloudFront distribution, the value of this field is self."}) # fmt: skip + key_pair_ids: Optional[List[str]] = field(default=None, metadata={"description": "A list of CloudFront key pair identifiers."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsCloudFrontActiveTrustedSigners: + kind: ClassVar[str] = "aws_cloudfront_active_trusted_signers" + mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "quantity": S("Quantity"), + "items": S("Items", default=[]) >> ForallBend(AwsCloudFrontSigner.mapping), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "This field is true if any of the Amazon Web Services accounts in the list are configured as trusted signers. If not, this field is false."}) # fmt: skip + quantity: Optional[int] = field(default=None, metadata={"description": "The number of Amazon Web Services accounts in the list."}) # fmt: skip + items: Optional[List[AwsCloudFrontSigner]] = field(factory=list, metadata={"description": "A list of Amazon Web Services accounts and the identifiers of active CloudFront key pairs in each account that CloudFront can use to verify the signatures of signed URLs and signed cookies."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsCloudFrontKGKeyPairIds: + kind: ClassVar[str] = "aws_cloudfront_kg_key_pair_ids" + mapping: ClassVar[Dict[str, Bender]] = { + "key_group_id": S("KeyGroupId"), + "key_pair_ids": S("KeyPairIds", "Items"), + } + key_group_id: Optional[str] = field(default=None, metadata={"description": "The identifier of the key group that contains the public keys."}) # fmt: skip + key_pair_ids: Optional[List[str]] = field(default=None, metadata={"description": "A list of CloudFront key pair identifiers."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsCloudFrontActiveTrustedKeyGroups: + kind: ClassVar[str] = "aws_cloudfront_active_trusted_key_groups" 
+ mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "quantity": S("Quantity"), + "items": S("Items", default=[]) >> ForallBend(AwsCloudFrontKGKeyPairIds.mapping), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "This field is true if any of the key groups have public keys that CloudFront can use to verify the signatures of signed URLs and signed cookies. If not, this field is false."}) # fmt: skip + quantity: Optional[int] = field(default=None, metadata={"description": "The number of key groups in the list."}) # fmt: skip + items: Optional[List[AwsCloudFrontKGKeyPairIds]] = field(factory=list, metadata={"description": "A list of key groups, including the identifiers of the public keys in each key group that CloudFront can use to verify the signatures of signed URLs and signed cookies."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsCloudFrontAllowedMethods: + kind: ClassVar[str] = "aws_cloudfront_allowed_methods" + mapping: ClassVar[Dict[str, Bender]] = { + "quantity": S("Quantity"), + "items": S("Items", default=[]), + "cached_methods": S("CachedMethods", "Items"), + } + quantity: Optional[int] = field(default=None, metadata={"description": "The number of HTTP methods that you want CloudFront to forward to your origin. Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD, and OPTIONS requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests)."}) # fmt: skip + items: Optional[List[str]] = field(factory=list, metadata={"description": "A complex type that contains the HTTP methods that you want CloudFront to process and forward to your origin."}) # fmt: skip + cached_methods: Optional[List[str]] = field(default=None, metadata={"description": "A complex type that controls whether CloudFront caches the response to requests using the specified HTTP methods. There are two choices: CloudFront caches responses to GET and HEAD requests. 
CloudFront caches responses to GET, HEAD, and OPTIONS requests."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsCloudFrontLoggingConfig: + kind: ClassVar[str] = "aws_cloudfront_logging_config" + mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "include_cookies": S("IncludeCookies"), + "bucket": S("Bucket"), + "prefix": S("Prefix"), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't want to enable logging when you create a distribution or if you want to disable logging for an existing distribution, specify false for Enabled, and specify empty Bucket and Prefix elements."}) # fmt: skip + include_cookies: Optional[bool] = field(default=None, metadata={"description": "Specifies whether you want CloudFront to include cookies in access logs, specify true for IncludeCookies. If you choose to include cookies in logs, CloudFront logs all cookies regardless of how you configure the cache behaviors for this distribution."}) # fmt: skip + bucket: Optional[str] = field(default=None, metadata={"description": "The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com."}) # fmt: skip + prefix: Optional[str] = field(default=None, metadata={"description": "An optional string that you want CloudFront to prefix to the access log filenames for this distribution, for example, myprefix/. 
If you want to enable logging, but you don't want to specify a prefix, you still must include an empty Prefix element in the Logging element."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsCloudFrontDistributionConfig: + kind: ClassVar[str] = "aws_cloudfront_distribution_config" + mapping: ClassVar[Dict[str, Bender]] = { + "caller_reference": S("CallerReference"), + "aliases": S("Aliases", "Items"), + "default_root_object": S("DefaultRootObject"), + "origins": S("Origins", "Items", default=[]) >> ForallBend(AwsCloudFrontOrigin.mapping), + "origin_groups": S("OriginGroups", "Items", default=[]) >> ForallBend(AwsCloudFrontOriginGroup.mapping), + "default_cache_behavior": S("DefaultCacheBehavior") >> Bend(AwsCloudFrontDefaultCacheBehavior.mapping), + "cache_behaviors": S("CacheBehaviors", "Items", default=[]) >> ForallBend(AwsCloudFrontCacheBehavior.mapping), + "custom_error_responses": S("CustomErrorResponses", "Items", default=[]) + >> ForallBend(AwsCloudFrontCustomErrorResponse.mapping), + "comment": S("Comment"), + "logging": S("Logging") >> Bend(AwsCloudFrontLoggingConfig.mapping), + "price_class": S("PriceClass"), + "enabled": S("Enabled"), + "viewer_certificate": S("ViewerCertificate") >> Bend(AwsCloudFrontViewerCertificate.mapping), + "restrictions": S("Restrictions") >> Bend(AwsCloudFrontRestrictions.mapping), + "web_acl_id": S("WebACLId"), + "http_version": S("HttpVersion"), + "is_ipv6_enabled": S("IsIPV6Enabled"), + "continuous_deployment_policy_id": S("ContinuousDeploymentPolicyId"), + "staging": S("Staging"), + } + caller_reference: Optional[str] = field(default=None, metadata={"description": "A unique value (for example, a date-time stamp) that ensures that the request can't be replayed. 
If the value of CallerReference is new (regardless of the content of the DistributionConfig object), CloudFront creates a new distribution."}) # fmt: skip + aliases: Optional[List[str]] = field(default=None, metadata={"description": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution."}) # fmt: skip + default_root_object: Optional[str] = field(default=None, metadata={"description": "The object that you want CloudFront to request from your origin (for example, index.html) when a viewer requests the root URL for your distribution (https://www.example.com) instead of an object in your distribution (https://www.example.com/product-description.html)."}) # fmt: skip + origins: Optional[List[AwsCloudFrontOrigin]] = field(default=None, metadata={"description": "A complex type that contains information about origins for this distribution."}) # fmt: skip + origin_groups: Optional[List[AwsCloudFrontOriginGroup]] = field(default=None, metadata={"description": "A complex type that contains information about origin groups for this distribution."}) # fmt: skip + default_cache_behavior: Optional[AwsCloudFrontDefaultCacheBehavior] = field(default=None, metadata={"description": "A complex type that describes the default cache behavior if you don't specify a CacheBehavior element or if files don't match any of the values of PathPattern in CacheBehavior elements. You must create exactly one default cache behavior."}) # fmt: skip + cache_behaviors: Optional[List[AwsCloudFrontCacheBehavior]] = field(default=None, metadata={"description": "Optional: A complex type that contains cache behaviors for this distribution. 
If Quantity is 0, you can omit Items."}) # fmt: skip + custom_error_responses: Optional[List[AwsCloudFrontCustomErrorResponse]] = field(default=None, metadata={"description": "A complex type that contains a CustomErrorResponse element for each HTTP status code for which you want to specify a custom error page and/or a caching duration."}) # fmt: skip + comment: Optional[str] = field(default=None, metadata={"description": "A comment to describe the distribution. The comment cannot be longer than 128 characters."}) # fmt: skip + logging: Optional[AwsCloudFrontLoggingConfig] = field(default=None, metadata={"description": "A complex type that controls whether access logs are written for the distribution. For more information about logging, see Access Logs in the Amazon CloudFront Developer Guide."}) # fmt: skip + price_class: Optional[str] = field(default=None, metadata={"description": "The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify PriceClass_All, CloudFront responds to requests for your objects from all CloudFront edge locations."}) # fmt: skip + enabled: Optional[bool] = field(default=None, metadata={"description": "From this field, you can enable or disable the selected distribution."}) # fmt: skip + viewer_certificate: Optional[AwsCloudFrontViewerCertificate] = field(default=None, metadata={"description": "A complex type that determines the distribution's SSL/TLS configuration for communicating with viewers."}) # fmt: skip + restrictions: Optional[AwsCloudFrontRestrictions] = field(default=None, metadata={"description": "A complex type that identifies ways in which you want to restrict distribution of your content."}) # fmt: skip + web_acl_id: Optional[str] = field(default=None, metadata={"description": "A unique identifier that specifies the WAF web ACL, if any, to associate with this distribution. 
To specify a web ACL created using the latest version of WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a."}) # fmt: skip + http_version: Optional[str] = field(default=None, metadata={"description": "(Optional) Specify the maximum HTTP version(s) that you want viewers to use to communicate with CloudFront. The default value for new web distributions is http2. Viewers that don't support HTTP/2 automatically use an earlier HTTP version."}) # fmt: skip + is_ipv6_enabled: Optional[bool] = field(default=None, metadata={"description": "If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your distribution, specify true. If you specify false, CloudFront responds to IPv6 DNS requests with the DNS response code NOERROR and with no IP addresses."}) # fmt: skip + continuous_deployment_policy_id: Optional[str] = field(default=None, metadata={"description": "The identifier of a continuous deployment policy. For more information, see CreateContinuousDeploymentPolicy."}) # fmt: skip + staging: Optional[bool] = field(default=None, metadata={"description": "A Boolean that indicates whether this is a staging distribution. When this value is true, this is a staging distribution. When this value is false, this is not a staging distribution."}) # fmt: skip + + @define(eq=False, slots=False) class AwsCloudFrontDistribution(CloudFrontTaggable, CloudFrontResource, AwsResource): kind: ClassVar[str] = "aws_cloudfront_distribution" + api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("cloudfront", "get-distribution", "Distribution") kind_display: ClassVar[str] = "AWS CloudFront Distribution" kind_description: ClassVar[str] = ( "CloudFront Distributions are a content delivery network (CDN) offered by" " Amazon Web Services, which enables users to deliver their content to end-" " users with low latency and high transfer speeds." 
) - api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-distributions", "DistributionList.Items") reference_kinds: ClassVar[ModelReference] = { "predecessors": {"delete": ["aws_lambda_function"]}, "successors": { "default": [ - "aws_lambda_function", - "aws_iam_server_certificate", + "aws_acm_certificate", + "aws_cloudfront_cache_policy", + "aws_cloudfront_field_level_encryption_config", "aws_cloudfront_function", + "aws_cloudfront_origin_access_control", "aws_cloudfront_realtime_log_config", - "aws_cloudfront_field_level_encryption_config", "aws_cloudfront_response_headers_policy", - "aws_cloudfront_cache_policy", - "aws_cloudfront_origin_access_control", + "aws_iam_server_certificate", + "aws_lambda_function", "aws_s3_bucket", + "aws_waf_web_acl", ] }, } mapping: ClassVar[Dict[str, Bender]] = { "id": S("Id"), + "tags": S("Tags", default=[]) >> ToDict(), + "name": S("DomainName"), "mtime": S("LastModifiedTime"), "arn": S("ARN"), "distribution_status": S("Status"), - "distribution_domain_name": S("DomainName"), - "distribution_aliases": S("Aliases", "Items", default=[]), - "distribution_origin": S("Origins", "Items", default=[]) >> ForallBend(AwsCloudFrontOrigin.mapping), - "distribution_origin_group": S("OriginGroups", "Items", default=[]) - >> ForallBend(AwsCloudFrontOriginGroup.mapping), - "distribution_default_cache_behavior": S("DefaultCacheBehavior") - >> Bend(AwsCloudFrontDefaultCacheBehavior.mapping), - "distribution_cache_behavior": S("CacheBehaviors", "Items", default=[]) - >> ForallBend(AwsCloudFrontCacheBehavior.mapping), - "distribution_custom_error_response": S("CustomErrorResponses", "Items", default=[]) - >> ForallBend(AwsCloudFrontCustomErrorResponse.mapping), - "distribution_comment": S("Comment"), - "distribution_price_class": S("PriceClass"), - "distribution_enabled": S("Enabled"), - "distribution_viewer_certificate": S("ViewerCertificate") >> Bend(AwsCloudFrontViewerCertificate.mapping), - "distribution_restrictions": 
S("Restrictions") >> Bend(AwsCloudFrontRestrictions.mapping), - "distribution_web_acl_id": S("WebACLId"), - "distribution_http_version": S("HttpVersion"), - "distribution_is_ipv6_enabled": S("IsIPV6Enabled"), + "distribution_in_progress_invalidation_batches": S("InProgressInvalidationBatches"), + "distribution_active_trusted_signers": S("ActiveTrustedSigners") + >> Bend(AwsCloudFrontActiveTrustedSigners.mapping), + "distribution_active_trusted_key_groups": S("ActiveTrustedKeyGroups") + >> Bend(AwsCloudFrontActiveTrustedKeyGroups.mapping), + "distribution_config": S("DistributionConfig") >> Bend(AwsCloudFrontDistributionConfig.mapping), "distribution_alias_icp_recordals": S("AliasICPRecordals", default=[]) >> ForallBend(AwsCloudFrontAliasICPRecordal.mapping), } - distribution_status: Optional[str] = field(default=None) - distribution_domain_name: Optional[str] = field(default=None) - distribution_aliases: List[str] = field(factory=list) - distribution_origin: List[AwsCloudFrontOrigin] = field(factory=list) - distribution_origin_group: List[AwsCloudFrontOriginGroup] = field(factory=list) - distribution_default_cache_behavior: Optional[AwsCloudFrontDefaultCacheBehavior] = field(default=None) - distribution_cache_behavior: List[AwsCloudFrontCacheBehavior] = field(factory=list) - distribution_custom_error_response: List[AwsCloudFrontCustomErrorResponse] = field(factory=list) - distribution_comment: Optional[str] = field(default=None) - distribution_price_class: Optional[str] = field(default=None) - distribution_enabled: Optional[bool] = field(default=None) - distribution_viewer_certificate: Optional[AwsCloudFrontViewerCertificate] = field(default=None) - distribution_restrictions: Optional[AwsCloudFrontRestrictions] = field(default=None) - distribution_web_acl_id: Optional[str] = field(default=None) - distribution_http_version: Optional[str] = field(default=None) - distribution_is_ipv6_enabled: Optional[bool] = field(default=None) - distribution_alias_icp_recordals: 
List[AwsCloudFrontAliasICPRecordal] = field(factory=list) + distribution_status: Optional[str] = field(default=None, metadata={"description": "The distribution's status. When the status is Deployed, the distribution's information is fully propagated to all CloudFront edge locations."}) # fmt: skip + distribution_in_progress_invalidation_batches: Optional[int] = field(default=None, metadata={"description": "The number of invalidation batches currently in progress."}) # fmt: skip + distribution_active_trusted_signers: Optional[AwsCloudFrontActiveTrustedSigners] = field(default=None, metadata={"description": "We recommend using TrustedKeyGroups instead of TrustedSigners."}) # fmt: skip + distribution_active_trusted_key_groups: Optional[AwsCloudFrontActiveTrustedKeyGroups] = field(default=None, metadata={"description": "This field contains a list of key groups and the public keys in each key group that CloudFront can use to verify the signatures of signed URLs or signed cookies."}) # fmt: skip + distribution_config: Optional[AwsCloudFrontDistributionConfig] = field(default=None, metadata={"description": "The distribution's configuration."}) # fmt: skip + distribution_alias_icp_recordals: Optional[List[AwsCloudFrontAliasICPRecordal]] = field(factory=list, metadata={"description": "Amazon Web Services services in China customers must file for an Internet Content Provider (ICP) recordal if they want to serve content publicly on an alternate domain name, also known as a CNAME, that they've added to CloudFront."}) # fmt: skip + + @classmethod + def collect_resources(cls: Type[AwsResource], builder: GraphBuilder) -> None: + def fetch_distribution(did: str) -> None: + with builder.suppress(f"{service_name}.get-distribution"): + if js := builder.client.get(service_name, "get-distribution", "Distribution", Id=did): + AwsCloudFrontDistribution.collect([js], builder) + + # Default behavior: in case the class has an ApiSpec, call the api and call collect. 
+ log.debug(f"Collecting {cls.__name__} in region {builder.region.name}") + try: + for item in builder.client.list( + aws_service=service_name, action="list-distributions", result_name="DistributionList.Items" + ): + builder.submit_work(service_name, fetch_distribution, item["Id"]) + if builder.config.collect_usage_metrics: + try: + cls.collect_usage_metrics(builder) + except Exception as e: + log.warning(f"Failed to collect usage metrics for {cls.__name__}: {e}") + except Boto3Error as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.error(msg, log) + raise + except Exception as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.info(msg, log) + raise + + @classmethod + def called_collect_apis(cls) -> List[AwsApiSpec]: + return super().called_collect_apis() + [ + AwsApiSpec(service_name, "get-distribution"), + AwsApiSpec(service_name, "list-distributions"), + ] @classmethod def called_mutator_apis(cls) -> List[AwsApiSpec]: @@ -543,53 +683,58 @@ def called_mutator_apis(cls) -> List[AwsApiSpec]: ] def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: - # edges from default cache behaviour - if dcb := self.distribution_default_cache_behavior: - for a in dcb.lambda_function_association: - builder.dependant_node(self, clazz=AwsLambdaFunction, arn=a.lambda_function_arn) - for b in dcb.function_association: - builder.add_edge(self, clazz=AwsCloudFrontFunction, arn=b.function_arn) - if dcb.realtime_log_config_arn: - builder.add_edge(self, clazz=AwsCloudFrontRealtimeLogConfig, arn=dcb.realtime_log_config_arn) - if dcb.field_level_encryption_id: - builder.add_edge(self, clazz=AwsCloudFrontFieldLevelEncryptionConfig, id=dcb.field_level_encryption_id) - if dcb.response_headers_policy_id: - builder.add_edge(self, clazz=AwsCloudFrontResponseHeadersPolicy, id=dcb.response_headers_policy_id) - if dcb.cache_policy_id: - 
builder.add_edge(self, clazz=AwsCloudFrontCachePolicy, id=dcb.cache_policy_id) - - # edges from other cache behaviours - for cb_item in self.distribution_cache_behavior: - for c in cb_item.lambda_function_association: - builder.add_edge(self, clazz=AwsLambdaFunction, arn=c.lambda_function_arn) - for d in cb_item.function_association: - builder.add_edge(self, clazz=AwsCloudFrontFunction, arn=d.function_arn) - if cb_item.field_level_encryption_id: - builder.add_edge( - self, clazz=AwsCloudFrontFieldLevelEncryptionConfig, id=cb_item.field_level_encryption_id - ) - if cb_item.realtime_log_config_arn: - builder.add_edge(self, clazz=AwsCloudFrontRealtimeLogConfig, arn=cb_item.realtime_log_config_arn) - if cb_item.cache_policy_id: - builder.add_edge(self, clazz=AwsCloudFrontCachePolicy, id=cb_item.cache_policy_id) - if cb_item.response_headers_policy_id: - builder.add_edge(self, clazz=AwsCloudFrontResponseHeadersPolicy, id=cb_item.response_headers_policy_id) - - # other edges - if self.distribution_origin: - for entry in self.distribution_origin: + if cfg := self.distribution_config: + # edges from default cache behavior + if dcb := cfg.default_cache_behavior: + for a in dcb.lambda_function_associations: + builder.dependant_node(self, clazz=AwsLambdaFunction, arn=a.lambda_function_arn) + for b in dcb.function_associations: + builder.add_edge(self, clazz=AwsCloudFrontFunction, arn=b.function_arn) + if dcb.realtime_log_config_arn: + builder.add_edge(self, clazz=AwsCloudFrontRealtimeLogConfig, arn=dcb.realtime_log_config_arn) + if dcb.field_level_encryption_id: + builder.add_edge( + self, clazz=AwsCloudFrontFieldLevelEncryptionConfig, id=dcb.field_level_encryption_id + ) + if dcb.response_headers_policy_id: + builder.add_edge(self, clazz=AwsCloudFrontResponseHeadersPolicy, id=dcb.response_headers_policy_id) + if dcb.cache_policy_id: + builder.add_edge(self, clazz=AwsCloudFrontCachePolicy, id=dcb.cache_policy_id) + + # edges from other cache behaviors + for cb_item in 
cfg.cache_behaviors or []: + for c in cb_item.lambda_function_associations: + builder.add_edge(self, clazz=AwsLambdaFunction, arn=c.lambda_function_arn) + for d in cb_item.function_associations: + builder.add_edge(self, clazz=AwsCloudFrontFunction, arn=d.function_arn) + if cb_item.field_level_encryption_id: + builder.add_edge( + self, clazz=AwsCloudFrontFieldLevelEncryptionConfig, id=cb_item.field_level_encryption_id + ) + if cb_item.realtime_log_config_arn: + builder.add_edge(self, clazz=AwsCloudFrontRealtimeLogConfig, arn=cb_item.realtime_log_config_arn) + if cb_item.cache_policy_id: + builder.add_edge(self, clazz=AwsCloudFrontCachePolicy, id=cb_item.cache_policy_id) + if cb_item.response_headers_policy_id: + builder.add_edge( + self, clazz=AwsCloudFrontResponseHeadersPolicy, id=cb_item.response_headers_policy_id + ) + + # other edges + for entry in cfg.origins or []: builder.add_edge(self, clazz=AwsCloudFrontOriginAccessControl, id=entry.origin_access_control_id) builder.add_edge(self, clazz=AwsS3Bucket, name=entry.id) - if self.distribution_viewer_certificate and self.distribution_viewer_certificate.iam_certificate_id: - builder.add_edge( - self, clazz=AwsIamServerCertificate, id=self.distribution_viewer_certificate.iam_certificate_id - ) + if cfg.viewer_certificate and (cid := cfg.viewer_certificate.iam_certificate_id): + builder.add_edge(self, clazz=AwsIamServerCertificate, id=cid) + + if cfg.web_acl_id: + builder.add_edge(self, clazz=AwsWafWebACL, arn=cfg.web_acl_id) - # TODO edge to ACM certificate when applicable (via self.distribution_viewer_certificate.acm_certificate_arn) - # TODO edge to Web Acl when applicable (via self.distribution_web_acl_id) + if (cert := cfg.viewer_certificate) and (arn := cert.acm_certificate_arn): + builder.add_edge(self, clazz=AwsAcmCertificate, arn=arn) - def pre_delete_resource(self, client: AwsClient, graph: Graph) -> bool: + def pre_delete_resource(self, client: AwsClient, _: Graph) -> bool: dist_config = 
client.get(service_name, "get-distribution-config", None, None, Id=self.id) if dist_config: dist_config["DistributionConfig"]["Enabled"] = False diff --git a/plugins/aws/resoto_plugin_aws/resource/cloudtrail.py b/plugins/aws/resoto_plugin_aws/resource/cloudtrail.py index b9c3039301..3bd2c80c47 100644 --- a/plugins/aws/resoto_plugin_aws/resource/cloudtrail.py +++ b/plugins/aws/resoto_plugin_aws/resource/cloudtrail.py @@ -1,7 +1,7 @@ from datetime import datetime from typing import ClassVar, Dict, Optional, Type, List -from attr import define, field as attrs_field +from attr import define, field as attrs_field, field from resoto_plugin_aws.aws_client import AwsClient from resoto_plugin_aws.resource.base import AwsApiSpec, GraphBuilder, AwsResource, parse_json @@ -11,57 +11,79 @@ from resoto_plugin_aws.resource.s3 import AwsS3Bucket from resoto_plugin_aws.resource.sns import AwsSnsTopic from resoto_plugin_aws.utils import ToDict +from resotolib.baseresources import ModelReference, EdgeType from resotolib.graph import Graph +from resotolib.json_bender import Bender, S, bend, ForallBend, EmptyToNone from resotolib.types import Json -from resotolib.baseresources import ModelReference, EdgeType -from resotolib.json_bender import Bender, S, bend, ForallBend, EmptyToNone, F service_name = "cloudtrail" +@define(eq=False, slots=False) +class AwsCloudTrailDataResource: + kind: ClassVar[str] = "aws_cloud_trail_data_resource" + mapping: ClassVar[Dict[str, Bender]] = {"type": S("Type"), "values": S("Values", default=[])} + type: Optional[str] = field(default=None, metadata={"description": "The resource type in which you want to log data events."}) # fmt: skip + values: Optional[List[str]] = field(factory=list, metadata={"description": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsCloudTrailEventSelector: + kind: ClassVar[str] = "aws_cloud_trail_event_selector" + 
@define(eq=False, slots=False)
class AwsCloudTrailAdvancedFieldSelector:
    # One field condition of an advanced event selector, as returned by
    # CloudTrail get-event-selectors. Exactly one of the operator lists is
    # typically populated for a given Field.
    kind: ClassVar[str] = "aws_cloud_trail_advanced_field_selector"
    mapping: ClassVar[Dict[str, Bender]] = {
        "selector_field": S("Field"),
        "equals": S("Equals", default=[]),
        "starts_with": S("StartsWith", default=[]),
        "ends_with": S("EndsWith", default=[]),
        "not_equals": S("NotEquals", default=[]),
        "not_starts_with": S("NotStartsWith", default=[]),
        "not_ends_with": S("NotEndsWith", default=[]),
    }
    selector_field: Optional[str] = field(default=None, metadata={"description": "A field in a CloudTrail event record on which to filter events to be logged."})  # fmt: skip
    equals: Optional[List[str]] = field(factory=list, metadata={"description": "An operator that includes events that match the exact value of the event record field specified as the value of Field. This is the only valid operator that you can use with the readOnly, eventCategory, and resources.type fields."})  # fmt: skip
    starts_with: Optional[List[str]] = field(factory=list, metadata={"description": "An operator that includes events that match the first few characters of the event record field specified as the value of Field."})  # fmt: skip
    ends_with: Optional[List[str]] = field(factory=list, metadata={"description": "An operator that includes events that match the last few characters of the event record field specified as the value of Field."})  # fmt: skip
    not_equals: Optional[List[str]] = field(factory=list, metadata={"description": "An operator that excludes events that match the exact value of the event record field specified as the value of Field."})  # fmt: skip
    not_starts_with: Optional[List[str]] = field(factory=list, metadata={"description": "An operator that excludes events that match the first few characters of the event record field specified as the value of Field."})  # fmt: skip
    not_ends_with: Optional[List[str]] = field(factory=list, metadata={"description": "An operator that excludes events that match the last few characters of the event record field specified as the value of Field."})  # fmt: skip


@define(eq=False, slots=False)
class AwsCloudTrailAdvancedEventSelector:
    # A named group of field selectors; the fine-grained filtering unit of a trail.
    kind: ClassVar[str] = "aws_cloud_trail_advanced_event_selector"
    mapping: ClassVar[Dict[str, Bender]] = {
        "name": S("Name"),
        "field_selectors": S("FieldSelectors", default=[]) >> ForallBend(AwsCloudTrailAdvancedFieldSelector.mapping),
    }
    name: Optional[str] = field(default=None, metadata={"description": "An optional, descriptive name for an advanced event selector, such as Log data events for only two S3 buckets."})  # fmt: skip
    field_selectors: Optional[List[AwsCloudTrailAdvancedFieldSelector]] = field(factory=list, metadata={"description": "Contains all selector statements in an advanced event selector."})  # fmt: skip


@define(eq=False, slots=False)
class AwsCloudTrailEventSelectors:
    # Container for the full get-event-selectors response: both the classic
    # and the advanced selectors configured on a trail.
    kind: ClassVar[str] = "aws_cloud_trail_event_selectors"
    mapping: ClassVar[Dict[str, Bender]] = {
        "event_selectors": S("EventSelectors", default=[]) >> ForallBend(AwsCloudTrailEventSelector.mapping),
        "advanced_event_selectors": S("AdvancedEventSelectors", default=[])
        >> ForallBend(AwsCloudTrailAdvancedEventSelector.mapping),
    }
    event_selectors: Optional[List[AwsCloudTrailEventSelector]] = field(factory=list, metadata={"description": "The event selectors that are configured for the trail."})  # fmt: skip
    advanced_event_selectors: Optional[List[AwsCloudTrailAdvancedEventSelector]] = field(factory=list, metadata={"description": "The advanced event selectors that are configured for the trail."})  # fmt: skip
# Cloudwatch Alarm: Namespace -> Dimension Name -> (Kind, Property)
#
# Lookup table used by AwsCloudwatchAlarm.connect_in_graph to translate an
# alarm's metric namespace + dimension into an edge to the resource kind the
# alarm watches. Keys must match the namespace string CloudWatch reports;
# entries whose key never occurs in real alarms are dead.
# NOTE: "AWS/AutoScaling" is the namespace CloudWatch actually emits for Auto
# Scaling group metrics, and ALB metrics live under "AWS/ApplicationELB".
# The legacy "AWS/ALB" key is kept for backward compatibility.
CloudwatchAlarmReferences: Dict[str, Dict[str, Tuple[str, str]]] = {
    "AWS/EC2": {
        "InstanceId": ("aws_ec2_instance", "id"),
        "AutoScalingGroupName": ("aws_autoscaling_group", "name"),
    },
    "AWS/S3": {"BucketName": ("aws_s3_bucket", "name")},
    "AWS/DynamoDB": {"TableName": ("aws_dynamodb_table", "name")},
    "AWS/EBS": {"VolumeId": ("aws_ebs_volume", "id")},
    "AWS/ECS": {"ClusterName": ("aws_ecs_cluster", "name")},
    "AWS/EFS": {"FileSystemId": ("aws_efs_file_system", "id")},
    "AWS/ELB": {"LoadBalancerName": ("aws_elb_load_balancer", "name")},
    "AWS/ALB": {"LoadBalancer": ("aws_alb_load_balancer", "name")},
    "AWS/ApplicationELB": {"LoadBalancer": ("aws_alb_load_balancer", "name")},
    "AWS/SQS": {"QueueName": ("aws_sqs_queue", "name")},
    "AWS/SNS": {"TopicName": ("aws_sns_topic", "name")},
    "AWS/Redshift": {"ClusterIdentifier": ("aws_redshift_cluster", "id")},
    "AWS/AutoScaling": {"AutoScalingGroupName": ("aws_autoscaling_group", "name")},
    "AWS/Kinesis": {"StreamName": ("aws_kinesis_stream", "name")},
}
"aws_dynamodb_attribute_definition" kind_display: ClassVar[str] = "AWS DynamoDB Attribute Definition" kind_description: ClassVar[ str @@ -59,7 +59,7 @@ class AwsDynamoDbAttributeDefinition: @define(eq=False, slots=False) class AwsDynamoDbKeySchemaElement: - kind: ClassVar[str] = "aws_dynamo_db_key_schema_element" + kind: ClassVar[str] = "aws_dynamodb_key_schema_element" kind_display: ClassVar[str] = "AWS DynamoDB Key Schema Element" kind_description: ClassVar[str] = ( "DynamoDB Key Schema Element represents the key attributes used to uniquely" @@ -72,7 +72,7 @@ class AwsDynamoDbKeySchemaElement: @define(eq=False, slots=False) class AwsDynamoDbProvisionedThroughputDescription: - kind: ClassVar[str] = "aws_dynamo_db_provisioned_throughput_description" + kind: ClassVar[str] = "aws_dynamodb_provisioned_throughput_description" kind_display: ClassVar[str] = "AWS DynamoDB Provisioned Throughput Description" kind_description: ClassVar[str] = ( "DynamoDB Provisioned Throughput is the measurement of the capacity" @@ -95,7 +95,7 @@ class AwsDynamoDbProvisionedThroughputDescription: @define(eq=False, slots=False) class AwsDynamoDbBillingModeSummary: - kind: ClassVar[str] = "aws_dynamo_db_billing_mode_summary" + kind: ClassVar[str] = "aws_dynamodb_billing_mode_summary" kind_display: ClassVar[str] = "AWS DynamoDB Billing Mode Summary" kind_description: ClassVar[str] = ( "DynamoDB Billing Mode Summary provides information about the billing mode" @@ -112,7 +112,7 @@ class AwsDynamoDbBillingModeSummary: @define(eq=False, slots=False) class AwsDynamoDbProjection: - kind: ClassVar[str] = "aws_dynamo_db_projection" + kind: ClassVar[str] = "aws_dynamodb_projection" kind_display: ClassVar[str] = "AWS DynamoDB Projection" kind_description: ClassVar[str] = ( "AWS DynamoDB Projection specifies the set of attributes that are projected into a DynamoDB secondary" @@ -128,7 +128,7 @@ class AwsDynamoDbProjection: @define(eq=False, slots=False) class AwsDynamoDbLocalSecondaryIndexDescription: - 
kind: ClassVar[str] = "aws_dynamo_db_local_secondary_index_description" + kind: ClassVar[str] = "aws_dynamodb_local_secondary_index_description" kind_display: ClassVar[str] = "AWS DynamoDB Local Secondary Index Description" kind_description: ClassVar[str] = ( "The AWS DynamoDB Local Secondary Index Description provides details about a Local Secondary Index (LSI)" @@ -153,7 +153,7 @@ class AwsDynamoDbLocalSecondaryIndexDescription: @define(eq=False, slots=False) class AwsDynamoDbGlobalSecondaryIndexDescription: - kind: ClassVar[str] = "aws_dynamo_db_global_secondary_index_description" + kind: ClassVar[str] = "aws_dynamodb_global_secondary_index_description" kind_display: ClassVar[str] = "AWS DynamoDB Global Secondary Index Description" kind_description: ClassVar[str] = ( "A Global Secondary Index (GSI) in DynamoDB is an additional index that you" @@ -184,7 +184,7 @@ class AwsDynamoDbGlobalSecondaryIndexDescription: @define(eq=False, slots=False) class AwsDynamoDbStreamSpecification: - kind: ClassVar[str] = "aws_dynamo_db_stream_specification" + kind: ClassVar[str] = "aws_dynamodb_stream_specification" kind_display: ClassVar[str] = "AWS DynamoDB Stream Specification" kind_description: ClassVar[str] = ( "AWS DynamoDB Stream Specification defines whether a stream is enabled on a DynamoDB table and the" @@ -201,7 +201,7 @@ class AwsDynamoDbStreamSpecification: @define(eq=False, slots=False) class AwsDynamoDbReplicaGlobalSecondaryIndexDescription: - kind: ClassVar[str] = "aws_dynamo_db_replica_global_secondary_index_description" + kind: ClassVar[str] = "aws_dynamodb_replica_global_secondary_index_description" kind_display: ClassVar[str] = "AWS DynamoDB Replica Global Secondary Index Description" kind_description: ClassVar[str] = ( "The AWS DynamoDB Replica Global Secondary Index Description details the properties of a" @@ -221,7 +221,7 @@ class AwsDynamoDbReplicaGlobalSecondaryIndexDescription: @define(eq=False, slots=False) class AwsDynamoDbTableClassSummary: - kind: 
ClassVar[str] = "aws_dynamo_db_table_class_summary" + kind: ClassVar[str] = "aws_dynamodb_table_class_summary" kind_display: ClassVar[str] = "AWS DynamoDB Table Class Summary" kind_description: ClassVar[str] = ( "The AWS DynamoDB Table Class Summary provides an overview of the table class for" @@ -237,7 +237,7 @@ class AwsDynamoDbTableClassSummary: @define(eq=False, slots=False) class AwsDynamoDbReplicaDescription: - kind: ClassVar[str] = "aws_dynamo_db_replica_description" + kind: ClassVar[str] = "aws_dynamodb_replica_description" kind_display: ClassVar[str] = "AWS DynamoDB Replica Description" kind_description: ClassVar[str] = ( "DynamoDB Replica Description provides detailed information about the replica" @@ -268,7 +268,7 @@ class AwsDynamoDbReplicaDescription: @define(eq=False, slots=False) class AwsDynamoDbRestoreSummary: - kind: ClassVar[str] = "aws_dynamo_db_restore_summary" + kind: ClassVar[str] = "aws_dynamodb_restore_summary" kind_display: ClassVar[str] = "AWS DynamoDB Restore Summary" kind_description: ClassVar[str] = ( "DynamoDB Restore Summary provides an overview of the restore process for" @@ -289,7 +289,7 @@ class AwsDynamoDbRestoreSummary: @define(eq=False, slots=False) class AwsDynamoDbSSEDescription: - kind: ClassVar[str] = "aws_dynamo_db_sse_description" + kind: ClassVar[str] = "aws_dynamodb_sse_description" kind_display: ClassVar[str] = "AWS DynamoDB SSE Description" kind_description: ClassVar[str] = ( "DynamoDB SSE (Server-Side Encryption) provides automatic encryption at rest" @@ -310,7 +310,7 @@ class AwsDynamoDbSSEDescription: @define(eq=False, slots=False) class AwsDynamoDbArchivalSummary: - kind: ClassVar[str] = "aws_dynamo_db_archival_summary" + kind: ClassVar[str] = "aws_dynamodb_archival_summary" kind_display: ClassVar[str] = "AWS DynamoDB Archival Summary" kind_description: ClassVar[str] = ( "DynamoDB Archival Summary provides information about the archival status and" @@ -330,7 +330,7 @@ class AwsDynamoDbArchivalSummary: 
@define(eq=False, slots=False) class AwsDynamoDbTable(DynamoDbTaggable, AwsResource): - kind: ClassVar[str] = "aws_dynamo_db_table" + kind: ClassVar[str] = "aws_dynamodb_table" kind_display: ClassVar[str] = "AWS DynamoDB Table" kind_description: ClassVar[str] = ( "An AWS DynamoDB Table is a collection of data items organized by a primary key in Amazon DynamoDB," @@ -448,7 +448,7 @@ def called_mutator_apis(cls) -> List[AwsApiSpec]: @define(eq=False, slots=False) class AwsDynamoDbGlobalTable(DynamoDbTaggable, AwsResource): - kind: ClassVar[str] = "aws_dynamo_db_global_table" + kind: ClassVar[str] = "aws_dynamodb_global_table" kind_display: ClassVar[str] = "AWS DynamoDB Global Table" kind_description: ClassVar[str] = ( "AWS DynamoDB Global Tables provide fully managed, multi-region, and globally" diff --git a/plugins/aws/resoto_plugin_aws/resource/ec2.py b/plugins/aws/resoto_plugin_aws/resource/ec2.py index f8234c10b4..f2101425e4 100644 --- a/plugins/aws/resoto_plugin_aws/resource/ec2.py +++ b/plugins/aws/resoto_plugin_aws/resource/ec2.py @@ -1215,7 +1215,10 @@ class AwsEc2Instance(EC2Taggable, AwsResource, BaseInstance): ) api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "describe-instances", "Reservations") reference_kinds: ClassVar[ModelReference] = { - "predecessors": {"default": ["aws_vpc"], "delete": ["aws_ec2_keypair", "aws_vpc"]}, + "predecessors": { + "default": ["aws_vpc", "aws_subnet", "aws_ec2_image"], + "delete": ["aws_ec2_keypair", "aws_vpc", "aws_subnet"], + }, "successors": {"default": ["aws_ec2_keypair"]}, } mapping: ClassVar[Dict[str, Bender]] = { @@ -1464,6 +1467,12 @@ def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: builder.dependant_node(self, clazz=AwsEc2KeyPair, name=self.instance_key_name) if vpc_id := source.get("VpcId"): builder.dependant_node(self, reverse=True, delete_same_as_default=True, clazz=AwsEc2Vpc, name=vpc_id) + if subnet_id := source.get("SubnetId"): + builder.dependant_node(self, reverse=True, 
@define(eq=False, slots=False)
class AwsEc2EbsBlockDevice:
    # EBS parameters of one block-device-mapping entry of an AMI (DescribeImages).
    kind: ClassVar[str] = "aws_ec2_ebs_block_device"
    mapping: ClassVar[Dict[str, Bender]] = {
        "delete_on_termination": S("DeleteOnTermination"),
        "iops": S("Iops"),
        "snapshot_id": S("SnapshotId"),
        "volume_size": S("VolumeSize"),
        "volume_type": S("VolumeType"),
        "kms_key_id": S("KmsKeyId"),
        "throughput": S("Throughput"),
        "outpost_arn": S("OutpostArn"),
        "encrypted": S("Encrypted"),
    }
    delete_on_termination: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the EBS volume is deleted on instance termination."})  # fmt: skip
    iops: Optional[int] = field(default=None, metadata={"description": "The number of I/O operations per second (IOPS)."})  # fmt: skip
    snapshot_id: Optional[str] = field(default=None, metadata={"description": "The ID of the snapshot."})  # fmt: skip
    volume_size: Optional[int] = field(default=None, metadata={"description": "The size of the volume, in GiBs."})  # fmt: skip
    volume_type: Optional[str] = field(default=None, metadata={"description": "The volume type. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide."})  # fmt: skip
    kms_key_id: Optional[str] = field(default=None, metadata={"description": "Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed CMK under which the EBS volume is encrypted."})  # fmt: skip
    throughput: Optional[int] = field(default=None, metadata={"description": "The throughput that the volume supports, in MiB/s. This parameter is valid only for gp3 volumes. Valid Range: Minimum value of 125. Maximum value of 1000."})  # fmt: skip
    outpost_arn: Optional[str] = field(default=None, metadata={"description": "The ARN of the Outpost on which the snapshot is stored. This parameter is not supported when using CreateImage."})  # fmt: skip
    encrypted: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2BlockDeviceMapping:
    # One device mapping of an AMI: either an EBS volume, an instance-store
    # (virtual) device, or an explicit "no device" suppression.
    kind: ClassVar[str] = "aws_ec2_block_device_mapping"
    mapping: ClassVar[Dict[str, Bender]] = {
        "device_name": S("DeviceName"),
        "virtual_name": S("VirtualName"),
        "ebs": S("Ebs") >> Bend(AwsEc2EbsBlockDevice.mapping),
        "no_device": S("NoDevice"),
    }
    device_name: Optional[str] = field(default=None, metadata={"description": "The device name (for example, /dev/sdh or xvdh)."})  # fmt: skip
    virtual_name: Optional[str] = field(default=None, metadata={"description": "The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0."})  # fmt: skip
    ebs: Optional[AwsEc2EbsBlockDevice] = field(default=None, metadata={"description": "Parameters used to automatically set up EBS volumes when the instance is launched."})  # fmt: skip
    no_device: Optional[str] = field(default=None, metadata={"description": "To omit the device from the block device mapping, specify an empty string."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2Image(AwsResource):
    kind: ClassVar[str] = "aws_ec2_image"
    kind_display: ClassVar[str] = "AWS EC2 Image"
    kind_description: ClassVar[str] = (
        "An Amazon Machine Image (AMI) is a supported and maintained image "
        "provided by AWS that provides the information required to launch an instance. "
    )
    # Only AMIs owned by the collected account are listed (Owners=self).
    # NOTE: use the module-level service_name (as AwsEc2Instance does) instead
    # of a hard-coded "ec2" literal, for consistency across this file.
    api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "describe-images", "Images", {"Owners": ["self"]})
    mapping: ClassVar[Dict[str, Bender]] = {
        "id": S("ImageId"),
        "tags": S("Tags", default=[]) >> ToDict(),
        "name": S("Name"),
        "ctime": S("CreationDate"),
        "architecture": S("Architecture"),
        "image_location": S("ImageLocation"),
        "image_type": S("ImageType"),
        "public": S("Public"),
        "kernel_id": S("KernelId"),
        "owner_id": S("OwnerId"),
        "platform": S("Platform"),
        "platform_details": S("PlatformDetails"),
        "usage_operation": S("UsageOperation"),
        "product_codes": S("ProductCodes", default=[]) >> ForallBend(AwsEc2ProductCode.mapping),
        "ramdisk_id": S("RamdiskId"),
        "state": S("State"),
        "block_device_mappings": S("BlockDeviceMappings", default=[]) >> ForallBend(AwsEc2BlockDeviceMapping.mapping),
        "description": S("Description"),
        "ena_support": S("EnaSupport"),
        "hypervisor": S("Hypervisor"),
        "image_owner_alias": S("ImageOwnerAlias"),
        "root_device_name": S("RootDeviceName"),
        "root_device_type": S("RootDeviceType"),
        "sriov_net_support": S("SriovNetSupport"),
        "state_reason": S("StateReason") >> Bend(AwsEc2StateReason.mapping),
        "virtualization_type": S("VirtualizationType"),
        "boot_mode": S("BootMode"),
        "tpm_support": S("TpmSupport"),
        "deprecation_time": S("DeprecationTime"),
        "imds_support": S("ImdsSupport"),
        "source_instance_id": S("SourceInstanceId"),
    }
    architecture: Optional[str] = field(default=None, metadata={"description": "The architecture of the image."})  # fmt: skip
    image_location: Optional[str] = field(default=None, metadata={"description": "The location of the AMI."})  # fmt: skip
    image_type: Optional[str] = field(default=None, metadata={"description": "The type of image."})  # fmt: skip
    public: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the image has public launch permissions."})  # fmt: skip
    kernel_id: Optional[str] = field(default=None, metadata={"description": "The kernel associated with the image, if any. Only applicable for machine images."})  # fmt: skip
    owner_id: Optional[str] = field(default=None, metadata={"description": "The ID of the Amazon Web Services account that owns the image."})  # fmt: skip
    platform: Optional[str] = field(default=None, metadata={"description": "This value is set to windows for Windows AMIs; otherwise, it is blank."})  # fmt: skip
    platform_details: Optional[str] = field(default=None, metadata={"description": "The platform details associated with the billing code of the AMI."})  # fmt: skip
    usage_operation: Optional[str] = field(default=None, metadata={"description": "The operation of the Amazon EC2 instance and the billing code that is associated with the AMI."})  # fmt: skip
    product_codes: Optional[List[AwsEc2ProductCode]] = field(factory=list, metadata={"description": "Any product codes associated with the AMI."})  # fmt: skip
    ramdisk_id: Optional[str] = field(default=None, metadata={"description": "The RAM disk associated with the image, if any."})  # fmt: skip
    state: Optional[str] = field(default=None, metadata={"description": "The current state of the AMI."})  # fmt: skip
    block_device_mappings: Optional[List[AwsEc2BlockDeviceMapping]] = field(factory=list, metadata={"description": "Any block device mapping entries."})  # fmt: skip
    description: Optional[str] = field(default=None, metadata={"description": "The description of the AMI that was provided during image creation."})  # fmt: skip
    ena_support: Optional[bool] = field(default=None, metadata={"description": "Specifies whether enhanced networking with ENA is enabled."})  # fmt: skip
    hypervisor: Optional[str] = field(default=None, metadata={"description": "The hypervisor type of the image. Only xen is supported. ovm is not supported."})  # fmt: skip
    image_owner_alias: Optional[str] = field(default=None, metadata={"description": "The Amazon Web Services account alias (for example, amazon, self) or the Amazon Web Services account ID of the AMI owner."})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The name of the AMI that was provided during image creation."})  # fmt: skip
    root_device_name: Optional[str] = field(default=None, metadata={"description": "The device name of the root device volume (for example, /dev/sda1)."})  # fmt: skip
    root_device_type: Optional[str] = field(default=None, metadata={"description": "The type of root device used by the AMI. The AMI can use an Amazon EBS volume or an instance store volume."})  # fmt: skip
    sriov_net_support: Optional[str] = field(default=None, metadata={"description": "Specifies whether enhanced networking with the Intel 82599 Virtual Function interface is enabled."})  # fmt: skip
    state_reason: Optional[AwsEc2StateReason] = field(default=None, metadata={"description": "The reason for the state change."})  # fmt: skip
    virtualization_type: Optional[str] = field(default=None, metadata={"description": "The type of virtualization of the AMI."})  # fmt: skip
    boot_mode: Optional[str] = field(default=None, metadata={"description": "The boot mode of the image. For more information, see Boot modes in the Amazon EC2 User Guide."})  # fmt: skip
    tpm_support: Optional[str] = field(default=None, metadata={"description": "If the image is configured for NitroTPM support, the value is v2.0. For more information, see NitroTPM in the Amazon EC2 User Guide."})  # fmt: skip
    deprecation_time: Optional[str] = field(default=None, metadata={"description": "The date and time to deprecate the AMI, in UTC, in the following format: YYYY-MM-DDTHH:MM:SSZ."})  # fmt: skip
    imds_support: Optional[str] = field(default=None, metadata={"description": "If v2.0, it indicates that IMDSv2 is specified in the AMI."})  # fmt: skip
    source_instance_id: Optional[str] = field(default=None, metadata={"description": "The ID of the instance that the AMI was created from if the AMI was created using CreateImage."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2LaunchTemplateIamInstanceProfileSpecification:
    # IAM instance profile reference inside launch template data.
    kind: ClassVar[str] = "aws_ec2_launch_template_iam_instance_profile_specification"
    mapping: ClassVar[Dict[str, Bender]] = {"arn": S("Arn"), "name": S("Name")}
    arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) of the instance profile."})  # fmt: skip
    name: Optional[str] = field(default=None, metadata={"description": "The name of the instance profile."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2LaunchTemplateEbsBlockDevice:
    # EBS parameters of a launch template block-device mapping.
    kind: ClassVar[str] = "aws_ec2_launch_template_ebs_block_device"
    mapping: ClassVar[Dict[str, Bender]] = {
        "encrypted": S("Encrypted"),
        "delete_on_termination": S("DeleteOnTermination"),
        "iops": S("Iops"),
        "kms_key_id": S("KmsKeyId"),
        "snapshot_id": S("SnapshotId"),
        "volume_size": S("VolumeSize"),
        "volume_type": S("VolumeType"),
        "throughput": S("Throughput"),
    }
    encrypted: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the EBS volume is encrypted."})  # fmt: skip
    delete_on_termination: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the EBS volume is deleted on instance termination."})  # fmt: skip
    iops: Optional[int] = field(default=None, metadata={"description": "The number of I/O operations per second (IOPS) that the volume supports."})  # fmt: skip
    kms_key_id: Optional[str] = field(default=None, metadata={"description": "The ARN of the Key Management Service (KMS) CMK used for encryption."})  # fmt: skip
    snapshot_id: Optional[str] = field(default=None, metadata={"description": "The ID of the snapshot."})  # fmt: skip
    volume_size: Optional[int] = field(default=None, metadata={"description": "The size of the volume, in GiB."})  # fmt: skip
    volume_type: Optional[str] = field(default=None, metadata={"description": "The volume type."})  # fmt: skip
    throughput: Optional[int] = field(default=None, metadata={"description": "The throughput that the volume supports, in MiB/s."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2LaunchTemplateBlockDeviceMapping:
    # One device mapping entry of a launch template.
    kind: ClassVar[str] = "aws_ec2_launch_template_block_device_mapping"
    mapping: ClassVar[Dict[str, Bender]] = {
        "device_name": S("DeviceName"),
        "virtual_name": S("VirtualName"),
        "ebs": S("Ebs") >> Bend(AwsEc2LaunchTemplateEbsBlockDevice.mapping),
        "no_device": S("NoDevice"),
    }
    device_name: Optional[str] = field(default=None, metadata={"description": "The device name."})  # fmt: skip
    virtual_name: Optional[str] = field(default=None, metadata={"description": "The virtual device name (ephemeralN)."})  # fmt: skip
    ebs: Optional[AwsEc2LaunchTemplateEbsBlockDevice] = field(default=None, metadata={"description": "Information about the block device for an EBS volume."})  # fmt: skip
    no_device: Optional[str] = field(default=None, metadata={"description": "To omit the device from the block device mapping, specify an empty string."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2LaunchTemplateInstanceIpv6Address:
    # A single IPv6 address assignment on a launch template network interface.
    kind: ClassVar[str] = "aws_ec2_launch_template_instance_ipv6_address"
    mapping: ClassVar[Dict[str, Bender]] = {"ipv6_address": S("Ipv6Address"), "is_primary_ipv6": S("IsPrimaryIpv6")}
    ipv6_address: Optional[str] = field(default=None, metadata={"description": "The IPv6 address."})  # fmt: skip
    is_primary_ipv6: Optional[bool] = field(default=None, metadata={"description": "Determines if an IPv6 address associated with a network interface is the primary IPv6 address. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. For more information, see RunInstances."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2LaunchTemplatePrivateIpAddressSpecification:
    # A private IPv4 address assignment on a launch template network interface.
    kind: ClassVar[str] = "aws_ec2_launch_template_private_ip_address_specification"
    mapping: ClassVar[Dict[str, Bender]] = {"primary": S("Primary"), "private_ip_address": S("PrivateIpAddress")}
    primary: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the private IPv4 address is the primary private IPv4 address. Only one IPv4 address can be designated as primary."})  # fmt: skip
    private_ip_address: Optional[str] = field(default=None, metadata={"description": "The private IPv4 address."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2LaunchTemplateEnaSrdSpecification:
    # ENA Express (SRD) settings of a launch template network interface.
    # Note: the nested EnaSrdUdpSpecification is flattened to its single
    # EnaSrdUdpEnabled boolean by the two-element S(...) path.
    kind: ClassVar[str] = "aws_ec2_launch_template_ena_srd_specification"
    mapping: ClassVar[Dict[str, Bender]] = {
        "ena_srd_enabled": S("EnaSrdEnabled"),
        "ena_srd_udp_specification": S("EnaSrdUdpSpecification", "EnaSrdUdpEnabled"),
    }
    ena_srd_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether ENA Express is enabled for the network interface."})  # fmt: skip
    ena_srd_udp_specification: Optional[bool] = field(default=None, metadata={"description": "Configures ENA Express for UDP network traffic."})  # fmt: skip


@define(eq=False, slots=False)
class AwsEc2LaunchTemplateConnectionTrackingSpecification:
    # Connection-tracking idle timeouts of a launch template network interface.
    kind: ClassVar[str] = "aws_ec2_launch_template_connection_tracking_specification"
    mapping: ClassVar[Dict[str, Bender]] = {
        "tcp_established_timeout": S("TcpEstablishedTimeout"),
        "udp_timeout": S("UdpTimeout"),
        "udp_stream_timeout": S("UdpStreamTimeout"),
    }
    tcp_established_timeout: Optional[int] = field(default=None, metadata={"description": "Timeout (in seconds) for idle TCP connections in an established state. Min: 60 seconds. Max: 432000 seconds (5 days). Default: 432000 seconds. Recommended: Less than 432000 seconds."})  # fmt: skip
    udp_timeout: Optional[int] = field(default=None, metadata={"description": "Timeout (in seconds) for idle UDP flows that have seen traffic only in a single direction or a single request-response transaction. Min: 30 seconds. Max: 60 seconds. Default: 30 seconds."})  # fmt: skip
    udp_stream_timeout: Optional[int] = field(default=None, metadata={"description": "Timeout (in seconds) for idle UDP flows classified as streams which have seen more than one request-response transaction. Min: 60 seconds. Max: 180 seconds (3 minutes). Default: 180 seconds."})  # fmt: skip
Use this option when you launch an instance in a Wavelength Zone and want to associate a Carrier IP address with the network interface. For more information about Carrier IP addresses, see Carrier IP addresses in the Wavelength Developer Guide."}) # fmt: skip + associate_public_ip_address: Optional[bool] = field(default=None, metadata={"description": "Indicates whether to associate a public IPv4 address with eth0 for a new network interface. Starting on February 1, 2024, Amazon Web Services will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page."}) # fmt: skip + delete_on_termination: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the network interface is deleted when the instance is terminated."}) # fmt: skip + description: Optional[str] = field(default=None, metadata={"description": "A description for the network interface."}) # fmt: skip + device_index: Optional[int] = field(default=None, metadata={"description": "The device index for the network interface attachment."}) # fmt: skip + groups: Optional[List[str]] = field(factory=list, metadata={"description": "The IDs of one or more security groups."}) # fmt: skip + interface_type: Optional[str] = field(default=None, metadata={"description": "The type of network interface."}) # fmt: skip + ipv6_address_count: Optional[int] = field(default=None, metadata={"description": "The number of IPv6 addresses for the network interface."}) # fmt: skip + ipv6_addresses: Optional[List[AwsEc2LaunchTemplateInstanceIpv6Address]] = field(factory=list, metadata={"description": "The IPv6 addresses for the network interface."}) # fmt: skip + network_interface_id: Optional[str] = field(default=None, metadata={"description": "The ID of the network interface."}) # fmt: skip + private_ip_address: Optional[str] = field(default=None, 
metadata={"description": "The primary private IPv4 address of the network interface."}) # fmt: skip + private_ip_addresses: Optional[List[AwsEc2LaunchTemplatePrivateIpAddressSpecification]] = field(factory=list, metadata={"description": "One or more private IPv4 addresses."}) # fmt: skip + secondary_private_ip_address_count: Optional[int] = field(default=None, metadata={"description": "The number of secondary private IPv4 addresses for the network interface."}) # fmt: skip + subnet_id: Optional[str] = field(default=None, metadata={"description": "The ID of the subnet for the network interface."}) # fmt: skip + network_card_index: Optional[int] = field(default=None, metadata={"description": "The index of the network card."}) # fmt: skip + ipv4_prefixes: Optional[List[str]] = field(factory=list, metadata={"description": "One or more IPv4 prefixes assigned to the network interface."}) # fmt: skip + ipv4_prefix_count: Optional[int] = field(default=None, metadata={"description": "The number of IPv4 prefixes that Amazon Web Services automatically assigned to the network interface."}) # fmt: skip + ipv6_prefixes: Optional[List[str]] = field(factory=list, metadata={"description": "One or more IPv6 prefixes assigned to the network interface."}) # fmt: skip + ipv6_prefix_count: Optional[int] = field(default=None, metadata={"description": "The number of IPv6 prefixes that Amazon Web Services automatically assigned to the network interface."}) # fmt: skip + primary_ipv6: Optional[bool] = field(default=None, metadata={"description": "The primary IPv6 address of the network interface. When you enable an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address until the instance is terminated or the network interface is detached. 
For more information about primary IPv6 addresses, see RunInstances."}) # fmt: skip + ena_srd_specification: Optional[AwsEc2LaunchTemplateEnaSrdSpecification] = field(default=None, metadata={"description": "Contains the ENA Express settings for instances launched from your launch template."}) # fmt: skip + connection_tracking_specification: Optional[AwsEc2LaunchTemplateConnectionTrackingSpecification] = field(default=None, metadata={"description": "A security group connection tracking specification that enables you to set the timeout for connection tracking on an Elastic network interface. For more information, see Connection tracking timeouts in the Amazon Elastic Compute Cloud User Guide."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplatePlacement: + kind: ClassVar[str] = "aws_ec2_launch_template_placement" + mapping: ClassVar[Dict[str, Bender]] = { + "availability_zone": S("AvailabilityZone"), + "affinity": S("Affinity"), + "group_name": S("GroupName"), + "host_id": S("HostId"), + "tenancy": S("Tenancy"), + "spread_domain": S("SpreadDomain"), + "host_resource_group_arn": S("HostResourceGroupArn"), + "partition_number": S("PartitionNumber"), + "group_id": S("GroupId"), + } + availability_zone: Optional[str] = field(default=None, metadata={"description": "The Availability Zone of the instance."}) # fmt: skip + affinity: Optional[str] = field(default=None, metadata={"description": "The affinity setting for the instance on the Dedicated Host."}) # fmt: skip + group_name: Optional[str] = field(default=None, metadata={"description": "The name of the placement group for the instance."}) # fmt: skip + host_id: Optional[str] = field(default=None, metadata={"description": "The ID of the Dedicated Host for the instance."}) # fmt: skip + tenancy: Optional[str] = field(default=None, metadata={"description": "The tenancy of the instance. 
An instance with a tenancy of dedicated runs on single-tenant hardware."}) # fmt: skip + spread_domain: Optional[str] = field(default=None, metadata={"description": "Reserved for future use."}) # fmt: skip + host_resource_group_arn: Optional[str] = field(default=None, metadata={"description": "The ARN of the host resource group in which to launch the instances."}) # fmt: skip + partition_number: Optional[int] = field(default=None, metadata={"description": "The number of the partition the instance should launch in. Valid only if the placement group strategy is set to partition."}) # fmt: skip + group_id: Optional[str] = field(default=None, metadata={"description": "The Group ID of the placement group. You must specify the Placement Group Group ID to launch an instance in a shared placement group."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateTagSpecification: + kind: ClassVar[str] = "aws_ec2_launch_template_tag_specification" + mapping: ClassVar[Dict[str, Bender]] = {"resource_type": S("ResourceType")} + resource_type: Optional[str] = field(default=None, metadata={"description": "The type of resource to tag."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateElasticInferenceAcceleratorResponse: + kind: ClassVar[str] = "aws_ec2_launch_template_elastic_inference_accelerator_response" + mapping: ClassVar[Dict[str, Bender]] = {"type": S("Type"), "count": S("Count")} + type: Optional[str] = field(default=None, metadata={"description": "The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, and eia1.xlarge."}) # fmt: skip + count: Optional[int] = field(default=None, metadata={"description": "The number of elastic inference accelerators to attach to the instance. 
Default: 1"}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateSpotMarketOptions: + kind: ClassVar[str] = "aws_ec2_launch_template_spot_market_options" + mapping: ClassVar[Dict[str, Bender]] = { + "max_price": S("MaxPrice"), + "spot_instance_type": S("SpotInstanceType"), + "block_duration_minutes": S("BlockDurationMinutes"), + "valid_until": S("ValidUntil"), + "instance_interruption_behavior": S("InstanceInterruptionBehavior"), + } + max_price: Optional[str] = field(default=None, metadata={"description": "The maximum hourly price you're willing to pay for the Spot Instances. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price. If you specify a maximum price, your Spot Instances will be interrupted more frequently than if you do not specify this parameter."}) # fmt: skip + spot_instance_type: Optional[str] = field(default=None, metadata={"description": "The Spot Instance request type."}) # fmt: skip + block_duration_minutes: Optional[int] = field(default=None, metadata={"description": "The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360)."}) # fmt: skip + valid_until: Optional[datetime] = field(default=None, metadata={"description": "The end date of the request. For a one-time request, the request remains active until all instances launch, the request is canceled, or this date is reached. 
If the request is persistent, it remains active until it is canceled or this date and time is reached."}) # fmt: skip + instance_interruption_behavior: Optional[str] = field(default=None, metadata={"description": "The behavior when a Spot Instance is interrupted."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateInstanceMarketOptions: + kind: ClassVar[str] = "aws_ec2_launch_template_instance_market_options" + mapping: ClassVar[Dict[str, Bender]] = { + "market_type": S("MarketType"), + "spot_options": S("SpotOptions") >> Bend(AwsEc2LaunchTemplateSpotMarketOptions.mapping), + } + market_type: Optional[str] = field(default=None, metadata={"description": "The market type."}) # fmt: skip + spot_options: Optional[AwsEc2LaunchTemplateSpotMarketOptions] = field(default=None, metadata={"description": "The options for Spot Instances."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateCpuOptions: + kind: ClassVar[str] = "aws_ec2_launch_template_cpu_options" + mapping: ClassVar[Dict[str, Bender]] = { + "core_count": S("CoreCount"), + "threads_per_core": S("ThreadsPerCore"), + "amd_sev_snp": S("AmdSevSnp"), + } + core_count: Optional[int] = field(default=None, metadata={"description": "The number of CPU cores for the instance."}) # fmt: skip + threads_per_core: Optional[int] = field(default=None, metadata={"description": "The number of threads per CPU core."}) # fmt: skip + amd_sev_snp: Optional[str] = field(default=None, metadata={"description": "Indicates whether the instance is enabled for AMD SEV-SNP. 
For more information, see AMD SEV-SNP."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateCapacityReservationTargetResponse: + kind: ClassVar[str] = "aws_ec2_launch_template_capacity_reservation_target_response" + mapping: ClassVar[Dict[str, Bender]] = { + "capacity_reservation_id": S("CapacityReservationId"), + "capacity_reservation_resource_group_arn": S("CapacityReservationResourceGroupArn"), + } + capacity_reservation_id: Optional[str] = field(default=None, metadata={"description": "The ID of the targeted Capacity Reservation."}) # fmt: skip + capacity_reservation_resource_group_arn: Optional[str] = field(default=None, metadata={"description": "The ARN of the targeted Capacity Reservation group."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateCapacityReservationSpecificationResponse: + kind: ClassVar[str] = "aws_ec2_launch_template_capacity_reservation_specification_response" + mapping: ClassVar[Dict[str, Bender]] = { + "capacity_reservation_preference": S("CapacityReservationPreference"), + "capacity_reservation_target": S("CapacityReservationTarget") + >> Bend(AwsEc2LaunchTemplateCapacityReservationTargetResponse.mapping), + } + capacity_reservation_preference: Optional[str] = field(default=None, metadata={"description": "Indicates the instance's Capacity Reservation preferences. Possible preferences include: open - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). none - The instance avoids running in a Capacity Reservation even if one is available. 
The instance runs in On-Demand capacity."}) # fmt: skip + capacity_reservation_target: Optional[AwsEc2LaunchTemplateCapacityReservationTargetResponse] = field(default=None, metadata={"description": "Information about the target Capacity Reservation or Capacity Reservation group."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateInstanceMetadataOptions: + kind: ClassVar[str] = "aws_ec2_launch_template_instance_metadata_options" + mapping: ClassVar[Dict[str, Bender]] = { + "state": S("State"), + "http_tokens": S("HttpTokens"), + "http_put_response_hop_limit": S("HttpPutResponseHopLimit"), + "http_endpoint": S("HttpEndpoint"), + "http_protocol_ipv6": S("HttpProtocolIpv6"), + "instance_metadata_tags": S("InstanceMetadataTags"), + } + state: Optional[str] = field(default=None, metadata={"description": "The state of the metadata option changes. pending - The metadata options are being updated and the instance is not ready to process metadata traffic with the new selection. applied - The metadata options have been successfully applied on the instance."}) # fmt: skip + http_tokens: Optional[str] = field(default=None, metadata={"description": "Indicates whether IMDSv2 is required. optional - IMDSv2 is optional. You can choose whether to send a session token in your instance metadata retrieval requests. If you retrieve IAM role credentials without a session token, you receive the IMDSv1 role credentials. If you retrieve IAM role credentials using a valid session token, you receive the IMDSv2 role credentials. required - IMDSv2 is required. You must send a session token in your instance metadata retrieval requests. With this option, retrieving the IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials are not available."}) # fmt: skip + http_put_response_hop_limit: Optional[int] = field(default=None, metadata={"description": "The desired HTTP PUT response hop limit for instance metadata requests. 
The larger the number, the further instance metadata requests can travel. Default: 1 Possible values: Integers from 1 to 64"}) # fmt: skip + http_endpoint: Optional[str] = field(default=None, metadata={"description": "Enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled. If you specify a value of disabled, you will not be able to access your instance metadata."}) # fmt: skip + http_protocol_ipv6: Optional[str] = field(default=None, metadata={"description": "Enables or disables the IPv6 endpoint for the instance metadata service. Default: disabled"}) # fmt: skip + instance_metadata_tags: Optional[str] = field(default=None, metadata={"description": "Set to enabled to allow access to instance tags from the instance metadata. Set to disabled to turn off access to instance tags from the instance metadata. For more information, see Work with instance tags using the instance metadata. Default: disabled"}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateVCpuCountRange: + kind: ClassVar[str] = "aws_ec2_launch_template_v_cpu_count_range" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[int] = field(default=None, metadata={"description": "The minimum number of vCPUs. If the value is 0, there is no minimum limit."}) # fmt: skip + max: Optional[int] = field(default=None, metadata={"description": "The maximum number of vCPUs. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateMemoryMiB: + kind: ClassVar[str] = "aws_ec2_launch_template_memory_mi_b" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[int] = field(default=None, metadata={"description": "The minimum amount of memory, in MiB. 
If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[int] = field(default=None, metadata={"description": "The maximum amount of memory, in MiB. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateMemoryGiBPerVCpu: + kind: ClassVar[str] = "aws_ec2_launch_template_memory_gi_b_per_v_cpu" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[float] = field(default=None, metadata={"description": "The minimum amount of memory per vCPU, in GiB. If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[float] = field(default=None, metadata={"description": "The maximum amount of memory per vCPU, in GiB. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateNetworkInterfaceCount: + kind: ClassVar[str] = "aws_ec2_launch_template_network_interface_count" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[int] = field(default=None, metadata={"description": "The minimum number of network interfaces. If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[int] = field(default=None, metadata={"description": "The maximum number of network interfaces. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateTotalLocalStorageGB: + kind: ClassVar[str] = "aws_ec2_launch_template_total_local_storage_gb" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[float] = field(default=None, metadata={"description": "The minimum amount of total local storage, in GB. 
If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[float] = field(default=None, metadata={"description": "The maximum amount of total local storage, in GB. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateBaselineEbsBandwidthMbps: + kind: ClassVar[str] = "aws_ec2_launch_template_baseline_ebs_bandwidth_mbps" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[int] = field(default=None, metadata={"description": "The minimum baseline bandwidth, in Mbps. If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[int] = field(default=None, metadata={"description": "The maximum baseline bandwidth, in Mbps. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateAcceleratorCount: + kind: ClassVar[str] = "aws_ec2_launch_template_accelerator_count" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[int] = field(default=None, metadata={"description": "The minimum number of accelerators. If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[int] = field(default=None, metadata={"description": "The maximum number of accelerators. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateAcceleratorTotalMemoryMiB: + kind: ClassVar[str] = "aws_ec2_launch_template_accelerator_total_memory_mi_b" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[int] = field(default=None, metadata={"description": "The minimum amount of accelerator memory, in MiB. 
If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[int] = field(default=None, metadata={"description": "The maximum amount of accelerator memory, in MiB. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateNetworkBandwidthGbps: + kind: ClassVar[str] = "aws_ec2_launch_template_network_bandwidth_gbps" + mapping: ClassVar[Dict[str, Bender]] = {"min": S("Min"), "max": S("Max")} + min: Optional[float] = field(default=None, metadata={"description": "The minimum amount of network bandwidth, in Gbps. If this parameter is not specified, there is no minimum limit."}) # fmt: skip + max: Optional[float] = field(default=None, metadata={"description": "The maximum amount of network bandwidth, in Gbps. If this parameter is not specified, there is no maximum limit."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateInstanceRequirements: + kind: ClassVar[str] = "aws_ec2_launch_template_instance_requirements" + mapping: ClassVar[Dict[str, Bender]] = { + "v_cpu_count": S("VCpuCount") >> Bend(AwsEc2LaunchTemplateVCpuCountRange.mapping), + "memory_mi_b": S("MemoryMiB") >> Bend(AwsEc2LaunchTemplateMemoryMiB.mapping), + "cpu_manufacturers": S("CpuManufacturers", default=[]), + "memory_gi_b_per_v_cpu": S("MemoryGiBPerVCpu") >> Bend(AwsEc2LaunchTemplateMemoryGiBPerVCpu.mapping), + "excluded_instance_types": S("ExcludedInstanceTypes", default=[]), + "instance_generations": S("InstanceGenerations", default=[]), + "spot_max_price_percentage_over_lowest_price": S("SpotMaxPricePercentageOverLowestPrice"), + "on_demand_max_price_percentage_over_lowest_price": S("OnDemandMaxPricePercentageOverLowestPrice"), + "bare_metal": S("BareMetal"), + "burstable_performance": S("BurstablePerformance"), + "require_hibernate_support": S("RequireHibernateSupport"), + "network_interface_count": S("NetworkInterfaceCount") + >> 
Bend(AwsEc2LaunchTemplateNetworkInterfaceCount.mapping), + "local_storage": S("LocalStorage"), + "local_storage_types": S("LocalStorageTypes", default=[]), + "total_local_storage_gb": S("TotalLocalStorageGB") >> Bend(AwsEc2LaunchTemplateTotalLocalStorageGB.mapping), + "baseline_ebs_bandwidth_mbps": S("BaselineEbsBandwidthMbps") + >> Bend(AwsEc2LaunchTemplateBaselineEbsBandwidthMbps.mapping), + "accelerator_types": S("AcceleratorTypes", default=[]), + "accelerator_count": S("AcceleratorCount") >> Bend(AwsEc2LaunchTemplateAcceleratorCount.mapping), + "accelerator_manufacturers": S("AcceleratorManufacturers", default=[]), + "accelerator_names": S("AcceleratorNames", default=[]), + "accelerator_total_memory_mi_b": S("AcceleratorTotalMemoryMiB") + >> Bend(AwsEc2LaunchTemplateAcceleratorTotalMemoryMiB.mapping), + "network_bandwidth_gbps": S("NetworkBandwidthGbps") >> Bend(AwsEc2LaunchTemplateNetworkBandwidthGbps.mapping), + "allowed_instance_types": S("AllowedInstanceTypes", default=[]), + } + v_cpu_count: Optional[AwsEc2LaunchTemplateVCpuCountRange] = field(default=None, metadata={"description": "The minimum and maximum number of vCPUs."}) # fmt: skip + memory_mi_b: Optional[AwsEc2LaunchTemplateMemoryMiB] = field(default=None, metadata={"description": "The minimum and maximum amount of memory, in MiB."}) # fmt: skip + cpu_manufacturers: Optional[List[str]] = field(factory=list, metadata={"description": "The CPU manufacturers to include. For instance types with Intel CPUs, specify intel. For instance types with AMD CPUs, specify amd. For instance types with Amazon Web Services CPUs, specify amazon-web-services. Don't confuse the CPU manufacturer with the CPU architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template. 
Default: Any manufacturer"}) # fmt: skip + memory_gi_b_per_v_cpu: Optional[AwsEc2LaunchTemplateMemoryGiBPerVCpu] = field(default=None, metadata={"description": "The minimum and maximum amount of memory per vCPU, in GiB. Default: No minimum or maximum limits"}) # fmt: skip + excluded_instance_types: Optional[List[str]] = field(factory=list, metadata={"description": "The instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk (*), to exclude an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*,Amazon EC2 will exclude the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, Amazon EC2 will exclude all the M5a instance types, but not the M5n instance types. If you specify ExcludedInstanceTypes, you can't specify AllowedInstanceTypes. Default: No excluded instance types"}) # fmt: skip + instance_generations: Optional[List[str]] = field(factory=list, metadata={"description": "Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Current generation instance types are typically the latest two to three generations in each instance family. For more information, see Instance types in the Amazon EC2 User Guide. For current generation instance types, specify current. For previous generation instance types, specify previous. Default: Current and previous generation instance types"}) # fmt: skip + spot_max_price_percentage_over_lowest_price: Optional[int] = field(default=None, metadata={"description": "The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. 
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. To turn off price protection, specify a high value, such as 999999. This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements. If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. Default: 100"}) # fmt: skip + on_demand_max_price_percentage_over_lowest_price: Optional[int] = field(default=None, metadata={"description": "The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. To turn off price protection, specify a high value, such as 999999. This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements. If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. Default: 20"}) # fmt: skip + bare_metal: Optional[str] = field(default=None, metadata={"description": "Indicates whether bare metal instance types must be included, excluded, or required. To include bare metal instance types, specify included. To require only bare metal instance types, specify required. To exclude bare metal instance types, specify excluded. 
Default: excluded"}) # fmt: skip + burstable_performance: Optional[str] = field(default=None, metadata={"description": "Indicates whether burstable performance T instance types are included, excluded, or required. For more information, see Burstable performance instances. To include burstable performance instance types, specify included. To require only burstable performance instance types, specify required. To exclude burstable performance instance types, specify excluded. Default: excluded"}) # fmt: skip + require_hibernate_support: Optional[bool] = field(default=None, metadata={"description": "Indicates whether instance types must support hibernation for On-Demand Instances. This parameter is not supported for GetSpotPlacementScores. Default: false"}) # fmt: skip + network_interface_count: Optional[AwsEc2LaunchTemplateNetworkInterfaceCount] = field(default=None, metadata={"description": "The minimum and maximum number of network interfaces. Default: No minimum or maximum limits"}) # fmt: skip + local_storage: Optional[str] = field(default=None, metadata={"description": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, Amazon EC2 instance store in the Amazon EC2 User Guide. To include instance types with instance store volumes, specify included. To require only instance types with instance store volumes, specify required. To exclude instance types with instance store volumes, specify excluded. Default: included"}) # fmt: skip + local_storage_types: Optional[List[str]] = field(factory=list, metadata={"description": "The type of local storage that is required. For instance types with hard disk drive (HDD) storage, specify hdd. For instance types with solid state drive (SSD) storage, specify ssd. 
Default: hdd and ssd"}) # fmt: skip + total_local_storage_gb: Optional[AwsEc2LaunchTemplateTotalLocalStorageGB] = field(default=None, metadata={"description": "The minimum and maximum amount of total local storage, in GB. Default: No minimum or maximum limits"}) # fmt: skip + baseline_ebs_bandwidth_mbps: Optional[AwsEc2LaunchTemplateBaselineEbsBandwidthMbps] = field(default=None, metadata={"description": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see Amazon EBS–optimized instances in the Amazon EC2 User Guide. Default: No minimum or maximum limits"}) # fmt: skip + accelerator_types: Optional[List[str]] = field(factory=list, metadata={"description": "The accelerator types that must be on the instance type. For instance types with GPU accelerators, specify gpu. For instance types with FPGA accelerators, specify fpga. For instance types with inference accelerators, specify inference. Default: Any accelerator type"}) # fmt: skip + accelerator_count: Optional[AwsEc2LaunchTemplateAcceleratorCount] = field(default=None, metadata={"description": "The minimum and maximum number of accelerators (GPUs, FPGAs, or Amazon Web Services Inferentia chips) on an instance. To exclude accelerator-enabled instance types, set Max to 0. Default: No minimum or maximum limits"}) # fmt: skip + accelerator_manufacturers: Optional[List[str]] = field(factory=list, metadata={"description": "Indicates whether instance types must have accelerators by specific manufacturers. For instance types with Amazon Web Services devices, specify amazon-web-services. For instance types with AMD devices, specify amd. For instance types with Habana devices, specify habana. For instance types with NVIDIA devices, specify nvidia. For instance types with Xilinx devices, specify xilinx. Default: Any manufacturer"}) # fmt: skip + accelerator_names: Optional[List[str]] = field(factory=list, metadata={"description": "The accelerators that must be on the instance type. 
For instance types with NVIDIA A10G GPUs, specify a10g. For instance types with NVIDIA A100 GPUs, specify a100. For instance types with NVIDIA H100 GPUs, specify h100. For instance types with Amazon Web Services Inferentia chips, specify inferentia. For instance types with NVIDIA GRID K520 GPUs, specify k520. For instance types with NVIDIA K80 GPUs, specify k80. For instance types with NVIDIA M60 GPUs, specify m60. For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520. For instance types with NVIDIA T4 GPUs, specify t4. For instance types with NVIDIA T4G GPUs, specify t4g. For instance types with Xilinx VU9P FPGAs, specify vu9p. For instance types with NVIDIA V100 GPUs, specify v100. Default: Any accelerator"}) # fmt: skip + accelerator_total_memory_mi_b: Optional[AwsEc2LaunchTemplateAcceleratorTotalMemoryMiB] = field(default=None, metadata={"description": "The minimum and maximum amount of total accelerator memory, in MiB. Default: No minimum or maximum limits"}) # fmt: skip + network_bandwidth_gbps: Optional[AwsEc2LaunchTemplateNetworkBandwidthGbps] = field(default=None, metadata={"description": "The minimum and maximum amount of network bandwidth, in gigabits per second (Gbps). Default: No minimum or maximum limits"}) # fmt: skip + allowed_instance_types: Optional[List[str]] = field(factory=list, metadata={"description": "The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes. You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*. For example, if you specify c5*,Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, Amazon EC2 will allow all the M5a instance types, but not the M5n instance types. 
If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes. Default: All instance types"}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplatePrivateDnsNameOptions: + kind: ClassVar[str] = "aws_ec2_launch_template_private_dns_name_options" + mapping: ClassVar[Dict[str, Bender]] = { + "hostname_type": S("HostnameType"), + "enable_resource_name_dns_a_record": S("EnableResourceNameDnsARecord"), + "enable_resource_name_dns_aaaa_record": S("EnableResourceNameDnsAAAARecord"), + } + hostname_type: Optional[str] = field(default=None, metadata={"description": "The type of hostname to assign to an instance."}) # fmt: skip + enable_resource_name_dns_a_record: Optional[bool] = field(default=None, metadata={"description": "Indicates whether to respond to DNS queries for instance hostnames with DNS A records."}) # fmt: skip + enable_resource_name_dns_aaaa_record: Optional[bool] = field(default=None, metadata={"description": "Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplateData: + kind: ClassVar[str] = "aws_ec2_launch_template_data" + mapping: ClassVar[Dict[str, Bender]] = { + "kernel_id": S("KernelId"), + "ebs_optimized": S("EbsOptimized"), + "iam_instance_profile": S("IamInstanceProfile") + >> Bend(AwsEc2LaunchTemplateIamInstanceProfileSpecification.mapping), + "block_device_mappings": S("BlockDeviceMappings", default=[]) + >> ForallBend(AwsEc2LaunchTemplateBlockDeviceMapping.mapping), + "network_interfaces": S("NetworkInterfaces", default=[]) + >> ForallBend(AwsEc2LaunchTemplateInstanceNetworkInterfaceSpecification.mapping), + "image_id": S("ImageId"), + "instance_type": S("InstanceType"), + "key_name": S("KeyName"), + "monitoring": S("Monitoring", "Enabled"), + "placement": S("Placement") >> Bend(AwsEc2LaunchTemplatePlacement.mapping), + "ram_disk_id": S("RamDiskId"), + "disable_api_termination": 
S("DisableApiTermination"), + "instance_initiated_shutdown_behavior": S("InstanceInitiatedShutdownBehavior"), + "user_data": S("UserData"), + "tag_specifications": S("TagSpecifications", default=[]) + >> ForallBend(AwsEc2LaunchTemplateTagSpecification.mapping), + "elastic_gpu_specifications": S("ElasticGpuSpecifications", default=[]) >> ForallBend(S("Type")), + "elastic_inference_accelerators": S("ElasticInferenceAccelerators", default=[]) + >> ForallBend(AwsEc2LaunchTemplateElasticInferenceAcceleratorResponse.mapping), + "security_group_ids": S("SecurityGroupIds", default=[]), + "security_groups": S("SecurityGroups", default=[]), + "instance_market_options": S("InstanceMarketOptions") + >> Bend(AwsEc2LaunchTemplateInstanceMarketOptions.mapping), + "credit_specification": S("CreditSpecification", "CpuCredits"), + "cpu_options": S("CpuOptions") >> Bend(AwsEc2LaunchTemplateCpuOptions.mapping), + "capacity_reservation_specification": S("CapacityReservationSpecification") + >> Bend(AwsEc2LaunchTemplateCapacityReservationSpecificationResponse.mapping), + "license_specifications": S("LicenseSpecifications", default=[]) >> ForallBend(S("LicenseConfigurationArn")), + "hibernation_options": S("HibernationOptions", "Configured"), + "metadata_options": S("MetadataOptions") >> Bend(AwsEc2LaunchTemplateInstanceMetadataOptions.mapping), + "enclave_options": S("EnclaveOptions", "Enabled"), + "instance_requirements": S("InstanceRequirements") >> Bend(AwsEc2LaunchTemplateInstanceRequirements.mapping), + "private_dns_name_options": S("PrivateDnsNameOptions") + >> Bend(AwsEc2LaunchTemplatePrivateDnsNameOptions.mapping), + "maintenance_options": S("MaintenanceOptions", "AutoRecovery"), + "disable_api_stop": S("DisableApiStop"), + } + kernel_id: Optional[str] = field(default=None, metadata={"description": "The ID of the kernel, if applicable."}) # fmt: skip + ebs_optimized: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the instance is optimized for 
Amazon EBS I/O."}) # fmt: skip + iam_instance_profile: Optional[AwsEc2LaunchTemplateIamInstanceProfileSpecification] = field(default=None, metadata={"description": "The IAM instance profile."}) # fmt: skip + block_device_mappings: Optional[List[AwsEc2LaunchTemplateBlockDeviceMapping]] = field(factory=list, metadata={"description": "The block device mappings."}) # fmt: skip + network_interfaces: Optional[List[AwsEc2LaunchTemplateInstanceNetworkInterfaceSpecification]] = field(factory=list, metadata={"description": "The network interfaces."}) # fmt: skip + image_id: Optional[str] = field(default=None, metadata={"description": "The ID of the AMI or a Systems Manager parameter. The Systems Manager parameter will resolve to the ID of the AMI at instance launch. The value depends on what you specified in the request. The possible values are: If an AMI ID was specified in the request, then this is the AMI ID. If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as true, then this is the AMI ID that the parameter is mapped to in the Parameter Store. If a Systems Manager parameter was specified in the request, and ResolveAlias was configured as false, then this is the parameter value. 
For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon Elastic Compute Cloud User Guide."}) # fmt: skip + instance_type: Optional[str] = field(default=None, metadata={"description": "The instance type."}) # fmt: skip + key_name: Optional[str] = field(default=None, metadata={"description": "The name of the key pair."}) # fmt: skip + monitoring: Optional[bool] = field(default=None, metadata={"description": "The monitoring for the instance."}) # fmt: skip + placement: Optional[AwsEc2LaunchTemplatePlacement] = field(default=None, metadata={"description": "The placement of the instance."}) # fmt: skip + ram_disk_id: Optional[str] = field(default=None, metadata={"description": "The ID of the RAM disk, if applicable."}) # fmt: skip + disable_api_termination: Optional[bool] = field(default=None, metadata={"description": "If set to true, indicates that the instance cannot be terminated using the Amazon EC2 console, command line tool, or API."}) # fmt: skip + instance_initiated_shutdown_behavior: Optional[str] = field(default=None, metadata={"description": "Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown)."}) # fmt: skip + user_data: Optional[str] = field(default=None, metadata={"description": "The user data for the instance."}) # fmt: skip + tag_specifications: Optional[List[AwsEc2LaunchTemplateTagSpecification]] = field(factory=list, metadata={"description": "The tags that are applied to the resources that are created during instance launch."}) # fmt: skip + elastic_gpu_specifications: Optional[List[str]] = field(factory=list, metadata={"description": "The elastic GPU specification."}) # fmt: skip + elastic_inference_accelerators: Optional[List[AwsEc2LaunchTemplateElasticInferenceAcceleratorResponse]] = field(factory=list, metadata={"description": "An elastic inference accelerator to associate with the instance. 
Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads. You cannot specify accelerators from different generations in the same request. Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service."}) # fmt: skip + security_group_ids: Optional[List[str]] = field(factory=list, metadata={"description": "The security group IDs."}) # fmt: skip + security_groups: Optional[List[str]] = field(factory=list, metadata={"description": "The security group names."}) # fmt: skip + instance_market_options: Optional[AwsEc2LaunchTemplateInstanceMarketOptions] = field(default=None, metadata={"description": "The market (purchasing) option for the instances."}) # fmt: skip + credit_specification: Optional[str] = field(default=None, metadata={"description": "The credit option for CPU usage of the instance."}) # fmt: skip + cpu_options: Optional[AwsEc2LaunchTemplateCpuOptions] = field(default=None, metadata={"description": "The CPU options for the instance. 
For more information, see Optimizing CPU options in the Amazon Elastic Compute Cloud User Guide."}) # fmt: skip + capacity_reservation_specification: Optional[AwsEc2LaunchTemplateCapacityReservationSpecificationResponse] = field(default=None, metadata={"description": "Information about the Capacity Reservation targeting option."}) # fmt: skip + license_specifications: Optional[List[str]] = field(factory=list, metadata={"description": "The license configurations."}) # fmt: skip + hibernation_options: Optional[bool] = field(default=None, metadata={"description": "Indicates whether an instance is configured for hibernation. For more information, see Hibernate your instance in the Amazon Elastic Compute Cloud User Guide."}) # fmt: skip + metadata_options: Optional[AwsEc2LaunchTemplateInstanceMetadataOptions] = field(default=None, metadata={"description": "The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon Elastic Compute Cloud User Guide."}) # fmt: skip + enclave_options: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves."}) # fmt: skip + instance_requirements: Optional[AwsEc2LaunchTemplateInstanceRequirements] = field(default=None, metadata={"description": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes. 
If you specify InstanceRequirements, you can't specify InstanceTypes."}) # fmt: skip + private_dns_name_options: Optional[AwsEc2LaunchTemplatePrivateDnsNameOptions] = field(default=None, metadata={"description": "The options for the instance hostname."}) # fmt: skip + maintenance_options: Optional[str] = field(default=None, metadata={"description": "The maintenance options for your instance."}) # fmt: skip + disable_api_stop: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the instance is enabled for stop protection. For more information, see Stop protection in the Amazon Elastic Compute Cloud User Guide."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsEc2LaunchTemplate(EC2Taggable, AwsResource): + kind: ClassVar[str] = "aws_ec2_launch_template" + api_spec: ClassVar[AwsApiSpec] = AwsApiSpec( + "ec2", "describe-launch-template-versions", "LaunchTemplateVersions", {"Versions": ["$Default", "$Latest"]} + ) + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("LaunchTemplateId"), + "tags": S("Tags", default=[]) >> ToDict(), + "name": S("LaunchTemplateName"), + "ctime": S("CreateTime"), + "version_number": S("VersionNumber"), + "version_description": S("VersionDescription"), + "created_by": S("CreatedBy"), + "is_default_version": S("DefaultVersion"), + "launch_template_data": S("LaunchTemplateData") >> Bend(AwsEc2LaunchTemplateData.mapping), + } + version_number: Optional[int] = field(default=None, metadata={"description": "The version number."}) # fmt: skip + version_description: Optional[str] = field(default=None, metadata={"description": "The description for the version."}) # fmt: skip + created_by: Optional[str] = field(default=None, metadata={"description": "The principal that created the version."}) # fmt: skip + is_default_version: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the version is the default version."}) # fmt: skip + launch_template_data: 
Optional[AwsEc2LaunchTemplateData] = field(default=None, metadata={"description": "Information about the launch template."}) # fmt: skip + + # endregion resources: List[Type[AwsResource]] = [ @@ -3204,7 +3829,9 @@ def delete_resource(self, client: AwsClient, graph: Graph) -> bool: AwsEc2Host, AwsEc2Instance, AwsEc2InternetGateway, + AwsEc2Image, AwsEc2KeyPair, + AwsEc2LaunchTemplate, AwsEc2NatGateway, AwsEc2NetworkAcl, AwsEc2NetworkInterface, diff --git a/plugins/aws/resoto_plugin_aws/resource/ecr.py b/plugins/aws/resoto_plugin_aws/resource/ecr.py index a15777ad3d..1238206d13 100644 --- a/plugins/aws/resoto_plugin_aws/resource/ecr.py +++ b/plugins/aws/resoto_plugin_aws/resource/ecr.py @@ -1,12 +1,17 @@ +import json +import logging from typing import ClassVar, Dict, Optional, List, Type from attrs import define, field +from boto3.exceptions import Boto3Error -from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec +from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder from resoto_plugin_aws.utils import ToDict from resotolib.json_bender import Bender, S, Bend +from resotolib.types import Json -service_name = "ecs" +service_name = "ecr" +log = logging.getLogger("resoto.plugins.aws") @define(eq=False, slots=False) @@ -21,6 +26,7 @@ class AwsEcrEncryptionConfiguration: class AwsEcrRepository(AwsResource): kind: ClassVar[str] = "aws_ecr_repository" api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("ecr", "describe-repositories", "repositories") + public_spec: ClassVar[AwsApiSpec] = AwsApiSpec("ecr-public", "describe-repositories", "repositories") mapping: ClassVar[Dict[str, Bender]] = { "id": S("repositoryName"), "tags": S("Tags", default=[]) >> ToDict(), @@ -39,6 +45,52 @@ class AwsEcrRepository(AwsResource): image_tag_mutability: Optional[str] = field(default=None, metadata={"description": "The tag mutability setting for the repository."}) # fmt: skip image_scan_on_push: Optional[bool] = field(default=None, metadata={"description": 
"The image is scanned on every push."}) # fmt: skip encryption_configuration: Optional[AwsEcrEncryptionConfiguration] = field(default=None, metadata={"description": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest."}) # fmt: skip + repository_visibility: Optional[str] = field(default=None, metadata={"description": "The repository is either public or private."}) # fmt: skip + lifecycle_policy: Optional[Json] = field(default=None, metadata={"description": "The repository lifecycle policy."}) # fmt: skip + + @classmethod + def collect_resources(cls, builder: GraphBuilder) -> None: + def fetch_lifecycle_policy(repository: AwsEcrRepository) -> None: + with builder.suppress(f"{service_name}.get-lifecycle-policy"): + if policy := builder.client.get( + service_name, + "get-lifecycle-policy", + repositoryName=repository.name, + expected_errors=["LifecyclePolicyNotFoundException"], + ): + repository.lifecycle_policy = json.loads(policy["lifecyclePolicyText"]) + + def collect(visibility: str, spec: AwsApiSpec) -> None: + try: + kwargs = spec.parameter or {} + items = builder.client.list( + aws_service=spec.service, + action=spec.api_action, + result_name=spec.result_property, + expected_errors=spec.expected_errors, + **kwargs, + ) + for js in items: + if instance := cls.from_api(js, builder): + instance.repository_visibility = visibility + builder.submit_work(service_name, fetch_lifecycle_policy, instance) + builder.add_node(instance, js) + except Boto3Error as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.error(msg, log) + raise + except Exception as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.info(msg, log) + raise + + # collect private and public repositories + collect("private", cls.api_spec) + collect("public", cls.public_spec) + + @classmethod + def 
called_collect_apis(cls) -> List[AwsApiSpec]: + return [cls.api_spec, cls.public_spec, AwsApiSpec("ecr", "get-lifecycle-policy", None)] # @define(eq=False, slots=False) diff --git a/plugins/aws/resoto_plugin_aws/resource/efs.py b/plugins/aws/resoto_plugin_aws/resource/efs.py index e37a2eafae..652a021347 100644 --- a/plugins/aws/resoto_plugin_aws/resource/efs.py +++ b/plugins/aws/resoto_plugin_aws/resource/efs.py @@ -1,3 +1,4 @@ +import json from typing import Optional, ClassVar, Dict, List, Type import math @@ -110,6 +111,7 @@ class AwsEfsFileSystem(EfsTaggable, AwsResource, BaseNetworkShare): throughput_mode: Optional[str] = field(default=None) provisioned_throughput_in_mibps: Optional[float] = field(default=None) availability_zone_name: Optional[str] = field(default=None) + file_system_policy: Optional[Json] = field(default=None) @classmethod def called_collect_apis(cls) -> List[AwsApiSpec]: @@ -118,10 +120,15 @@ def called_collect_apis(cls) -> List[AwsApiSpec]: AwsApiSpec( service_name, "describe-mount-targets", override_iam_permission="elasticfilesystem:DescribeMountTargets" ), + AwsApiSpec( + service_name, + "describe-file-system-policy", + override_iam_permission="elasticfilesystem:DescribeFileSystemPolicy", + ), ] @classmethod - def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None: + def collect(cls: Type[AwsResource], js_list: List[Json], builder: GraphBuilder) -> None: def collect_mount_points(fs: AwsEfsFileSystem) -> None: for mt_raw in builder.client.list( service_name, "describe-mount-targets", "MountTargets", FileSystemId=fs.id @@ -130,10 +137,21 @@ def collect_mount_points(fs: AwsEfsFileSystem) -> None: builder.add_node(mt, mt_raw) builder.add_edge(fs, node=mt) - for js in json: + def fetch_file_system_policy(fs: AwsEfsFileSystem) -> None: + with builder.suppress("describe-file-system-policy"): + if policy := builder.client.get( + service_name, + "describe-file-system-policy", + FileSystemId=fs.id, + 
expected_errors=["PolicyNotFound"], + ): + fs.file_system_policy = json.loads(policy["Policy"]) + + for js in js_list: if instance := cls.from_api(js, builder): builder.add_node(instance, js) builder.submit_work(service_name, collect_mount_points, instance) + builder.submit_work(service_name, fetch_file_system_policy, instance) def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: if kms_key_id := source.get("KmsKeyId"): diff --git a/plugins/aws/resoto_plugin_aws/resource/elb.py b/plugins/aws/resoto_plugin_aws/resource/elb.py index fad34b5289..784db80d7c 100644 --- a/plugins/aws/resoto_plugin_aws/resource/elb.py +++ b/plugins/aws/resoto_plugin_aws/resource/elb.py @@ -2,7 +2,7 @@ from attrs import define, field -from resoto_plugin_aws.resource.base import AwsResource, GraphBuilder, AwsApiSpec +from resoto_plugin_aws.resource.base import AwsResource, GraphBuilder, AwsApiSpec, parse_json from resoto_plugin_aws.resource.ec2 import AwsEc2Subnet, AwsEc2SecurityGroup, AwsEc2Vpc, AwsEc2Instance from resoto_plugin_aws.resource.cloudwatch import AwsCloudwatchQuery, AwsCloudwatchMetricData, update_resource_metrics from resoto_plugin_aws.utils import ToDict, MetricNormalization @@ -197,6 +197,54 @@ class AwsElbSourceSecurityGroup: group_name: Optional[str] = field(default=None) +@define(eq=False, slots=False) +class AwsElbAccessLog: + kind: ClassVar[str] = "aws_elb_access_log" + mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "s3_bucket_name": S("S3BucketName"), + "emit_interval": S("EmitInterval"), + "s3_bucket_prefix": S("S3BucketPrefix"), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "Specifies whether access logs are enabled for the load balancer."}) # fmt: skip + s3_bucket_name: Optional[str] = field(default=None, metadata={"description": "The name of the Amazon S3 bucket where the access logs are stored."}) # fmt: skip + emit_interval: Optional[int] = field(default=None, metadata={"description": "The 
interval for publishing the access logs. You can specify an interval of either 5 minutes or 60 minutes. Default: 60 minutes"}) # fmt: skip + s3_bucket_prefix: Optional[str] = field(default=None, metadata={"description": "The logical hierarchy you created for your Amazon S3 bucket, for example my-bucket-prefix/prod. If the prefix is not provided, the log is placed at the root level of the bucket."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsElbConnectionDraining: + kind: ClassVar[str] = "aws_elb_connection_draining" + mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("Enabled"), "timeout": S("Timeout")} + enabled: Optional[bool] = field(default=None, metadata={"description": "Specifies whether connection draining is enabled for the load balancer."}) # fmt: skip + timeout: Optional[int] = field(default=None, metadata={"description": "The maximum time, in seconds, to keep the existing connections open before deregistering the instances."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsElbAdditionalAttribute: + kind: ClassVar[str] = "aws_elb_additional_attribute" + mapping: ClassVar[Dict[str, Bender]] = {"key": S("Key"), "value": S("Value")} + key: Optional[str] = field(default=None, metadata={"description": "The name of the attribute. The following attribute is supported. elb.http.desyncmitigationmode - Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor, defensive, and strictest. 
The default is defensive."}) # fmt: skip + value: Optional[str] = field(default=None, metadata={"description": "This value of the attribute."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsElbLoadBalancerAttributes: + kind: ClassVar[str] = "aws_elb_load_balancer_attributes" + mapping: ClassVar[Dict[str, Bender]] = { + "cross_zone_load_balancing": S("CrossZoneLoadBalancing", "Enabled"), + "access_log": S("AccessLog") >> Bend(AwsElbAccessLog.mapping), + "connection_draining": S("ConnectionDraining") >> Bend(AwsElbConnectionDraining.mapping), + "connection_settings": S("ConnectionSettings", "IdleTimeout"), + "additional_attributes": S("AdditionalAttributes", default=[]) >> ForallBend(AwsElbAdditionalAttribute.mapping), + } + cross_zone_load_balancing: Optional[bool] = field(default=None, metadata={"description": "If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones. For more information, see Configure Cross-Zone Load Balancing in the Classic Load Balancers Guide."}) # fmt: skip + access_log: Optional[AwsElbAccessLog] = field(default=None, metadata={"description": "If enabled, the load balancer captures detailed information of all requests and delivers the information to the Amazon S3 bucket that you specify. For more information, see Enable Access Logs in the Classic Load Balancers Guide."}) # fmt: skip + connection_draining: Optional[AwsElbConnectionDraining] = field(default=None, metadata={"description": "If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance. For more information, see Configure Connection Draining in the Classic Load Balancers Guide."}) # fmt: skip + connection_settings: Optional[int] = field(default=None, metadata={"description": "If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration. 
By default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see Configure Idle Connection Timeout in the Classic Load Balancers Guide."}) # fmt: skip + additional_attributes: Optional[List[AwsElbAdditionalAttribute]] = field(factory=list, metadata={"description": "Any additional attributes."}) # fmt: skip + + @define(eq=False, slots=False) class AwsElb(ElbTaggable, AwsResource, BaseLoadBalancer): kind: ClassVar[str] = "aws_elb" @@ -249,20 +297,38 @@ class AwsElb(ElbTaggable, AwsResource, BaseLoadBalancer): elb_availability_zones: List[str] = field(factory=list) elb_health_check: Optional[AwsElbHealthCheck] = field(default=None) elb_source_security_group: Optional[AwsElbSourceSecurityGroup] = field(default=None) + elb_attributes: Optional[AwsElbLoadBalancerAttributes] = field(default=None) @classmethod def called_collect_apis(cls) -> List[AwsApiSpec]: return [ cls.api_spec, AwsApiSpec( - cls.api_spec.service, + service_name, "describe-tags", override_iam_permission="elasticloadbalancing:DescribeTags", ), + AwsApiSpec( + service_name, + "describe-load-balancer-attributes", + override_iam_permission="elasticloadbalancing:DescribeLoadBalancerAttributes", + ), ] @classmethod def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None: + def fetch_attributes(elb: AwsElb) -> None: + if attributes := builder.client.get( + service_name, + "describe-load-balancer-attributes", + "LoadBalancerAttributes", + LoadBalancerName=elb.name, + expected_errors=["LoadBalancerNotFound"], + ): + elb.elb_attributes = parse_json( + attributes, AwsElbLoadBalancerAttributes, builder, AwsElbAdditionalAttribute.mapping + ) + def add_tags(elb: AwsElb) -> None: tags = builder.client.list( service_name, @@ -278,6 +344,7 @@ def add_tags(elb: AwsElb) -> None: if instance := cls.from_api(js, builder): builder.add_node(instance, js) builder.submit_work(service_name, 
add_tags, instance) + builder.submit_work(service_name, fetch_attributes, instance) @classmethod def collect_usage_metrics(cls: Type[AwsResource], builder: GraphBuilder) -> None: diff --git a/plugins/aws/resoto_plugin_aws/resource/glacier.py b/plugins/aws/resoto_plugin_aws/resource/glacier.py index a142ccc477..20b60e8def 100644 --- a/plugins/aws/resoto_plugin_aws/resource/glacier.py +++ b/plugins/aws/resoto_plugin_aws/resource/glacier.py @@ -1,3 +1,4 @@ +import json from typing import ClassVar, Dict, List, Optional, Type from attrs import define, field @@ -229,6 +230,7 @@ class AwsGlacierVault(AwsResource): glacier_last_inventory_date: Optional[str] = field(default=None) glacier_number_of_archives: Optional[int] = field(default=None) glacier_size_in_bytes: Optional[int] = field(default=None) + glacier_access_policy: Optional[Json] = field(default=None) @classmethod def called_collect_apis(cls) -> List[AwsApiSpec]: @@ -239,16 +241,28 @@ def called_collect_apis(cls) -> List[AwsApiSpec]: ] @classmethod - def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None: + def collect(cls: Type[AwsResource], source: List[Json], builder: GraphBuilder) -> None: def add_tags(vault: AwsGlacierVault) -> None: tags = builder.client.get(service_name, "list-tags-for-vault", "Tags", vaultName=vault.name) if tags: vault.tags = tags - for vault in json: + def access_policy(vault: AwsGlacierVault) -> None: + response = builder.client.get( + service_name, + "get-vault-access-policy", + "policy", + vaultName=vault.name, + expected_errors=["ResourceNotFoundException"], + ) + if response and (policy_string := response.get("Policy")): + vault.glacier_access_policy = json.loads(policy_string) + + for vault in source: if vault_instance := cls.from_api(vault, builder): builder.add_node(vault_instance, vault) builder.submit_work(service_name, add_tags, vault_instance) + builder.submit_work(service_name, access_policy, vault_instance) for job in 
builder.client.list(service_name, "list-jobs", "JobList", vaultName=vault_instance.name): if job_instance := AwsGlacierJob.from_api(job, builder): builder.add_node(job_instance, job) diff --git a/plugins/aws/resoto_plugin_aws/resource/kms.py b/plugins/aws/resoto_plugin_aws/resource/kms.py index 769aa62a11..e9d589dbb6 100644 --- a/plugins/aws/resoto_plugin_aws/resource/kms.py +++ b/plugins/aws/resoto_plugin_aws/resource/kms.py @@ -1,4 +1,4 @@ -from contextlib import suppress +import json from typing import ClassVar, Dict, List, Optional, Type from attrs import define, field from resoto_plugin_aws.aws_client import AwsClient @@ -116,13 +116,20 @@ class AwsKmsKey(AwsResource, BaseAccessKey): kms_pending_deletion_window_in_days: Optional[int] = field(default=None) kms_mac_algorithms: List[str] = field(factory=list) kms_key_rotation_enabled: Optional[bool] = field(default=None) + kms_key_policy: Optional[Json] = field(default=None) @classmethod def called_collect_apis(cls) -> List[AwsApiSpec]: - return [cls.api_spec, AwsApiSpec(service_name, "describe-key"), AwsApiSpec(service_name, "list-resource-tags")] + return [ + cls.api_spec, + AwsApiSpec(service_name, "describe-key"), + AwsApiSpec(service_name, "list-resource-tags"), + AwsApiSpec(service_name, "get-key-policy"), + AwsApiSpec(service_name, "get-key-rotation-status"), + ] @classmethod - def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None: + def collect(cls: Type[AwsResource], js_list: List[Json], builder: GraphBuilder) -> None: def add_instance(key: Dict[str, str]) -> None: key_metadata = builder.client.get( service_name, "describe-key", result_name="KeyMetadata", KeyId=key["KeyId"] @@ -131,11 +138,25 @@ def add_instance(key: Dict[str, str]) -> None: if instance := AwsKmsKey.from_api(key_metadata, builder): builder.add_node(instance) builder.submit_work(service_name, add_tags, instance) + builder.submit_work(service_name, fetch_key_policy, instance) if instance.kms_key_manager == 
"CUSTOMER" and instance.access_key_status == "Enabled": builder.submit_work(service_name, add_rotation_status, instance) + def fetch_key_policy(key: AwsKmsKey) -> None: + with builder.suppress(f"{service_name}.get-key-policy"): + key_policy: Optional[str] = builder.client.get( # type: ignore + service_name, + "get-key-policy", + result_name="Policy", + KeyId=key.id, + PolicyName="default", + expected_errors=["NotFoundException"], + ) + if key_policy is not None: + key.kms_key_policy = json.loads(key_policy) + def add_rotation_status(key: AwsKmsKey) -> None: - with suppress(Exception): + with builder.suppress(f"{service_name}.get-key-rotation-status"): key.kms_key_rotation_enabled = builder.client.get( # type: ignore service_name, "get-key-rotation-status", result_name="KeyRotationEnabled", KeyId=key.id ) @@ -151,8 +172,8 @@ def add_tags(key: AwsKmsKey) -> None: if tags: key.tags = bend(ToDict(key="TagKey", value="TagValue"), tags) - for js in json: - add_instance(js) + for js in js_list: + builder.submit_work(service_name, add_instance, js) def update_resource_tag(self, client: AwsClient, key: str, value: str) -> bool: client.call( diff --git a/plugins/aws/resoto_plugin_aws/resource/lambda_.py b/plugins/aws/resoto_plugin_aws/resource/lambda_.py index 2d1111706f..7b9ffb397d 100644 --- a/plugins/aws/resoto_plugin_aws/resource/lambda_.py +++ b/plugins/aws/resoto_plugin_aws/resource/lambda_.py @@ -78,8 +78,8 @@ class AwsLambdaEnvironmentError: @define(eq=False, slots=False) -class AwsLambdaEnvironmentResponse: - kind: ClassVar[str] = "aws_lambda_environment_response" +class AwsLambdaEnvironment: + kind: ClassVar[str] = "aws_lambda_environment" kind_display: ClassVar[str] = "AWS Lambda Environment Response" kind_description: ClassVar[str] = ( "The AWS Lambda Environment Response provides information about the environment variables configured" @@ -242,12 +242,12 @@ class AwsLambdaFunction(AwsResource, BaseServerlessFunction): "aws_vpc", "aws_ec2_subnet", 
"aws_ec2_security_group", - "aws_api_gateway_rest_api", - "aws_api_gateway_resource", + "aws_apigateway_rest_api", + "aws_apigateway_resource", ], "delete": ["aws_vpc", "aws_ec2_subnet", "aws_ec2_security_group", "aws_kms_key"], }, - "successors": {"default": ["aws_kms_key"], "delete": ["aws_api_gateway_rest_api", "aws_api_gateway_resource"]}, + "successors": {"default": ["aws_kms_key"], "delete": ["aws_apigateway_rest_api", "aws_apigateway_resource"]}, } mapping: ClassVar[Dict[str, Bender]] = { "id": S("FunctionName"), @@ -264,7 +264,7 @@ class AwsLambdaFunction(AwsResource, BaseServerlessFunction): "function_code_sha256": S("CodeSha256"), "function_version": S("Version"), "function_dead_letter_config": S("DeadLetterConfig", "TargetArn"), - "function_environment": S("Environment") >> Bend(AwsLambdaEnvironmentResponse.mapping), + "function_environment": S("Environment") >> Bend(AwsLambdaEnvironment.mapping), "function_kms_key_arn": S("KMSKeyArn"), "function_tracing_config": S("TracingConfig", "Mode"), "function_master_arn": S("MasterArn"), @@ -295,7 +295,7 @@ class AwsLambdaFunction(AwsResource, BaseServerlessFunction): function_code_sha256: Optional[str] = field(default=None) function_version: Optional[str] = field(default=None) function_dead_letter_config: Optional[str] = field(default=None) - function_environment: Optional[AwsLambdaEnvironmentResponse] = field(default=None) + function_environment: Optional[AwsLambdaEnvironment] = field(default=None) function_kms_key_arn: Optional[str] = field(default=None) function_tracing_config: Optional[str] = field(default=None) function_master_arn: Optional[str] = field(default=None) diff --git a/plugins/aws/resoto_plugin_aws/resource/opensearch.py b/plugins/aws/resoto_plugin_aws/resource/opensearch.py new file mode 100644 index 0000000000..0ab7416107 --- /dev/null +++ b/plugins/aws/resoto_plugin_aws/resource/opensearch.py @@ -0,0 +1,359 @@ +import logging +from datetime import datetime +from typing import ClassVar, Dict, 
Optional, List, Type + +from attrs import define, field +from boto3.exceptions import Boto3Error + +from resoto_plugin_aws.resource.base import AwsResource, GraphBuilder, AwsApiSpec +from resoto_plugin_aws.resource.cognito import AwsCognitoUserPool +from resoto_plugin_aws.resource.ec2 import AwsEc2Subnet, AwsEc2SecurityGroup, AwsEc2Vpc, AwsEc2InstanceType +from resoto_plugin_aws.utils import ToDict +from resotolib.json_bender import Bender, S, Bend, ParseJson +from resotolib.types import Json + +log = logging.getLogger("resoto.plugins.aws") + +service_name = "opensearch" + + +@define(eq=False, slots=False) +class AwsOpenSearchClusterConfig: + kind: ClassVar[str] = "aws_opensearch_cluster_config" + mapping: ClassVar[Dict[str, Bender]] = { + "instance_type": S("InstanceType"), + "instance_count": S("InstanceCount"), + "dedicated_master_enabled": S("DedicatedMasterEnabled"), + "zone_awareness_enabled": S("ZoneAwarenessEnabled"), + "zone_awareness_config": S("ZoneAwarenessConfig", "AvailabilityZoneCount"), + "dedicated_master_type": S("DedicatedMasterType"), + "dedicated_master_count": S("DedicatedMasterCount"), + "warm_enabled": S("WarmEnabled"), + "warm_type": S("WarmType"), + "warm_count": S("WarmCount"), + "cold_storage_options": S("ColdStorageOptions", "Enabled"), + "multi_az_with_standby_enabled": S("MultiAZWithStandbyEnabled"), + } + instance_type: Optional[str] = field(default=None, metadata={"description": "Instance type of data nodes in the cluster."}) # fmt: skip + instance_count: Optional[int] = field(default=None, metadata={"description": "Number of data nodes in the cluster. 
This number must be greater than 1, otherwise you receive a validation exception."}) # fmt: skip + dedicated_master_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether dedicated master nodes are enabled for the cluster.True if the cluster will use a dedicated master node.False if the cluster will not."}) # fmt: skip + zone_awareness_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether multiple Availability Zones are enabled. For more information, see Configuring a multi-AZ domain in Amazon OpenSearch Service."}) # fmt: skip + zone_awareness_config: Optional[int] = field(default=None, metadata={"description": "Container for zone awareness configuration options. Only required if ZoneAwarenessEnabled is true."}) # fmt: skip + dedicated_master_type: Optional[str] = field(default=None, metadata={"description": "OpenSearch Service instance type of the dedicated master nodes in the cluster."}) # fmt: skip + dedicated_master_count: Optional[int] = field(default=None, metadata={"description": "Number of dedicated master nodes in the cluster. This number must be greater than 2 and not 4, otherwise you receive a validation exception."}) # fmt: skip + warm_enabled: Optional[bool] = field(default=None, metadata={"description": "Whether to enable warm storage for the cluster."}) # fmt: skip + warm_type: Optional[str] = field(default=None, metadata={"description": "The instance type for the cluster's warm nodes."}) # fmt: skip + warm_count: Optional[int] = field(default=None, metadata={"description": "The number of warm nodes in the cluster."}) # fmt: skip + cold_storage_options: Optional[bool] = field(default=None, metadata={"description": "Container for cold storage configuration options."}) # fmt: skip + multi_az_with_standby_enabled: Optional[bool] = field(default=None, metadata={"description": "A boolean that indicates whether a multi-AZ domain is turned on with a standby AZ. 
For more information, see Configuring a multi-AZ domain in Amazon OpenSearch Service."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchEBSOptions: + kind: ClassVar[str] = "aws_opensearch_ebs_options" + mapping: ClassVar[Dict[str, Bender]] = { + "ebs_enabled": S("EBSEnabled"), + "volume_type": S("VolumeType"), + "volume_size": S("VolumeSize"), + "iops": S("Iops"), + "throughput": S("Throughput"), + } + ebs_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether EBS volumes are attached to data nodes in an OpenSearch Service domain."}) # fmt: skip + volume_type: Optional[str] = field(default=None, metadata={"description": "Specifies the type of EBS volumes attached to data nodes."}) # fmt: skip + volume_size: Optional[int] = field(default=None, metadata={"description": "Specifies the size (in GiB) of EBS volumes attached to data nodes."}) # fmt: skip + iops: Optional[int] = field(default=None, metadata={"description": "Specifies the baseline input/output (I/O) performance of EBS volumes attached to data nodes. Applicable only for the gp3 and provisioned IOPS EBS volume types."}) # fmt: skip + throughput: Optional[int] = field(default=None, metadata={"description": "Specifies the throughput (in MiB/s) of the EBS volumes attached to data nodes. Applicable only for the gp3 volume type."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchVPCDerivedInfo: + kind: ClassVar[str] = "aws_opensearch_vpc_derived_info" + mapping: ClassVar[Dict[str, Bender]] = { + "vpc_id": S("VPCId"), + "subnet_ids": S("SubnetIds", default=[]), + "availability_zones": S("AvailabilityZones", default=[]), + "security_group_ids": S("SecurityGroupIds", default=[]), + } + vpc_id: Optional[str] = field(default=None, metadata={"description": "The ID for your VPC. 
Amazon VPC generates this value when you create a VPC."}) # fmt: skip + subnet_ids: Optional[List[str]] = field(factory=list, metadata={"description": "A list of subnet IDs associated with the VPC endpoints for the domain."}) # fmt: skip + availability_zones: Optional[List[str]] = field(factory=list, metadata={"description": "The list of Availability Zones associated with the VPC subnets."}) # fmt: skip + security_group_ids: Optional[List[str]] = field(factory=list, metadata={"description": "The list of security group IDs associated with the VPC endpoints for the domain."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchCognitoOptions: + kind: ClassVar[str] = "aws_opensearch_cognito_options" + mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "user_pool_id": S("UserPoolId"), + "identity_pool_id": S("IdentityPoolId"), + "role_arn": S("RoleArn"), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "Whether to enable or disable Amazon Cognito authentication for OpenSearch Dashboards."}) # fmt: skip + user_pool_id: Optional[str] = field(default=None, metadata={"description": "The Amazon Cognito user pool ID that you want OpenSearch Service to use for OpenSearch Dashboards authentication."}) # fmt: skip + identity_pool_id: Optional[str] = field(default=None, metadata={"description": "The Amazon Cognito identity pool ID that you want OpenSearch Service to use for OpenSearch Dashboards authentication."}) # fmt: skip + role_arn: Optional[str] = field(default=None, metadata={"description": "The AmazonOpenSearchServiceCognitoAccess role that allows OpenSearch Service to configure your user pool and identity pool."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchEncryptionAtRestOptions: + kind: ClassVar[str] = "aws_opensearch_encryption_at_rest_options" + mapping: ClassVar[Dict[str, Bender]] = {"enabled": S("Enabled"), "kms_key_id": S("KmsKeyId")} + enabled: Optional[bool] = 
field(default=None, metadata={"description": "True to enable encryption at rest."}) # fmt: skip + kms_key_id: Optional[str] = field(default=None, metadata={"description": "The KMS key ID. Takes the form 1a2a3a4-1a2a-3a4a-5a6a-1a2a3a4a5a6a."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchLogPublishingOption: + kind: ClassVar[str] = "aws_opensearch_log_publishing_option" + mapping: ClassVar[Dict[str, Bender]] = { + "cloud_watch_logs_log_group_arn": S("CloudWatchLogsLogGroupArn"), + "enabled": S("Enabled"), + } + cloud_watch_logs_log_group_arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) of the CloudWatch Logs group to publish logs to."}) # fmt: skip + enabled: Optional[bool] = field(default=None, metadata={"description": "Whether the log should be published."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchServiceSoftwareOptions: + kind: ClassVar[str] = "aws_opensearch_service_software_options" + mapping: ClassVar[Dict[str, Bender]] = { + "current_version": S("CurrentVersion"), + "new_version": S("NewVersion"), + "update_available": S("UpdateAvailable"), + "cancellable": S("Cancellable"), + "update_status": S("UpdateStatus"), + "description": S("Description"), + "automated_update_date": S("AutomatedUpdateDate"), + "optional_deployment": S("OptionalDeployment"), + } + current_version: Optional[str] = field(default=None, metadata={"description": "The current service software version present on the domain."}) # fmt: skip + new_version: Optional[str] = field(default=None, metadata={"description": "The new service software version, if one is available."}) # fmt: skip + update_available: Optional[bool] = field(default=None, metadata={"description": "True if you're able to update your service software version. 
False if you can't update your service software version."}) # fmt: skip + cancellable: Optional[bool] = field(default=None, metadata={"description": "True if you're able to cancel your service software version update. False if you can't cancel your service software update."}) # fmt: skip + update_status: Optional[str] = field(default=None, metadata={"description": "The status of your service software update."}) # fmt: skip + description: Optional[str] = field(default=None, metadata={"description": "A description of the service software update status."}) # fmt: skip + automated_update_date: Optional[datetime] = field(default=None, metadata={"description": "The timestamp, in Epoch time, until which you can manually request a service software update. After this date, we automatically update your service software."}) # fmt: skip + optional_deployment: Optional[bool] = field(default=None, metadata={"description": "True if a service software is never automatically updated. False if a service software is automatically updated after the automated update date."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchDomainEndpointOptions: + kind: ClassVar[str] = "aws_opensearch_domain_endpoint_options" + mapping: ClassVar[Dict[str, Bender]] = { + "enforce_https": S("EnforceHTTPS"), + "tls_security_policy": S("TLSSecurityPolicy"), + "custom_endpoint_enabled": S("CustomEndpointEnabled"), + "custom_endpoint": S("CustomEndpoint"), + "custom_endpoint_certificate_arn": S("CustomEndpointCertificateArn"), + } + enforce_https: Optional[bool] = field(default=None, metadata={"description": "True to require that all traffic to the domain arrive over HTTPS."}) # fmt: skip + tls_security_policy: Optional[str] = field(default=None, metadata={"description": "Specify the TLS security policy to apply to the HTTPS endpoint of the domain. 
The policy can be one of the following values: Policy-Min-TLS-1-0-2019-07: TLS security policy that supports TLS version 1.0 to TLS version 1.2 Policy-Min-TLS-1-2-2019-07: TLS security policy that supports only TLS version 1.2"}) # fmt: skip + custom_endpoint_enabled: Optional[bool] = field(default=None, metadata={"description": "Whether to enable a custom endpoint for the domain."}) # fmt: skip + custom_endpoint: Optional[str] = field(default=None, metadata={"description": "The fully qualified URL for the custom endpoint."}) # fmt: skip + custom_endpoint_certificate_arn: Optional[str] = field(default=None, metadata={"description": "The ARN for your security certificate, managed in Amazon Web Services Certificate Manager (ACM)."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchSAMLIdp: + kind: ClassVar[str] = "aws_opensearch_saml_idp" + mapping: ClassVar[Dict[str, Bender]] = {"metadata_content": S("MetadataContent"), "entity_id": S("EntityId")} + metadata_content: Optional[str] = field(default=None, metadata={"description": "The metadata of the SAML application, in XML format."}) # fmt: skip + entity_id: Optional[str] = field(default=None, metadata={"description": "The unique entity ID of the application in the SAML identity provider."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchSAMLOptionsOutput: + kind: ClassVar[str] = "aws_opensearch_saml_options_output" + mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "idp": S("Idp") >> Bend(AwsOpenSearchSAMLIdp.mapping), + "subject_key": S("SubjectKey"), + "roles_key": S("RolesKey"), + "session_timeout_minutes": S("SessionTimeoutMinutes"), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "True if SAML is enabled."}) # fmt: skip + idp: Optional[AwsOpenSearchSAMLIdp] = field(default=None, metadata={"description": "Describes the SAML identity provider's information."}) # fmt: skip + subject_key: Optional[str] = field(default=None, 
metadata={"description": "The key used for matching the SAML subject attribute."}) # fmt: skip + roles_key: Optional[str] = field(default=None, metadata={"description": "The key used for matching the SAML roles attribute."}) # fmt: skip + session_timeout_minutes: Optional[int] = field(default=None, metadata={"description": "The duration, in minutes, after which a user session becomes inactive."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchAdvancedSecurityOptions: + kind: ClassVar[str] = "aws_opensearch_advanced_security_options" + mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "internal_user_database_enabled": S("InternalUserDatabaseEnabled"), + "saml_options": S("SAMLOptions") >> Bend(AwsOpenSearchSAMLOptionsOutput.mapping), + "anonymous_auth_disable_date": S("AnonymousAuthDisableDate"), + "anonymous_auth_enabled": S("AnonymousAuthEnabled"), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "True if fine-grained access control is enabled."}) # fmt: skip + internal_user_database_enabled: Optional[bool] = field(default=None, metadata={"description": "True if the internal user database is enabled."}) # fmt: skip + saml_options: Optional[AwsOpenSearchSAMLOptionsOutput] = field(default=None, metadata={"description": "Container for information about the SAML configuration for OpenSearch Dashboards."}) # fmt: skip + anonymous_auth_disable_date: Optional[datetime] = field(default=None, metadata={"description": "Date and time when the migration period will be disabled. Only necessary when enabling fine-grained access control on an existing domain."}) # fmt: skip + anonymous_auth_enabled: Optional[bool] = field(default=None, metadata={"description": "True if a 30-day migration period is enabled, during which administrators can create role mappings. 
Only necessary when enabling fine-grained access control on an existing domain."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchAutoTuneOptionsOutput: + kind: ClassVar[str] = "aws_opensearch_auto_tune_options_output" + mapping: ClassVar[Dict[str, Bender]] = { + "state": S("State"), + "error_message": S("ErrorMessage"), + "use_off_peak_window": S("UseOffPeakWindow"), + } + state: Optional[str] = field(default=None, metadata={"description": "The current state of Auto-Tune on the domain."}) # fmt: skip + error_message: Optional[str] = field(default=None, metadata={"description": "Any errors that occurred while enabling or disabling Auto-Tune."}) # fmt: skip + use_off_peak_window: Optional[bool] = field(default=None, metadata={"description": "Whether the domain's off-peak window will be used to deploy Auto-Tune changes rather than a maintenance schedule."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchChangeProgressDetails: + kind: ClassVar[str] = "aws_opensearch_change_progress_details" + mapping: ClassVar[Dict[str, Bender]] = {"change_id": S("ChangeId"), "message": S("Message")} + change_id: Optional[str] = field(default=None, metadata={"description": "The ID of the configuration change."}) # fmt: skip + message: Optional[str] = field(default=None, metadata={"description": "A message corresponding to the status of the configuration change."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchWindowStartTime: + kind: ClassVar[str] = "aws_opensearch_window_start_time" + mapping: ClassVar[Dict[str, Bender]] = {"hours": S("Hours"), "minutes": S("Minutes")} + hours: Optional[int] = field(default=None, metadata={"description": "The start hour of the window in Coordinated Universal Time (UTC), using 24-hour time. For example, 17 refers to 5:00 P.M. 
UTC."}) # fmt: skip + minutes: Optional[int] = field(default=None, metadata={"description": "The start minute of the window, in UTC."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchOffPeakWindow: + kind: ClassVar[str] = "aws_opensearch_off_peak_window" + mapping: ClassVar[Dict[str, Bender]] = { + "window_start_time": S("WindowStartTime") >> Bend(AwsOpenSearchWindowStartTime.mapping) + } + window_start_time: Optional[AwsOpenSearchWindowStartTime] = field(default=None, metadata={"description": "A custom start time for the off-peak window, in Coordinated Universal Time (UTC). The window length will always be 10 hours, so you can't specify an end time. For example, if you specify 11:00 P.M. UTC as a start time, the end time will automatically be set to 9:00 A.M."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchOffPeakWindowOptions: + kind: ClassVar[str] = "aws_opensearch_off_peak_window_options" + mapping: ClassVar[Dict[str, Bender]] = { + "enabled": S("Enabled"), + "off_peak_window": S("OffPeakWindow") >> Bend(AwsOpenSearchOffPeakWindow.mapping), + } + enabled: Optional[bool] = field(default=None, metadata={"description": "Whether to enable an off-peak window. This option is only available when modifying a domain created prior to February 16, 2023, not when creating a new domain. All domains created after this date have the off-peak window enabled by default. 
You can't disable the off-peak window after it's enabled for a domain."}) # fmt: skip + off_peak_window: Optional[AwsOpenSearchOffPeakWindow] = field(default=None, metadata={"description": "Off-peak window settings for the domain."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsOpenSearchDomain(AwsResource): + kind: ClassVar[str] = "aws_opensearch_domain" + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("DomainId"), + "tags": S("Tags", default=[]) >> ToDict(), + "name": S("DomainName"), + "arn": S("ARN"), + "created": S("Created"), + "deleted": S("Deleted"), + "endpoint": S("Endpoint"), + "endpoint_v2": S("EndpointV2"), + "endpoints": S("Endpoints"), + "processing": S("Processing"), + "upgrade_processing": S("UpgradeProcessing"), + "engine_version": S("EngineVersion"), + "cluster_config": S("ClusterConfig") >> Bend(AwsOpenSearchClusterConfig.mapping), + "ebs_options": S("EBSOptions") >> Bend(AwsOpenSearchEBSOptions.mapping), + "access_policies": S("AccessPolicies") >> ParseJson(), + "ip_address_type": S("IPAddressType"), + "snapshot_options": S("SnapshotOptions", "AutomatedSnapshotStartHour"), + "vpc_options": S("VPCOptions") >> Bend(AwsOpenSearchVPCDerivedInfo.mapping), + "cognito_options": S("CognitoOptions") >> Bend(AwsOpenSearchCognitoOptions.mapping), + "encryption_at_rest_options": S("EncryptionAtRestOptions") + >> Bend(AwsOpenSearchEncryptionAtRestOptions.mapping), + "node_to_node_encryption_options": S("NodeToNodeEncryptionOptions", "Enabled"), + "advanced_options": S("AdvancedOptions"), + "log_publishing_options": S("LogPublishingOptions"), + "service_software_options": S("ServiceSoftwareOptions") >> Bend(AwsOpenSearchServiceSoftwareOptions.mapping), + "domain_endpoint_options": S("DomainEndpointOptions") >> Bend(AwsOpenSearchDomainEndpointOptions.mapping), + "advanced_security_options": S("AdvancedSecurityOptions") >> Bend(AwsOpenSearchAdvancedSecurityOptions.mapping), + "auto_tune_options": S("AutoTuneOptions") >> 
Bend(AwsOpenSearchAutoTuneOptionsOutput.mapping), + "change_progress_details": S("ChangeProgressDetails") >> Bend(AwsOpenSearchChangeProgressDetails.mapping), + "off_peak_window_options": S("OffPeakWindowOptions") >> Bend(AwsOpenSearchOffPeakWindowOptions.mapping), + "software_update_options": S("SoftwareUpdateOptions", "AutoSoftwareUpdateEnabled"), + } + created: Optional[bool] = field(default=None, metadata={"description": "Creation status of an OpenSearch Service domain. True if domain creation is complete. False if domain creation is still in progress."}) # fmt: skip + deleted: Optional[bool] = field(default=None, metadata={"description": "Deletion status of an OpenSearch Service domain. True if domain deletion is complete. False if domain deletion is still in progress. Once deletion is complete, the status of the domain is no longer returned."}) # fmt: skip + endpoint: Optional[str] = field(default=None, metadata={"description": "Domain-specific endpoint used to submit index, search, and data upload requests to the domain."}) # fmt: skip + endpoint_v2: Optional[str] = field(default=None, metadata={"description": "The domain endpoint to which index and search requests are submitted. For example, search-imdb-movies-oopcnjfn6ugo.eu-west-1.es.amazonaws.com or doc-imdb-movies-oopcnjfn6u.eu-west-1.es.amazonaws.com."}) # fmt: skip + endpoints: Optional[Dict[str, str]] = field(default=None, metadata={"description": "The key-value pair that exists if the OpenSearch Service domain uses VPC endpoints.. Example key, value: 'vpc','vpc-endpoint-h2dsd34efgyghrtguk5gt6j2foh4.us-east-1.es.amazonaws.com'."}) # fmt: skip + processing: Optional[bool] = field(default=None, metadata={"description": "The status of the domain configuration. True if OpenSearch Service is processing configuration changes. 
False if the configuration is active."}) # fmt: skip + upgrade_processing: Optional[bool] = field(default=None, metadata={"description": "The status of a domain version upgrade to a new version of OpenSearch or Elasticsearch. True if OpenSearch Service is in the process of a version upgrade. False if the configuration is active."}) # fmt: skip + engine_version: Optional[str] = field(default=None, metadata={"description": "Version of OpenSearch or Elasticsearch that the domain is running, in the format Elasticsearch_X.Y or OpenSearch_X.Y."}) # fmt: skip + cluster_config: Optional[AwsOpenSearchClusterConfig] = field(default=None, metadata={"description": "Container for the cluster configuration of the domain."}) # fmt: skip + ebs_options: Optional[AwsOpenSearchEBSOptions] = field(default=None, metadata={"description": "Container for EBS-based storage settings for the domain."}) # fmt: skip + access_policies: Optional[Json] = field(default=None, metadata={"description": "Identity and Access Management (IAM) policy document specifying the access policies for the domain."}) # fmt: skip + ip_address_type: Optional[str] = field(default=None, metadata={"description": "The type of IP addresses supported by the endpoint for the domain."}) # fmt: skip + snapshot_options: Optional[int] = field(default=None, metadata={"description": "DEPRECATED. 
Container for parameters required to configure automated snapshots of domain indexes."}) # fmt: skip + vpc_options: Optional[AwsOpenSearchVPCDerivedInfo] = field(default=None, metadata={"description": "The VPC configuration for the domain."}) # fmt: skip + cognito_options: Optional[AwsOpenSearchCognitoOptions] = field(default=None, metadata={"description": "Key-value pairs to configure Amazon Cognito authentication for OpenSearch Dashboards."}) # fmt: skip + encryption_at_rest_options: Optional[AwsOpenSearchEncryptionAtRestOptions] = field(default=None, metadata={"description": "Encryption at rest settings for the domain."}) # fmt: skip + node_to_node_encryption_options: Optional[bool] = field(default=None, metadata={"description": "Whether node-to-node encryption is enabled or disabled."}) # fmt: skip + advanced_options: Optional[Dict[str, str]] = field(default=None, metadata={"description": "Key-value pairs that specify advanced configuration options."}) # fmt: skip + log_publishing_options: Optional[Dict[str, AwsOpenSearchLogPublishingOption]] = field(default=None, metadata={"description": "Log publishing options for the domain."}) # fmt: skip + service_software_options: Optional[AwsOpenSearchServiceSoftwareOptions] = field(default=None, metadata={"description": "The current status of the domain's service software."}) # fmt: skip + domain_endpoint_options: Optional[AwsOpenSearchDomainEndpointOptions] = field(default=None, metadata={"description": "Additional options for the domain endpoint, such as whether to require HTTPS for all traffic."}) # fmt: skip + advanced_security_options: Optional[AwsOpenSearchAdvancedSecurityOptions] = field(default=None, metadata={"description": "Settings for fine-grained access control."}) # fmt: skip + auto_tune_options: Optional[AwsOpenSearchAutoTuneOptionsOutput] = field(default=None, metadata={"description": "Auto-Tune settings for the domain."}) # fmt: skip + change_progress_details: 
Optional[AwsOpenSearchChangeProgressDetails] = field(default=None, metadata={"description": "Information about a configuration change happening on the domain."}) # fmt: skip + off_peak_window_options: Optional[AwsOpenSearchOffPeakWindowOptions] = field(default=None, metadata={"description": "Options that specify a custom 10-hour window during which OpenSearch Service can perform configuration changes on the domain."}) # fmt: skip + software_update_options: Optional[bool] = field(default=None, metadata={"description": "Service software update options for the domain."}) # fmt: skip + + @classmethod + def collect_resources(cls, builder: GraphBuilder) -> None: + try: + if dl_raw := builder.client.list(service_name, "list-domain-names", "DomainNames"): + items = builder.client.list( + aws_service=service_name, + action="describe-domains", + result_name="DomainStatusList", + DomainNames=[d["DomainName"] for d in dl_raw], + ) + cls.collect(items, builder) + except Boto3Error as e: + msg = f"Error while collecting AwsOpenSearchDomain in region {builder.region.name}: {e}" + builder.core_feedback.error(msg, log) + raise + except Exception as e: + msg = f"Error while collecting AwsOpenSearchDomain in region {builder.region.name}: {e}" + builder.core_feedback.info(msg, log) + raise + + def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: + if cluster_config := self.cluster_config: + if instance_type := cluster_config.instance_type: + builder.add_edge(self, reverse=True, clazz=AwsEc2InstanceType, id=instance_type) + if vpc_options := self.vpc_options: + if vpc_id := vpc_options.vpc_id: + builder.dependant_node(self, reverse=True, delete_same_as_default=True, clazz=AwsEc2Vpc, id=vpc_id) + for security_group_id in self.vpc_options.security_group_ids or []: + builder.dependant_node( + self, reverse=True, delete_same_as_default=True, clazz=AwsEc2SecurityGroup, id=security_group_id + ) + for subnet_id in self.vpc_options.subnet_ids or []: + 
builder.dependant_node( + self, reverse=True, delete_same_as_default=True, clazz=AwsEc2Subnet, id=subnet_id + ) + if cognito_options := self.cognito_options: + if user_pool_id := cognito_options.user_pool_id: + builder.add_edge(self, clazz=AwsCognitoUserPool, id=user_pool_id) + + @classmethod + def called_collect_apis(cls) -> List[AwsApiSpec]: + return [AwsApiSpec(service_name, "list-domain-names"), AwsApiSpec(service_name, "describe-domains")] + + @classmethod + def service_name(cls) -> Optional[str]: + return service_name + + +resources: List[Type[AwsResource]] = [AwsOpenSearchDomain] diff --git a/plugins/aws/resoto_plugin_aws/resource/rds.py b/plugins/aws/resoto_plugin_aws/resource/rds.py index 639aebde13..f62ba5433c 100644 --- a/plugins/aws/resoto_plugin_aws/resource/rds.py +++ b/plugins/aws/resoto_plugin_aws/resource/rds.py @@ -1,18 +1,20 @@ from datetime import datetime from typing import ClassVar, Dict, List, Optional, Type + from attr import define, field + +from resoto_plugin_aws.aws_client import AwsClient from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder from resoto_plugin_aws.resource.cloudwatch import AwsCloudwatchQuery, AwsCloudwatchMetricData from resoto_plugin_aws.resource.ec2 import AwsEc2SecurityGroup, AwsEc2Subnet, AwsEc2Vpc from resoto_plugin_aws.resource.kinesis import AwsKinesisStream from resoto_plugin_aws.resource.kms import AwsKmsKey -from resoto_plugin_aws.utils import ToDict -from resotolib.baseresources import BaseDatabase, ModelReference +from resoto_plugin_aws.utils import ToDict, TagsValue +from resotolib.baseresources import BaseDatabase, ModelReference, BaseSnapshot from resotolib.graph import Graph from resotolib.json_bender import F, K, S, Bend, Bender, ForallBend, bend from resotolib.types import Json from resotolib.utils import utc -from resoto_plugin_aws.aws_client import AwsClient service_name = "rds" @@ -316,19 +318,6 @@ class AwsRdsDBRole: status: Optional[str] = field(default=None)
-@define(eq=False, slots=False) -class AwsRdsTag: - kind: ClassVar[str] = "aws_rds_tag" - kind_display: ClassVar[str] = "AWS RDS Tag" - kind_description: ClassVar[str] = ( - "Tags for Amazon RDS instances and resources, which are key-value pairs to" - " help manage and organize resources." - ) - mapping: ClassVar[Dict[str, Bender]] = {"key": S("Key"), "value": S("Value")} - key: Optional[str] = field(default=None) - value: Optional[str] = field(default=None) - - @define(eq=False, slots=False) class AwsRdsInstance(RdsTaggable, AwsResource, BaseDatabase): kind: ClassVar[str] = "aws_rds_instance" @@ -347,7 +336,7 @@ class AwsRdsInstance(RdsTaggable, AwsResource, BaseDatabase): } mapping: ClassVar[Dict[str, Bender]] = { "id": S("DBInstanceIdentifier"), - "tags": S("TagList", default=[]) >> ForallBend(AwsRdsTag.mapping) >> ToDict(), + "tags": S("TagList", default=[]) >> ToDict(), "name": S("DBName"), "ctime": S("InstanceCreateTime"), "arn": S("DBInstanceArn"), @@ -738,7 +727,6 @@ class AwsRdsCluster(RdsTaggable, AwsResource, BaseDatabase): "rds_db_cluster_parameter_group": S("DBClusterParameterGroup"), "rds_db_subnet_group_name": S("DBSubnetGroup"), "rds_automatic_restart_time": S("AutomaticRestartTime"), - "rds_percent_progress": S("PercentProgress"), "rds_earliest_restorable_time": S("EarliestRestorableTime"), "rds_endpoint": S("Endpoint"), "rds_reader_endpoint": S("ReaderEndpoint"), @@ -812,7 +800,6 @@ class AwsRdsCluster(RdsTaggable, AwsResource, BaseDatabase): rds_db_cluster_parameter_group: Optional[str] = field(default=None) rds_db_subnet_group_name: Optional[str] = field(default=None) rds_automatic_restart_time: Optional[datetime] = field(default=None) - rds_percent_progress: Optional[str] = field(default=None) rds_earliest_restorable_time: Optional[datetime] = field(default=None) rds_endpoint: Optional[str] = field(default=None) rds_reader_endpoint: Optional[str] = field(default=None) @@ -895,4 +882,178 @@ def called_mutator_apis(cls) -> List[AwsApiSpec]: 
return super().called_mutator_apis() + [AwsApiSpec(service_name, "delete-db-cluster")] -resources: List[Type[AwsResource]] = [AwsRdsCluster, AwsRdsInstance] +@define(eq=False, slots=False) +class AwsRdsSnapshot(RdsTaggable, AwsResource, BaseSnapshot): + kind: ClassVar[str] = "aws_rds_snapshot" + api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("rds", "describe-db-snapshots", "DBSnapshots") + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("DBSnapshotIdentifier"), + "tags": S("TagList", default=[]) >> ToDict(), + "name": S("Tags", default=[]) >> TagsValue("Name"), + "ctime": S("SnapshotCreateTime"), + "arn": S("DBSnapshotArn"), + "rds_db_instance_identifier": S("DBInstanceIdentifier"), + "rds_engine": S("Engine"), + "rds_allocated_storage": S("AllocatedStorage"), + "snapshot_status": S("Status"), + "rds_port": S("Port"), + "rds_availability_zone": S("AvailabilityZone"), + "rds_vpc_id": S("VpcId"), + "rds_instance_create_time": S("InstanceCreateTime"), + "rds_master_username": S("MasterUsername"), + "rds_engine_version": S("EngineVersion"), + "rds_license_model": S("LicenseModel"), + "rds_snapshot_type": S("SnapshotType"), + "rds_iops": S("Iops"), + "rds_option_group_name": S("OptionGroupName"), + "rds_percent_progress": S("PercentProgress"), + "rds_source_region": S("SourceRegion"), + "rds_source_db_snapshot_identifier": S("SourceDBSnapshotIdentifier"), + "rds_storage_type": S("StorageType"), + "rds_tde_credential_arn": S("TdeCredentialArn"), + "rds_encrypted": S("Encrypted"), + "rds_kms_key_id": S("KmsKeyId"), + "rds_timezone": S("Timezone"), + "rds_iam_database_authentication_enabled": S("IAMDatabaseAuthenticationEnabled"), + "rds_processor_features": S("ProcessorFeatures", default=[]) >> ToDict(key="Name", value="Value"), + "rds_dbi_resource_id": S("DbiResourceId"), + "rds_original_snapshot_create_time": S("OriginalSnapshotCreateTime"), + "rds_snapshot_database_time": S("SnapshotDatabaseTime"), + "rds_snapshot_target": S("SnapshotTarget"), + 
"rds_storage_throughput": S("StorageThroughput"), + "rds_db_system_id": S("DBSystemId"), + "rds_dedicated_log_volume": S("DedicatedLogVolume"), + "rds_multi_tenant": S("MultiTenant"), + } + rds_db_instance_identifier: Optional[str] = field(default=None, metadata={"description": "Specifies the DB instance identifier of the DB instance this DB snapshot was created from."}) # fmt: skip + rds_engine: Optional[str] = field(default=None, metadata={"description": "Specifies the name of the database engine."}) # fmt: skip + rds_allocated_storage: Optional[int] = field(default=None, metadata={"description": "Specifies the allocated storage size in gibibytes (GiB)."}) # fmt: skip + rds_port: Optional[int] = field(default=None, metadata={"description": "Specifies the port that the database engine was listening on at the time of the snapshot."}) # fmt: skip + rds_availability_zone: Optional[str] = field(default=None, metadata={"description": "Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot."}) # fmt: skip + rds_vpc_id: Optional[str] = field(default=None, metadata={"description": "Provides the VPC ID associated with the DB snapshot."}) # fmt: skip + rds_instance_create_time: Optional[datetime] = field(default=None, metadata={"description": "Specifies the time in Coordinated Universal Time (UTC) when the DB instance, from which the snapshot was taken, was created."}) # fmt: skip + rds_master_username: Optional[str] = field(default=None, metadata={"description": "Provides the master username for the DB snapshot."}) # fmt: skip + rds_engine_version: Optional[str] = field(default=None, metadata={"description": "Specifies the version of the database engine."}) # fmt: skip + rds_license_model: Optional[str] = field(default=None, metadata={"description": "License model information for the restored DB instance."}) # fmt: skip + rds_snapshot_type: Optional[str] = field(default=None, metadata={"description": "Provides the type of 
the DB snapshot."}) # fmt: skip + rds_iops: Optional[int] = field(default=None, metadata={"description": "Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot."}) # fmt: skip + rds_option_group_name: Optional[str] = field(default=None, metadata={"description": "Provides the option group name for the DB snapshot."}) # fmt: skip + rds_percent_progress: Optional[int] = field(default=None, metadata={"description": "The percentage of the estimated data that has been transferred."}) # fmt: skip + rds_source_region: Optional[str] = field(default=None, metadata={"description": "The Amazon Web Services Region that the DB snapshot was created in or copied from."}) # fmt: skip + rds_source_db_snapshot_identifier: Optional[str] = field(default=None, metadata={"description": "The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has a value in the case of a cross-account or cross-Region copy."}) # fmt: skip + rds_storage_type: Optional[str] = field(default=None, metadata={"description": "Specifies the storage type associated with DB snapshot."}) # fmt: skip + rds_tde_credential_arn: Optional[str] = field(default=None, metadata={"description": "The ARN from the key store with which to associate the instance for TDE encryption."}) # fmt: skip + rds_encrypted: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the DB snapshot is encrypted."}) # fmt: skip + rds_kms_key_id: Optional[str] = field(default=None, metadata={"description": "If Encrypted is true, the Amazon Web Services KMS key identifier for the encrypted DB snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key."}) # fmt: skip + rds_timezone: Optional[str] = field(default=None, metadata={"description": "The time zone of the DB snapshot. In most cases, the Timezone element is empty. 
Timezone content appears only for snapshots taken from Microsoft SQL Server DB instances that were created with a time zone specified."}) # fmt: skip + rds_iam_database_authentication_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled."}) # fmt: skip + rds_processor_features: Optional[Dict[str, str]] = field(default=None, metadata={"description": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance when the DB snapshot was created."}) # fmt: skip + rds_dbi_resource_id: Optional[str] = field(default=None, metadata={"description": "The identifier for the source DB instance, which can't be changed and which is unique to an Amazon Web Services Region."}) # fmt: skip + rds_original_snapshot_create_time: Optional[datetime] = field(default=None, metadata={"description": "Specifies the time of the CreateDBSnapshot operation in Coordinated Universal Time (UTC). Doesn't change when the snapshot is copied."}) # fmt: skip + rds_snapshot_database_time: Optional[datetime] = field(default=None, metadata={"description": "The timestamp of the most recent transaction applied to the database that you're backing up. Thus, if you restore a snapshot, SnapshotDatabaseTime is the most recent transaction in the restored DB instance. In contrast, originalSnapshotCreateTime specifies the system time that the snapshot completed. If you back up a read replica, you can determine the replica lag by comparing SnapshotDatabaseTime with originalSnapshotCreateTime. 
For example, if originalSnapshotCreateTime is two hours later than SnapshotDatabaseTime, then the replica lag is two hours."}) # fmt: skip + rds_snapshot_target: Optional[str] = field(default=None, metadata={"description": "Specifies where manual snapshots are stored: Amazon Web Services Outposts or the Amazon Web Services Region."}) # fmt: skip + rds_storage_throughput: Optional[int] = field(default=None, metadata={"description": "Specifies the storage throughput for the DB snapshot."}) # fmt: skip + rds_db_system_id: Optional[str] = field(default=None, metadata={"description": "The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. The Oracle SID is also the name of your CDB."}) # fmt: skip + rds_dedicated_log_volume: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the DB instance has a dedicated log volume (DLV) enabled."}) # fmt: skip + rds_multi_tenant: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the snapshot is of a DB instance using the multi-tenant configuration (TRUE) or the single-tenant configuration (FALSE)."}) # fmt: skip + rds_attributes: Optional[Dict[str, List[str]]] = None + + def post_process(self, builder: GraphBuilder, source: Json) -> None: + def fetch_snapshot_attributes() -> None: + with builder.suppress("rds.describe-db-snapshot-attributes"): + if attrs := builder.client.get( + "rds", + "describe-db-snapshot-attributes", + "DBSnapshotAttributesResult.DBSnapshotAttributes", + DBSnapshotIdentifier=self.id, + ): + self.rds_attributes = bend(ToDict(key="AttributeName", value="AttributeValues"), attrs) + + builder.submit_work(service_name, fetch_snapshot_attributes) + + def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: + if dbi := self.rds_db_instance_identifier: + builder.add_edge(self, reverse=True, clazz=AwsRdsInstance, id=dbi) + if vpc_id := self.rds_vpc_id: + builder.add_edge(self, 
reverse=True, clazz=AwsEc2Vpc, id=vpc_id) + + +@define(eq=False, slots=False) +class AwsRdsClusterSnapshot(AwsResource): + kind: ClassVar[str] = "aws_rds_cluster_snapshot" + api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("rds", "describe-db-cluster-snapshots", "DBClusterSnapshots") + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("DBClusterSnapshotIdentifier"), + "tags": S("TagList", default=[]) >> ToDict(), + "name": S("TagList", default=[]) >> TagsValue("Name"), + "ctime": S("SnapshotCreateTime"), + "rds_availability_zones": S("AvailabilityZones", default=[]), + "rds_db_cluster_identifier": S("DBClusterIdentifier"), + "rds_engine": S("Engine"), + "rds_engine_mode": S("EngineMode"), + "rds_allocated_storage": S("AllocatedStorage"), + "rds_status": S("Status"), + "rds_port": S("Port"), + "rds_vpc_id": S("VpcId"), + "rds_cluster_create_time": S("ClusterCreateTime"), + "rds_master_username": S("MasterUsername"), + "rds_engine_version": S("EngineVersion"), + "rds_license_model": S("LicenseModel"), + "rds_snapshot_type": S("SnapshotType"), + "rds_percent_progress": S("PercentProgress"), + "rds_storage_encrypted": S("StorageEncrypted"), + "rds_kms_key_id": S("KmsKeyId"), + "rds_db_cluster_snapshot_arn": S("DBClusterSnapshotArn"), + "rds_source_db_cluster_snapshot_arn": S("SourceDBClusterSnapshotArn"), + "rds_iam_database_authentication_enabled": S("IAMDatabaseAuthenticationEnabled"), + "rds_db_system_id": S("DBSystemId"), + "rds_storage_type": S("StorageType"), + "rds_db_cluster_resource_id": S("DbClusterResourceId"), + } + rds_availability_zones: Optional[List[str]] = field(factory=list, metadata={"description": "The list of Availability Zones (AZs) where instances in the DB cluster snapshot can be restored."}) # fmt: skip + rds_db_cluster_identifier: Optional[str] = field(default=None, metadata={"description": "The DB cluster identifier of the DB cluster that this DB cluster snapshot was created from."}) # fmt: skip + rds_engine: Optional[str] = field(default=None, 
metadata={"description": "The name of the database engine for this DB cluster snapshot."}) # fmt: skip + rds_engine_mode: Optional[str] = field(default=None, metadata={"description": "The engine mode of the database engine for this DB cluster snapshot."}) # fmt: skip + rds_allocated_storage: Optional[int] = field(default=None, metadata={"description": "The allocated storage size of the DB cluster snapshot in gibibytes (GiB)."}) # fmt: skip + rds_status: Optional[str] = field(default=None, metadata={"description": "The status of this DB cluster snapshot. Valid statuses are the following: available copying creating"}) # fmt: skip + rds_port: Optional[int] = field(default=None, metadata={"description": "The port that the DB cluster was listening on at the time of the snapshot."}) # fmt: skip + rds_vpc_id: Optional[str] = field(default=None, metadata={"description": "The VPC ID associated with the DB cluster snapshot."}) # fmt: skip + rds_cluster_create_time: Optional[datetime] = field(default=None, metadata={"description": "The time when the DB cluster was created, in Universal Coordinated Time (UTC)."}) # fmt: skip + rds_master_username: Optional[str] = field(default=None, metadata={"description": "The master username for this DB cluster snapshot."}) # fmt: skip + rds_engine_version: Optional[str] = field(default=None, metadata={"description": "The version of the database engine for this DB cluster snapshot."}) # fmt: skip + rds_license_model: Optional[str] = field(default=None, metadata={"description": "The license model information for this DB cluster snapshot."}) # fmt: skip + rds_snapshot_type: Optional[str] = field(default=None, metadata={"description": "The type of the DB cluster snapshot."}) # fmt: skip + rds_percent_progress: Optional[int] = field(default=None, metadata={"description": "The percentage of the estimated data that has been transferred."}) # fmt: skip + rds_storage_encrypted: Optional[bool] = field(default=None, metadata={"description": 
"Indicates whether the DB cluster snapshot is encrypted."}) # fmt: skip + rds_kms_key_id: Optional[str] = field(default=None, metadata={"description": "If StorageEncrypted is true, the Amazon Web Services KMS key identifier for the encrypted DB cluster snapshot. The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key."}) # fmt: skip + rds_db_cluster_snapshot_arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) for the DB cluster snapshot."}) # fmt: skip + rds_source_db_cluster_snapshot_arn: Optional[str] = field(default=None, metadata={"description": "If the DB cluster snapshot was copied from a source DB cluster snapshot, the Amazon Resource Name (ARN) for the source DB cluster snapshot, otherwise, a null value."}) # fmt: skip + rds_iam_database_authentication_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether mapping of Amazon Web Services Identity and Access Management (IAM) accounts to database accounts is enabled."}) # fmt: skip + rds_db_system_id: Optional[str] = field(default=None, metadata={"description": "Reserved for future use."}) # fmt: skip + rds_storage_type: Optional[str] = field(default=None, metadata={"description": "The storage type associated with the DB cluster snapshot. 
This setting is only for Aurora DB clusters."}) # fmt: skip + rds_db_cluster_resource_id: Optional[str] = field(default=None, metadata={"description": "The resource ID of the DB cluster that this DB cluster snapshot was created from."}) # fmt: skip + rds_attributes: Optional[Dict[str, List[str]]] = None + + def post_process(self, builder: GraphBuilder, source: Json) -> None: + def fetch_snapshot_attributes() -> None: + with builder.suppress("rds.describe-db-cluster-snapshot-attributes"): + if attrs := builder.client.get( + "rds", + "describe-db-cluster-snapshot-attributes", + "DBClusterSnapshotAttributesResult.DBClusterSnapshotAttributes", + DBClusterSnapshotIdentifier=self.id, + ): + self.rds_attributes = bend(ToDict(key="AttributeName", value="AttributeValues"), attrs) + + builder.submit_work(service_name, fetch_snapshot_attributes) + + def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: + if dbi := self.rds_db_cluster_identifier: + builder.add_edge(self, reverse=True, clazz=AwsRdsCluster, id=dbi) + if vpc_id := self.rds_vpc_id: + builder.add_edge(self, reverse=True, clazz=AwsEc2Vpc, id=vpc_id) + if key_id := self.rds_kms_key_id: + builder.add_edge(self, clazz=AwsKmsKey, id=AwsKmsKey.normalise_id(key_id)) + + +resources: List[Type[AwsResource]] = [AwsRdsCluster, AwsRdsInstance, AwsRdsSnapshot, AwsRdsClusterSnapshot] diff --git a/plugins/aws/resoto_plugin_aws/resource/redshift.py b/plugins/aws/resoto_plugin_aws/resource/redshift.py index 404045883d..cfeb03c1d3 100644 --- a/plugins/aws/resoto_plugin_aws/resource/redshift.py +++ b/plugins/aws/resoto_plugin_aws/resource/redshift.py @@ -2,7 +2,7 @@ from attrs import define, field -from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder +from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder, parse_json from resoto_plugin_aws.resource.kms import AwsKmsKey from resotolib.baseresources import ModelReference from resotolib.graph import Graph @@ 
-389,6 +389,29 @@ class AwsRedshiftReservedNodeExchangeStatus: target_reserved_node_count: Optional[int] = field(default=None) +@define(eq=False, slots=False) +class AwsRedshiftLoggingStatus: + kind: ClassVar[str] = "aws_redshift_logging_status" + mapping: ClassVar[Dict[str, Bender]] = { + "logging_enabled": S("LoggingEnabled"), + "bucket_name": S("BucketName"), + "s3_key_prefix": S("S3KeyPrefix"), + "last_successful_delivery_time": S("LastSuccessfulDeliveryTime"), + "last_failure_time": S("LastFailureTime"), + "last_failure_message": S("LastFailureMessage"), + "log_destination_type": S("LogDestinationType"), + "log_exports": S("LogExports", default=[]), + } + logging_enabled: Optional[bool] = field(default=None, metadata={"description": "true if logging is on, false if logging is off."}) # fmt: skip + bucket_name: Optional[str] = field(default=None, metadata={"description": "The name of the S3 bucket where the log files are stored."}) # fmt: skip + s3_key_prefix: Optional[str] = field(default=None, metadata={"description": "The prefix applied to the log file names."}) # fmt: skip + last_successful_delivery_time: Optional[datetime] = field(default=None, metadata={"description": "The last time that logs were delivered."}) # fmt: skip + last_failure_time: Optional[datetime] = field(default=None, metadata={"description": "The last time when logs failed to be delivered."}) # fmt: skip + last_failure_message: Optional[str] = field(default=None, metadata={"description": "The message indicating that logs failed to be delivered."}) # fmt: skip + log_destination_type: Optional[str] = field(default=None, metadata={"description": "The log destination type. An enum with possible values of s3 and cloudwatch."}) # fmt: skip + log_exports: Optional[List[str]] = field(factory=list, metadata={"description": "The collection of exported log types. 
Possible values are connectionlog, useractivitylog, and userlog."}) # fmt: skip + + @define(eq=False, slots=False) class AwsRedshiftCluster(AwsResource): kind: ClassVar[str] = "aws_redshift_cluster" @@ -519,13 +542,30 @@ class AwsRedshiftCluster(AwsResource): redshift_aqua_configuration: Optional[AwsRedshiftAquaConfiguration] = field(default=None) redshift_default_iam_role_arn: Optional[str] = field(default=None) redshift_reserved_node_exchange_status: Optional[AwsRedshiftReservedNodeExchangeStatus] = field(default=None) + redshift_logging_status: Optional[AwsRedshiftLoggingStatus] = field(default=None) + + @classmethod + def called_collect_apis(cls) -> List[AwsApiSpec]: + return [cls.api_spec, AwsApiSpec(service_name, "describe-logging-status")] @classmethod def collect(cls: Type[AwsResource], json: List[Json], builder: GraphBuilder) -> None: + def fetch_logging_status(rc: AwsRedshiftCluster) -> None: + with builder.suppress("redshift.describe-logging-status"): + if raw := builder.client.get( + aws_service=service_name, + action="describe-logging-status", + ClusterIdentifier=rc.id, + ): + rc.redshift_logging_status = parse_json( + raw, AwsRedshiftLoggingStatus, builder, AwsRedshiftLoggingStatus.mapping + ) + for js in json: if cluster := cls.from_api(js, builder): cluster.set_arn(builder=builder, resource=f"cluster:{cluster.id}") builder.add_node(cluster, js) + builder.submit_work(service_name, fetch_logging_status, cluster) def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: if self.redshift_vpc_id: diff --git a/plugins/aws/resoto_plugin_aws/resource/route53.py b/plugins/aws/resoto_plugin_aws/resource/route53.py index 3ff567ae5f..f47460184c 100644 --- a/plugins/aws/resoto_plugin_aws/resource/route53.py +++ b/plugins/aws/resoto_plugin_aws/resource/route53.py @@ -3,7 +3,7 @@ from attrs import define, field from resoto_plugin_aws.aws_client import AwsClient -from resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder +from 
resoto_plugin_aws.resource.base import AwsResource, AwsApiSpec, GraphBuilder, parse_json from resoto_plugin_aws.utils import ToDict from resotolib.baseresources import ( BaseDNSZone, @@ -46,6 +46,17 @@ class AwsRoute53LinkedService: description: Optional[str] = field(default=None) +@define(eq=False, slots=False) +class AwsRoute53LoggingConfig: + kind: ClassVar[str] = "aws_route53_logging_config" + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("Id"), + "cloud_watch_logs_log_group_arn": S("CloudWatchLogsLogGroupArn"), + } + id: Optional[str] = field(default=None, metadata={"description": "The ID for a configuration for DNS query logging."}) # fmt: skip + cloud_watch_logs_log_group_arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) of the CloudWatch Logs log group that Amazon Route 53 is publishing logs to."}) # fmt: skip + + @define(eq=False, slots=False) class AwsRoute53Zone(AwsResource, BaseDNSZone): kind: ClassVar[str] = "aws_route53_zone" @@ -73,6 +84,7 @@ class AwsRoute53Zone(AwsResource, BaseDNSZone): zone_config: Optional[AwsRoute53ZoneConfig] = field(default=None) zone_resource_record_set_count: Optional[int] = field(default=None) zone_linked_service: Optional[AwsRoute53LinkedService] = field(default=None) + zone_logging_config: Optional[AwsRoute53LoggingConfig] = field(default=None) @classmethod def called_collect_apis(cls) -> List[AwsApiSpec]: @@ -95,10 +107,20 @@ def add_tags(zone: AwsRoute53Zone) -> None: if tags: zone.tags = bend(S("Tags", default=[]) >> ToDict(), tags) + def fetch_logging_configuration(zone: AwsRoute53Zone) -> None: + with builder.suppress("route53.list-query-logging-configs"): + if res := builder.client.list( + service_name, "list-query-logging-configs", "QueryLoggingConfigs", HostedZoneId=zone.id + ): + zone.zone_logging_config = parse_json( + res[0], AwsRoute53LoggingConfig, builder, AwsRoute53LoggingConfig.mapping + ) + for js in json: if zone := AwsRoute53Zone.from_api(js, 
builder): builder.add_node(zone, js) builder.submit_work(service_name, add_tags, zone) + builder.submit_work(service_name, fetch_logging_configuration, zone) for rs_js in builder.client.list( service_name, "list-resource-record-sets", "ResourceRecordSets", HostedZoneId=zone.id ): diff --git a/plugins/aws/resoto_plugin_aws/resource/s3.py b/plugins/aws/resoto_plugin_aws/resource/s3.py index fe8e874e6e..c65cb87e49 100644 --- a/plugins/aws/resoto_plugin_aws/resource/s3.py +++ b/plugins/aws/resoto_plugin_aws/resource/s3.py @@ -1,4 +1,4 @@ -from contextlib import suppress +import logging from json import loads as json_loads from typing import ClassVar, Dict, List, Type, Optional, cast, Any @@ -15,6 +15,7 @@ from resotolib.types import Json service_name = "s3" +log = logging.getLogger("resoto.plugins.aws") @define(eq=False, slots=False) @@ -201,7 +202,7 @@ def add_tags(bucket: AwsS3Bucket) -> None: bucket.tags = cast(Dict[str, Optional[str]], tags) def add_bucket_encryption(bck: AwsS3Bucket) -> None: - with suppress(Exception): + with builder.suppress(f"{service_name}.get-bucket-encryption"): bck.bucket_encryption_rules = [] for raw in builder.client.list( service_name, @@ -215,7 +216,7 @@ def add_bucket_encryption(bck: AwsS3Bucket) -> None: bck.bucket_encryption_rules.append(rule) def add_bucket_policy(bck: AwsS3Bucket) -> None: - with suppress(Exception): + with builder.suppress(f"{service_name}.get-bucket-policy"): if raw_policy := builder.client.get( service_name, "get-bucket-policy", @@ -226,7 +227,7 @@ def add_bucket_policy(bck: AwsS3Bucket) -> None: bck.bucket_policy = json_loads(raw_policy) # type: ignore # this is a string def add_bucket_versioning(bck: AwsS3Bucket) -> None: - with suppress(Exception): + with builder.suppress(f"{service_name}.get-bucket-versioning"): if raw_versioning := builder.client.get( service_name, "get-bucket-versioning", None, Bucket=bck.name, expected_errors=["NoSuchBucket"] ): @@ -237,7 +238,7 @@ def add_bucket_versioning(bck: 
AwsS3Bucket) -> None: bck.bucket_mfa_delete = False def add_public_access(bck: AwsS3Bucket) -> None: - with suppress(Exception): + with builder.suppress(f"{service_name}.get-public-access-block"): if raw_access := builder.client.get( service_name, "get-public-access-block", @@ -251,7 +252,7 @@ def add_public_access(bck: AwsS3Bucket) -> None: ) def add_acls(bck: AwsS3Bucket) -> None: - with suppress(Exception): + with builder.suppress(f"{service_name}.get-bucket-acl"): if raw := builder.client.get( service_name, "get-bucket-acl", Bucket=bck.name, expected_errors=["NoSuchBucket"] ): @@ -259,7 +260,7 @@ def add_acls(bck: AwsS3Bucket) -> None: bck.bucket_acl = parse_json(mapped, AwsS3BucketAcl, builder) def add_bucket_logging(bck: AwsS3Bucket) -> None: - with suppress(Exception): + with builder.suppress(f"{service_name}.get-bucket-logging"): if raw := builder.client.get( service_name, "get-bucket-logging", diff --git a/plugins/aws/resoto_plugin_aws/resource/secretsmanager.py b/plugins/aws/resoto_plugin_aws/resource/secretsmanager.py index 345bc01239..b68486dce1 100644 --- a/plugins/aws/resoto_plugin_aws/resource/secretsmanager.py +++ b/plugins/aws/resoto_plugin_aws/resource/secretsmanager.py @@ -14,7 +14,7 @@ @define(eq=False, slots=False) class AwsSecretsManagerRotationRulesType: - kind: ClassVar[str] = "aws_secrets_manager_rotation_rules_type" + kind: ClassVar[str] = "aws_secretsmanager_rotation_rules_type" mapping: ClassVar[Dict[str, Bender]] = { "automatically_after_days": S("AutomaticallyAfterDays"), "duration": S("Duration"), @@ -27,7 +27,7 @@ class AwsSecretsManagerRotationRulesType: @define(eq=False, slots=False) class AwsSecretsManagerSecret(AwsResource): - kind: ClassVar[str] = "aws_secrets_manager_secret" + kind: ClassVar[str] = "aws_secretsmanager_secret" api_spec: ClassVar[AwsApiSpec] = AwsApiSpec(service_name, "list-secrets", "SecretList") mapping: ClassVar[Dict[str, Bender]] = { "id": S("Name"), diff --git a/plugins/aws/resoto_plugin_aws/resource/sns.py 
b/plugins/aws/resoto_plugin_aws/resource/sns.py index 38a76bfd3a..7da3f848a4 100644 --- a/plugins/aws/resoto_plugin_aws/resource/sns.py +++ b/plugins/aws/resoto_plugin_aws/resource/sns.py @@ -7,7 +7,7 @@ from resoto_plugin_aws.utils import ToDict from resotolib.baseresources import EdgeType, ModelReference from resotolib.graph import Graph -from resotolib.json_bender import F, Bender, S, bend +from resotolib.json_bender import F, Bender, S, bend, ParseJson from resotolib.types import Json service_name = "sns" @@ -37,7 +37,7 @@ class AwsSnsTopic(AwsResource): "topic_subscriptions_confirmed": S("SubscriptionsConfirmed") >> F(lambda x: int(x)), "topic_subscriptions_deleted": S("SubscriptionsDeleted") >> F(lambda x: int(x)), "topic_subscriptions_pending": S("SubscriptionsPending") >> F(lambda x: int(x)), - "topic_policy": S("Policy"), + "topic_policy": S("Policy") >> ParseJson(), "topic_delivery_policy": S("DeliveryPolicy"), "topic_effective_delivery_policy": S("EffectiveDeliveryPolicy"), "topic_owner": S("Owner"), @@ -48,7 +48,7 @@ class AwsSnsTopic(AwsResource): topic_subscriptions_confirmed: Optional[int] = field(default=None) topic_subscriptions_deleted: Optional[int] = field(default=None) topic_subscriptions_pending: Optional[int] = field(default=None) - topic_policy: Optional[str] = field(default=None) + topic_policy: Optional[Json] = field(default=None) topic_delivery_policy: Optional[str] = field(default=None) topic_effective_delivery_policy: Optional[str] = field(default=None) topic_owner: Optional[str] = field(default=None) diff --git a/plugins/aws/resoto_plugin_aws/resource/sqs.py b/plugins/aws/resoto_plugin_aws/resource/sqs.py index f346bbf085..c04d2154df 100644 --- a/plugins/aws/resoto_plugin_aws/resource/sqs.py +++ b/plugins/aws/resoto_plugin_aws/resource/sqs.py @@ -56,7 +56,7 @@ class AwsSqsQueue(AwsResource): "sqs_approximate_number_of_messages": S("ApproximateNumberOfMessages") >> AsInt(), "sqs_approximate_number_of_messages_not_visible": 
S("ApproximateNumberOfMessagesNotVisible") >> AsInt(), "sqs_approximate_number_of_messages_delayed": S("ApproximateNumberOfMessagesDelayed") >> AsInt(), - "sqs_policy": S("Policy") >> ParseJson(keys_to_snake=True), + "sqs_policy": S("Policy") >> ParseJson(), "sqs_redrive_policy": S("RedrivePolicy") >> ParseJson() >> Bend(AwsSqsRedrivePolicy.mapping), "sqs_fifo_queue": S("FifoQueue") >> AsBool(), "sqs_content_based_deduplication": S("ContentBasedDeduplication") >> AsBool(), diff --git a/plugins/aws/resoto_plugin_aws/resource/ssm.py b/plugins/aws/resoto_plugin_aws/resource/ssm.py index 92deeae3eb..1b00514455 100644 --- a/plugins/aws/resoto_plugin_aws/resource/ssm.py +++ b/plugins/aws/resoto_plugin_aws/resource/ssm.py @@ -1,15 +1,22 @@ +import json +import logging from datetime import datetime from typing import ClassVar, Dict, Optional, List, Type +import yaml from attrs import define, field +from boto3.exceptions import Boto3Error -from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder +from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder, parse_json +from resoto_plugin_aws.resource.dynamodb import AwsDynamoDbTable from resoto_plugin_aws.resource.ec2 import AwsEc2Instance +from resoto_plugin_aws.resource.s3 import AwsS3Bucket from resoto_plugin_aws.utils import ToDict from resotolib.baseresources import ModelReference -from resotolib.json_bender import Bender, S, Bend, AsDateString +from resotolib.json_bender import Bender, S, Bend, AsDateString, ForallBend, K from resotolib.types import Json +log = logging.getLogger("resoto.plugins.aws") service_name = "ssm" @@ -25,8 +32,8 @@ class AwsSSMInstanceAggregatedAssociationOverview: @define(eq=False, slots=False) -class AwsSSMInstanceInformation(AwsResource): - kind: ClassVar[str] = "aws_ssm_instance_information" +class AwsSSMInstance(AwsResource): + kind: ClassVar[str] = "aws_ssm_instance" api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("ssm", 
"describe-instance-information", "InstanceInformationList") reference_kinds: ClassVar[ModelReference] = { "successors": {"default": ["aws_ec2_instance"]}, @@ -83,4 +90,288 @@ def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: builder.dependant_node(self, clazz=AwsEc2Instance, id=instance_id) -resources: List[Type[AwsResource]] = [AwsSSMInstanceInformation] +@define(eq=False, slots=False) +class AwsSSMDocumentParameter: + kind: ClassVar[str] = "aws_ssm_document_parameter" + mapping: ClassVar[Dict[str, Bender]] = { + "name": S("Name"), + "type": S("Type"), + "description": S("Description"), + "default_value": S("DefaultValue"), + } + name: Optional[str] = field(default=None, metadata={"description": "The name of the parameter."}) # fmt: skip + type: Optional[str] = field(default=None, metadata={"description": "The type of parameter. The type can be either String or StringList."}) # fmt: skip + description: Optional[str] = field(default=None, metadata={"description": "A description of what the parameter does, how to use it, the default value, and whether or not the parameter is optional."}) # fmt: skip + default_value: Optional[str] = field(default=None, metadata={"description": "If specified, the default values for the parameters. Parameters without a default value are required. Parameters with a default value are optional."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsSSMDocumentRequires: + kind: ClassVar[str] = "aws_ssm_document_requires" + mapping: ClassVar[Dict[str, Bender]] = { + "name": S("Name"), + "version": S("Version"), + "require_type": S("RequireType"), + "version_name": S("VersionName"), + } + name: Optional[str] = field(default=None, metadata={"description": "The name of the required SSM document. 
The name can be an Amazon Resource Name (ARN)."}) # fmt: skip + version: Optional[str] = field(default=None, metadata={"description": "The document version required by the current document."}) # fmt: skip + require_type: Optional[str] = field(default=None, metadata={"description": "The document type of the required SSM document."}) # fmt: skip + version_name: Optional[str] = field(default=None, metadata={"description": "An optional field specifying the version of the artifact associated with the document. For example, Release 12, Update 6. This value is unique across all versions of a document, and can't be changed."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsSSMReviewInformation: + kind: ClassVar[str] = "aws_ssm_review_information" + mapping: ClassVar[Dict[str, Bender]] = { + "reviewed_time": S("ReviewedTime"), + "status": S("Status"), + "reviewer": S("Reviewer"), + } + reviewed_time: Optional[datetime] = field(default=None, metadata={"description": "The time that the reviewer took action on the document review request."}) # fmt: skip + status: Optional[str] = field(default=None, metadata={"description": "The current status of the document review request."}) # fmt: skip + reviewer: Optional[str] = field(default=None, metadata={"description": "The reviewer assigned to take action on the document review request."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsSSMAccountSharingInfo: + kind: ClassVar[str] = "aws_ssm_account_sharing_info" + mapping: ClassVar[Dict[str, Bender]] = { + "account_id": S("AccountId"), + "shared_document_version": S("SharedDocumentVersion"), + } + account_id: Optional[str] = field(default=None, metadata={"description": "The Amazon Web Services account ID where the current document is shared."}) # fmt: skip + shared_document_version: Optional[str] = field(default=None, metadata={"description": "The version of the current document shared with the account."}) # fmt: skip + + +@define(eq=False, slots=False) +class 
AwsSSMDocument(AwsResource): + kind: ClassVar[str] = "aws_ssm_document" + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("Name"), + "tags": S("Tags", default=[]) >> ToDict(), + "name": S("Name"), + "ctime": S("CreatedDate"), + "sha1": S("Sha1"), + "hash": S("Hash"), + "hash_type": S("HashType"), + "display_name": S("DisplayName"), + "version_name": S("VersionName"), + "owner": S("Owner"), + "status": S("Status"), + "status_information": S("StatusInformation"), + "document_version": S("DocumentVersion"), + "description": S("Description"), + "parameters": S("Parameters", default=[]) >> ForallBend(AwsSSMDocumentParameter.mapping), + "platform_types": S("PlatformTypes", default=[]), + "document_type": S("DocumentType"), + "schema_version": S("SchemaVersion"), + "latest_version": S("LatestVersion"), + "default_version": S("DefaultVersion"), + "document_format": S("DocumentFormat"), + "target_type": S("TargetType"), + "attachments_information": S("AttachmentsInformation", default=[]) >> ForallBend(S("Name")), + "requires": S("Requires", default=[]) >> ForallBend(AwsSSMDocumentRequires.mapping), + "author": S("Author"), + "review_information": S("ReviewInformation", default=[]) >> ForallBend(AwsSSMReviewInformation.mapping), + "approved_version": S("ApprovedVersion"), + "pending_review_version": S("PendingReviewVersion"), + "review_status": S("ReviewStatus"), + "category": S("Category", default=[]), + "category_enum": S("CategoryEnum", default=[]), + } + sha1: Optional[str] = field(default=None, metadata={"description": "The SHA1 hash of the document, which you can use for verification."}) # fmt: skip + hash: Optional[str] = field(default=None, metadata={"description": "The Sha256 or Sha1 hash created by the system when the document was created. Sha1 hashes have been deprecated."}) # fmt: skip + hash_type: Optional[str] = field(default=None, metadata={"description": "The hash type of the document. Valid values include Sha256 or Sha1. 
Sha1 hashes have been deprecated."}) # fmt: skip + name: Optional[str] = field(default=None, metadata={"description": "The name of the SSM document."}) # fmt: skip + display_name: Optional[str] = field(default=None, metadata={"description": "The friendly name of the SSM document. This value can differ for each version of the document. If you want to update this value, see UpdateDocument."}) # fmt: skip + version_name: Optional[str] = field(default=None, metadata={"description": "The version of the artifact associated with the document."}) # fmt: skip + owner: Optional[str] = field(default=None, metadata={"description": "The Amazon Web Services user that created the document."}) # fmt: skip + status: Optional[str] = field(default=None, metadata={"description": "The status of the SSM document."}) # fmt: skip + status_information: Optional[str] = field(default=None, metadata={"description": "A message returned by Amazon Web Services Systems Manager that explains the Status value. For example, a Failed status might be explained by the StatusInformation message, The specified S3 bucket doesn't exist. 
Verify that the URL of the S3 bucket is correct."}) # fmt: skip + document_version: Optional[str] = field(default=None, metadata={"description": "The document version."}) # fmt: skip + description: Optional[str] = field(default=None, metadata={"description": "A description of the document."}) # fmt: skip + parameters: Optional[List[AwsSSMDocumentParameter]] = field(factory=list, metadata={"description": "A description of the parameters for a document."}) # fmt: skip + platform_types: Optional[List[str]] = field(factory=list, metadata={"description": "The list of operating system (OS) platforms compatible with this SSM document."}) # fmt: skip + document_type: Optional[str] = field(default=None, metadata={"description": "The type of document."}) # fmt: skip + schema_version: Optional[str] = field(default=None, metadata={"description": "The schema version."}) # fmt: skip + latest_version: Optional[str] = field(default=None, metadata={"description": "The latest version of the document."}) # fmt: skip + default_version: Optional[str] = field(default=None, metadata={"description": "The default version."}) # fmt: skip + document_format: Optional[str] = field(default=None, metadata={"description": "The document format, either JSON or YAML."}) # fmt: skip + target_type: Optional[str] = field(default=None, metadata={"description": "The target type which defines the kinds of resources the document can run on. For example, /AWS::EC2::Instance. For a list of valid resource types, see Amazon Web Services resource and property types reference in the CloudFormation User Guide."}) # fmt: skip + attachments_information: Optional[List[str]] = field(factory=list, metadata={"description": "Details about the document attachments, including names, locations, sizes, and so on."}) # fmt: skip + requires: Optional[List[AwsSSMDocumentRequires]] = field(factory=list, metadata={"description": "A list of SSM documents required by a document. 
For example, an ApplicationConfiguration document requires an ApplicationConfigurationSchema document."}) # fmt: skip + author: Optional[str] = field(default=None, metadata={"description": "The user in your organization who created the document."}) # fmt: skip + review_information: Optional[List[AwsSSMReviewInformation]] = field(factory=list, metadata={"description": "Details about the review of a document."}) # fmt: skip + approved_version: Optional[str] = field(default=None, metadata={"description": "The version of the document currently approved for use in the organization."}) # fmt: skip + pending_review_version: Optional[str] = field(default=None, metadata={"description": "The version of the document that is currently under review."}) # fmt: skip + review_status: Optional[str] = field(default=None, metadata={"description": "The current status of the review."}) # fmt: skip + category: Optional[List[str]] = field(factory=list, metadata={"description": "The classification of a document to help you identify and categorize its use."}) # fmt: skip + category_enum: Optional[List[str]] = field(factory=list, metadata={"description": "The value that identifies a document's category."}) # fmt: skip + content: Optional[Json] = field(default=None, metadata={"description": "The content of the document"}) # fmt: skip + document_shared_with_accounts: Optional[List[str]] = field(factory=list, metadata={"description": "The account IDs that have permission to use this document. 
The ID can be either an Amazon Web Services account or All."}) # fmt: skip + document_sharing_info: Optional[List[AwsSSMAccountSharingInfo]] = field(factory=list, metadata={"description": "A list of Amazon Web Services accounts where the current document is shared and the version shared with each account."}) # fmt: skip + + @classmethod + def collect_resources(cls, builder: GraphBuilder) -> None: + def collect_document(name: str) -> None: + with builder.suppress(f"{service_name}.describe-document"): + js = builder.client.get(service_name, "describe-document", "Document", Name=name) + doc = builder.client.get(service_name, "get-document", Name=name) + share = builder.client.get( + service_name, "describe-document-permission", Name=name, PermissionType="Share" + ) + + if ( + (js and doc and share) + and (content := doc.get("Content")) + and (content_format := doc.get("DocumentFormat")) + and (instance := cls.from_api(js, builder)) + ): + if content_format == "JSON": + instance.content = json.loads(content) + elif content_format == "YAML": + instance.content = yaml.safe_load(content) + else: + instance.content = content + instance.document_shared_with_accounts = share.get("AccountIds", []) + instance.document_sharing_info = [ + sharing_info + for sharing_info in [ + parse_json(jsi, AwsSSMAccountSharingInfo, builder, AwsSSMAccountSharingInfo.mapping) + for jsi in share.get("AccountSharingInfoList", []) + ] + if sharing_info is not None + ] + builder.add_node(instance, js) + + # Default behavior: in case the class has an ApiSpec, call the api and call collect. 
+ log.debug(f"Collecting {cls.__name__} in region {builder.region.name}") + try: + for item in builder.client.list( + aws_service=service_name, + action="list-documents", + result_name="DocumentIdentifiers", + Filters=[{"Key": "Owner", "Values": ["Self"]}], + ): + builder.submit_work(service_name, collect_document, item["Name"]) + except Boto3Error as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.error(msg, log) + raise + except Exception as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.info(msg, log) + raise + + @classmethod + def called_collect_apis(cls) -> List[AwsApiSpec]: + return [ + AwsApiSpec(service_name, "list-documents"), + AwsApiSpec(service_name, "describe-document"), + AwsApiSpec(service_name, "get-document"), + ] + + @classmethod + def service_name(cls) -> Optional[str]: + return service_name + + +@define(eq=False, slots=False) +class AwsSSMComplianceExecutionSummary: + kind: ClassVar[str] = "aws_ssm_compliance_execution_summary" + mapping: ClassVar[Dict[str, Bender]] = { + "execution_time": S("ExecutionTime"), + "execution_id": S("ExecutionId"), + "execution_type": S("ExecutionType"), + } + execution_time: Optional[datetime] = field(default=None, metadata={"description": "The time the execution ran as a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'."}) # fmt: skip + execution_id: Optional[str] = field(default=None, metadata={"description": "An ID created by the system when PutComplianceItems was called. For example, CommandID is a valid execution ID. You can use this ID in subsequent calls."}) # fmt: skip + execution_type: Optional[str] = field(default=None, metadata={"description": "The type of execution. 
For example, Command is a valid execution type."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsSSMSeveritySummary: + kind: ClassVar[str] = "aws_ssm_severity_summary" + mapping: ClassVar[Dict[str, Bender]] = { + "critical_count": S("CriticalCount"), + "high_count": S("HighCount"), + "medium_count": S("MediumCount"), + "low_count": S("LowCount"), + "informational_count": S("InformationalCount"), + "unspecified_count": S("UnspecifiedCount"), + } + critical_count: Optional[int] = field(default=None, metadata={"description": "The total number of resources or compliance items that have a severity level of Critical. Critical severity is determined by the organization that published the compliance items."}) # fmt: skip + high_count: Optional[int] = field(default=None, metadata={"description": "The total number of resources or compliance items that have a severity level of high. High severity is determined by the organization that published the compliance items."}) # fmt: skip + medium_count: Optional[int] = field(default=None, metadata={"description": "The total number of resources or compliance items that have a severity level of medium. Medium severity is determined by the organization that published the compliance items."}) # fmt: skip + low_count: Optional[int] = field(default=None, metadata={"description": "The total number of resources or compliance items that have a severity level of low. Low severity is determined by the organization that published the compliance items."}) # fmt: skip + informational_count: Optional[int] = field(default=None, metadata={"description": "The total number of resources or compliance items that have a severity level of informational. Informational severity is determined by the organization that published the compliance items."}) # fmt: skip + unspecified_count: Optional[int] = field(default=None, metadata={"description": "The total number of resources or compliance items that have a severity level of unspecified. 
Unspecified severity is determined by the organization that published the compliance items."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsSSMCompliantSummary: + kind: ClassVar[str] = "aws_ssm_compliant_summary" + mapping: ClassVar[Dict[str, Bender]] = { + "compliant_count": S("CompliantCount"), + "severity_summary": S("SeveritySummary") >> Bend(AwsSSMSeveritySummary.mapping), + } + compliant_count: Optional[int] = field(default=None, metadata={"description": "The total number of resources that are compliant."}) # fmt: skip + severity_summary: Optional[AwsSSMSeveritySummary] = field(default=None, metadata={"description": "A summary of the compliance severity by compliance type."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsSSMNonCompliantSummary: + kind: ClassVar[str] = "aws_ssm_non_compliant_summary" + mapping: ClassVar[Dict[str, Bender]] = { + "non_compliant_count": S("NonCompliantCount"), + "severity_summary": S("SeveritySummary") >> Bend(AwsSSMSeveritySummary.mapping), + } + non_compliant_count: Optional[int] = field(default=None, metadata={"description": "The total number of compliance items that aren't compliant."}) # fmt: skip + severity_summary: Optional[AwsSSMSeveritySummary] = field(default=None, metadata={"description": "A summary of the non-compliance severity by compliance type"}) # fmt: skip + + +ResourceTypeLookup = { + "ManagedInstance": AwsEc2Instance, + "AWS::EC2::Instance": AwsEc2Instance, + "AWS::DynamoDB::Table": AwsDynamoDbTable, + "Document": AwsSSMDocument, + "AWS::S3::Bucket": AwsS3Bucket, +} + + +@define(eq=False, slots=False) +class AwsSSMResourceCompliance(AwsResource): + kind: ClassVar[str] = "aws_ssm_resource_compliance" + api_spec: ClassVar[AwsApiSpec] = AwsApiSpec( + "ssm", "list-resource-compliance-summaries", "ResourceComplianceSummaryItems" + ) + reference_kinds: ClassVar[ModelReference] = { + "successors": {"default": ["aws_ec2_instance", "aws_dynamodb_table", "aws_s3_bucket", "aws_ssm_document"]} + } 
+ mapping: ClassVar[Dict[str, Bender]] = { + "id": S("ComplianceType") + K("_") + S("ResourceType") + K("_") + S("ResourceId"), + "compliance_type": S("ComplianceType"), + "resource_type": S("ResourceType"), + "resource_id": S("ResourceId"), + "status": S("Status"), + "overall_severity": S("OverallSeverity"), + "execution_summary": S("ExecutionSummary") >> Bend(AwsSSMComplianceExecutionSummary.mapping), + "compliant_summary": S("CompliantSummary") >> Bend(AwsSSMCompliantSummary.mapping), + "non_compliant_summary": S("NonCompliantSummary") >> Bend(AwsSSMNonCompliantSummary.mapping), + } + compliance_type: Optional[str] = field(default=None, metadata={"description": "The compliance type."}) # fmt: skip + resource_type: Optional[str] = field(default=None, metadata={"description": "The resource type."}) # fmt: skip + resource_id: Optional[str] = field(default=None, metadata={"description": "The resource ID."}) # fmt: skip + status: Optional[str] = field(default=None, metadata={"description": "The compliance status for the resource."}) # fmt: skip + overall_severity: Optional[str] = field(default=None, metadata={"description": "The highest severity item found for the resource. 
The resource is compliant for this item."}) # fmt: skip + execution_summary: Optional[AwsSSMComplianceExecutionSummary] = field(default=None, metadata={"description": "Information about the execution."}) # fmt: skip + compliant_summary: Optional[AwsSSMCompliantSummary] = field(default=None, metadata={"description": "A list of items that are compliant for the resource."}) # fmt: skip + non_compliant_summary: Optional[AwsSSMNonCompliantSummary] = field(default=None, metadata={"description": "A list of items that aren't compliant for the resource."}) # fmt: skip + + def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: + if (rt := self.resource_type) and (rid := self.resource_id) and (clazz := ResourceTypeLookup.get(rt)): + builder.add_edge(self, clazz=clazz, id=rid) + + +resources: List[Type[AwsResource]] = [AwsSSMInstance, AwsSSMDocument, AwsSSMResourceCompliance] diff --git a/plugins/aws/resoto_plugin_aws/resource/waf.py b/plugins/aws/resoto_plugin_aws/resource/waf.py new file mode 100644 index 0000000000..3aefe3344f --- /dev/null +++ b/plugins/aws/resoto_plugin_aws/resource/waf.py @@ -0,0 +1,925 @@ +from __future__ import annotations + +import logging +from typing import ClassVar, Dict, Optional, List, Type + +from attrs import define, field +from boto3.exceptions import Boto3Error + +from resoto_plugin_aws.resource.base import AwsApiSpec, AwsResource, GraphBuilder, parse_json +from resoto_plugin_aws.utils import ToDict +from resotolib.baseresources import ModelReference +from resotolib.json_bender import Bender, S, Bend, ForallBend, ParseJson, MapDict +from resotolib.types import Json + +log = logging.getLogger("resoto.plugins.aws") +service_name = "wafv2" + + +@define(eq=False, slots=False) +class AwsWafCustomHTTPHeader: + kind: ClassVar[str] = "aws_waf_custom_http_header" + mapping: ClassVar[Dict[str, Bender]] = {"name": S("Name"), "value": S("Value")} + name: Optional[str] = field(default=None, metadata={"description": "The name of the 
custom header."}) # fmt: skip + value: Optional[str] = field(default=None, metadata={"description": "The value of the custom header."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCustomResponse: + kind: ClassVar[str] = "aws_waf_custom_response" + mapping: ClassVar[Dict[str, Bender]] = { + "response_code": S("ResponseCode"), + "custom_response_body_key": S("CustomResponseBodyKey"), + "response_headers": S("ResponseHeaders", default=[]) >> ForallBend(AwsWafCustomHTTPHeader.mapping), + } + response_code: Optional[int] = field(default=None, metadata={"description": "The HTTP status code to return to the client."}) # fmt: skip + custom_response_body_key: Optional[str] = field(default=None, metadata={"description": "References the response body that you want WAF to return to the web request client."}) # fmt: skip + response_headers: Optional[List[AwsWafCustomHTTPHeader]] = field(factory=list, metadata={"description": "The HTTP headers to use in the response."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafDefaultAction: + kind: ClassVar[str] = "aws_waf_default_action" + mapping: ClassVar[Dict[str, Bender]] = { + "block": S("Block", "CustomResponse") >> Bend(AwsWafCustomResponse.mapping), + "allow": S("Allow", "CustomRequestHandling", "InsertHeaders") >> ForallBend(AwsWafCustomHTTPHeader.mapping), + } + block: Optional[AwsWafCustomResponse] = field(default=None, metadata={"description": "Specifies that WAF should block requests by default."}) # fmt: skip + allow: Optional[List[AwsWafCustomHTTPHeader]] = field(default=None, metadata={"description": "Specifies that WAF should allow requests by default."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafJsonMatchPattern: + kind: ClassVar[str] = "aws_waf_json_match_pattern" + mapping: ClassVar[Dict[str, Bender]] = { + "all": S("All"), + "included_paths": S("IncludedPaths", default=[]), + } + all: Optional[Json] = field(default=None, metadata={"description": "Match all of the 
elements. See also MatchScope in JsonBody."}) # fmt: skip + included_paths: Optional[List[str]] = field(factory=list, metadata={"description": "Match only the specified include paths."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafJsonBody: + kind: ClassVar[str] = "aws_waf_json_body" + mapping: ClassVar[Dict[str, Bender]] = { + "match_pattern": S("MatchPattern") >> Bend(AwsWafJsonMatchPattern.mapping), + "match_scope": S("MatchScope"), + "invalid_fallback_behavior": S("InvalidFallbackBehavior"), + "oversize_handling": S("OversizeHandling"), + } + match_pattern: Optional[AwsWafJsonMatchPattern] = field(default=None, metadata={"description": "The patterns to look for in the JSON body."}) # fmt: skip + match_scope: Optional[str] = field(default=None, metadata={"description": "The parts of the JSON to match against using the MatchPattern."}) # fmt: skip + invalid_fallback_behavior: Optional[str] = field(default=None, metadata={"description": "What WAF should do if it fails to completely parse the JSON body."}) # fmt: skip + oversize_handling: Optional[str] = field(default=None, metadata={"description": "What WAF should do if the body is larger than WAF can inspect."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafHeaderMatchPattern: + kind: ClassVar[str] = "aws_waf_header_match_pattern" + mapping: ClassVar[Dict[str, Bender]] = { + "all": S("All"), + "included_headers": S("IncludedHeaders", default=[]), + "excluded_headers": S("ExcludedHeaders", default=[]), + } + all: Optional[Json] = field(default=None, metadata={"description": "Inspect all headers."}) # fmt: skip + included_headers: Optional[List[str]] = field(factory=list, metadata={"description": "Inspect only the headers that have a key that matches one of the strings specified here."}) # fmt: skip + excluded_headers: Optional[List[str]] = field(factory=list, metadata={"description": "Inspect only the headers whose keys don't match any of the strings specified here."}) # fmt: skip + 
+ +@define(eq=False, slots=False) +class AwsWafHeaders: + kind: ClassVar[str] = "aws_waf_headers" + mapping: ClassVar[Dict[str, Bender]] = { + "match_pattern": S("MatchPattern") >> Bend(AwsWafHeaderMatchPattern.mapping), + "match_scope": S("MatchScope"), + "oversize_handling": S("OversizeHandling"), + } + match_pattern: Optional[AwsWafHeaderMatchPattern] = field(default=None, metadata={"description": "The filter to use to identify the subset of headers to inspect in a web request."}) # fmt: skip + match_scope: Optional[str] = field(default=None, metadata={"description": "The parts of the headers to match with the rule inspection criteria. If you specify ALL, WAF inspects both keys and values."}) # fmt: skip + oversize_handling: Optional[str] = field(default=None, metadata={"description": "What WAF should do if the headers of the request are more numerous or larger than WAF can inspect."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCookieMatchPattern: + kind: ClassVar[str] = "aws_waf_cookie_match_pattern" + mapping: ClassVar[Dict[str, Bender]] = { + "all": S("All"), + "included_cookies": S("IncludedCookies", default=[]), + "excluded_cookies": S("ExcludedCookies", default=[]), + } + all: Optional[Json] = field(default=None, metadata={"description": "Inspect all cookies."}) # fmt: skip + included_cookies: Optional[List[str]] = field(factory=list, metadata={"description": "Inspect only the cookies that have a key that matches one of the strings specified here."}) # fmt: skip + excluded_cookies: Optional[List[str]] = field(factory=list, metadata={"description": "Inspect only the cookies whose keys don't match any of the strings specified here."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCookies: + kind: ClassVar[str] = "aws_waf_cookies" + mapping: ClassVar[Dict[str, Bender]] = { + "match_pattern": S("MatchPattern") >> Bend(AwsWafCookieMatchPattern.mapping), + "match_scope": S("MatchScope"), + "oversize_handling": 
S("OversizeHandling"), + } + match_pattern: Optional[AwsWafCookieMatchPattern] = field(default=None, metadata={"description": "The filter to use to identify the subset of cookies to inspect in a web request."}) # fmt: skip + match_scope: Optional[str] = field(default=None, metadata={"description": "The parts of the cookies to inspect with the rule inspection criteria. If you specify ALL, WAF inspects both keys and values."}) # fmt: skip + oversize_handling: Optional[str] = field(default=None, metadata={"description": "What WAF should do if the cookies of the request are more numerous or larger than WAF can inspect."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafFieldToMatch: + kind: ClassVar[str] = "aws_waf_field_to_match" + mapping: ClassVar[Dict[str, Bender]] = { + "single_header": S("SingleHeader", "Name"), + "single_query_argument": S("SingleQueryArgument", "Name"), + "all_query_arguments": S("AllQueryArguments"), + "uri_path": S("UriPath"), + "query_string": S("QueryString"), + "body": S("Body", "OversizeHandling"), + "method": S("Method"), + "json_body": S("JsonBody") >> Bend(AwsWafJsonBody.mapping), + "headers": S("Headers") >> Bend(AwsWafHeaders.mapping), + "cookies": S("Cookies") >> Bend(AwsWafCookies.mapping), + "header_order": S("HeaderOrder", "OversizeHandling"), + "ja3_fingerprint": S("JA3Fingerprint", "FallbackBehavior"), + } + single_header: Optional[str] = field(default=None, metadata={"description": "Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer. This setting isn't case sensitive."}) # fmt: skip + single_query_argument: Optional[str] = field(default=None, metadata={"description": "Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion. 
The name can "}) # fmt: skip + all_query_arguments: Optional[Json] = field(default=None, metadata={"description": "Inspect all query arguments."}) # fmt: skip + uri_path: Optional[Json] = field(default=None, metadata={"description": "Inspect the request URI path. This is the part of the web request that identifies a resource, for example, /images/daily-ad.jpg."}) # fmt: skip + query_string: Optional[Json] = field(default=None, metadata={"description": "Inspect the query string. This is the part of a URL that appears after a ? character, if any."}) # fmt: skip + body: Optional[str] = field(default=None, metadata={"description": "Inspect the request body as plain text. The request body immediately follows the request headers."}) # fmt: skip + method: Optional[Json] = field(default=None, metadata={"description": "Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform."}) # fmt: skip + json_body: Optional[AwsWafJsonBody] = field(default=None, metadata={"description": "Inspect the request body as JSON. The request body immediately follows the request headers."}) # fmt: skip + headers: Optional[AwsWafHeaders] = field(default=None, metadata={"description": "Inspect the request headers."}) # fmt: skip + cookies: Optional[AwsWafCookies] = field(default=None, metadata={"description": "Inspect the request cookies."}) # fmt: skip + header_order: Optional[str] = field(default=None, metadata={"description": "Inspect a string containing the list of the request's header names, ordered as they appear in the web request that WAF receives for inspection"}) # fmt: skip + ja3_fingerprint: Optional[str] = field(default=None, metadata={"description": "Match against the request's JA3 fingerprint. 
The JA3 fingerprint is a 32-character hash derived from the TLS Client Hello of an incoming request."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafTextTransformation: + kind: ClassVar[str] = "aws_waf_text_transformation" + mapping: ClassVar[Dict[str, Bender]] = {"priority": S("Priority"), "type": S("Type")} + priority: Optional[int] = field(default=None, metadata={"description": "Sets the relative processing order for multiple transformations. WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content."}) # fmt: skip + type: Optional[str] = field(default=None, metadata={"description": "For detailed descriptions of each of the transformation types, see Text transformations in the WAF Developer Guide."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafByteMatchStatement: + kind: ClassVar[str] = "aws_waf_byte_match_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "search_string": S("SearchString"), + "field_to_match": S("FieldToMatch") >> Bend(AwsWafFieldToMatch.mapping), + "text_transformations": S("TextTransformations", default=[]) >> ForallBend(AwsWafTextTransformation.mapping), + "positional_constraint": S("PositionalConstraint"), + } + search_string: Optional[str] = field(default=None, metadata={"description": "A string value that you want WAF to search for. 
WAF searches only in the part of web requests that you designate for inspection in FieldToMatch."}) # fmt: skip + field_to_match: Optional[AwsWafFieldToMatch] = field(default=None, metadata={"description": "The part of the web request that you want WAF to inspect."}) # fmt: skip + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + positional_constraint: Optional[str] = field(default=None, metadata={"description": "The area within the portion of the web request that you want WAF to search for SearchString."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafSqliMatchStatement: + kind: ClassVar[str] = "aws_waf_sqli_match_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "field_to_match": S("FieldToMatch") >> Bend(AwsWafFieldToMatch.mapping), + "text_transformations": S("TextTransformations", default=[]) >> ForallBend(AwsWafTextTransformation.mapping), + "sensitivity_level": S("SensitivityLevel"), + } + field_to_match: Optional[AwsWafFieldToMatch] = field(default=None, metadata={"description": "The part of the web request that you want WAF to inspect."}) # fmt: skip + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + sensitivity_level: Optional[str] = field(default=None, metadata={"description": "The sensitivity that you want WAF to use to inspect for SQL injection attacks."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafXssMatchStatement: + kind: ClassVar[str] = "aws_waf_xss_match_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "field_to_match": S("FieldToMatch") >> Bend(AwsWafFieldToMatch.mapping), + 
"text_transformations": S("TextTransformations", default=[]) >> ForallBend(AwsWafTextTransformation.mapping), + } + field_to_match: Optional[AwsWafFieldToMatch] = field(default=None, metadata={"description": "The part of the web request that you want WAF to inspect."}) # fmt: skip + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafSizeConstraintStatement: + kind: ClassVar[str] = "aws_waf_size_constraint_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "field_to_match": S("FieldToMatch") >> Bend(AwsWafFieldToMatch.mapping), + "comparison_operator": S("ComparisonOperator"), + "size": S("Size"), + "text_transformations": S("TextTransformations", default=[]) >> ForallBend(AwsWafTextTransformation.mapping), + } + field_to_match: Optional[AwsWafFieldToMatch] = field(default=None, metadata={"description": "The part of the web request that you want WAF to inspect."}) # fmt: skip + comparison_operator: Optional[str] = field(default=None, metadata={"description": "The operator to use to compare the request part to the size setting."}) # fmt: skip + size: Optional[int] = field(default=None, metadata={"description": "The size, in byte, to compare to the request part, after any transformations."}) # fmt: skip + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafForwardedIPConfig: + kind: ClassVar[str] = "aws_waf_forwarded_ip_config" + mapping: ClassVar[Dict[str, Bender]] = {"header_name": S("HeaderName"), "fallback_behavior": S("FallbackBehavior")} + header_name: 
Optional[str] = field(default=None, metadata={"description": "The name of the HTTP header to use for the IP address. For example, to use the X-Forwarded-For (XFF) header, set this to X-Forwarded-For. "}) # fmt: skip + fallback_behavior: Optional[str] = field(default=None, metadata={"description": "The match status to assign to the web request if the request doesn't have a valid IP address in the specified position."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafGeoMatchStatement: + kind: ClassVar[str] = "aws_waf_geo_match_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "country_codes": S("CountryCodes", default=[]), + "forwarded_ip_config": S("ForwardedIPConfig") >> Bend(AwsWafForwardedIPConfig.mapping), + } + country_codes: Optional[List[str]] = field(factory=list, metadata={"description": "An array of two-character country codes that you want to match against, for example, [ US, CN ], from the alpha-2 country ISO codes of the ISO 3166 international standard."}) # fmt: skip + forwarded_ip_config: Optional[AwsWafForwardedIPConfig] = field(default=None, metadata={"description": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCountAction: + kind: ClassVar[str] = "aws_waf_count_action" + mapping: ClassVar[Dict[str, Bender]] = { + "custom_request_handling": S("CustomRequestHandling", "InsertHeaders") + >> ForallBend(AwsWafCustomHTTPHeader.mapping) + } + custom_request_handling: Optional[List[AwsWafCustomHTTPHeader]] = field(default=None, metadata={"description": "Defines custom handling for the web request. 
For information about customizing web requests and responses, see Customizing web requests and responses in WAF in the WAF Developer Guide."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCaptchaAction: + kind: ClassVar[str] = "aws_waf_captcha_action" + mapping: ClassVar[Dict[str, Bender]] = { + "custom_request_handling": S("CustomRequestHandling", "InsertHeaders") + >> ForallBend(AwsWafCustomHTTPHeader.mapping) + } + custom_request_handling: Optional[List[AwsWafCustomHTTPHeader]] = field(default=None, metadata={"description": "Defines custom handling for the web request, used when the CAPTCHA inspection determines that the request's token is valid and unexpired."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafChallengeAction: + kind: ClassVar[str] = "aws_waf_challenge_action" + mapping: ClassVar[Dict[str, Bender]] = { + "custom_request_handling": S("CustomRequestHandling", "InsertHeaders") + >> ForallBend(AwsWafCustomHTTPHeader.mapping) + } + custom_request_handling: Optional[List[AwsWafCustomHTTPHeader]] = field(default=None, metadata={"description": "Defines custom handling for the web request, used when the challenge inspection determines that the request's token is valid and unexpired."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRuleAction: + kind: ClassVar[str] = "aws_waf_rule_action" + mapping: ClassVar[Dict[str, Bender]] = { + "block": S("Block", "CustomResponse") >> Bend(AwsWafCustomResponse.mapping), + "allow": S("Allow", "CustomRequestHandling", "InsertHeaders") >> ForallBend(AwsWafCustomHTTPHeader.mapping), + "count": S("Count") >> Bend(AwsWafCountAction.mapping), + "captcha": S("Captcha") >> Bend(AwsWafCaptchaAction.mapping), + "challenge": S("Challenge") >> Bend(AwsWafChallengeAction.mapping), + } + block: Optional[AwsWafCustomResponse] = field(default=None, metadata={"description": "Instructs WAF to block the web request."}) # fmt: skip + allow: Optional[List[AwsWafCustomHTTPHeader]] = 
field(default=None, metadata={"description": "Instructs WAF to allow the web request."}) # fmt: skip + count: Optional[AwsWafCountAction] = field(default=None, metadata={"description": "Instructs WAF to count the web request and then continue evaluating the request using the remaining rules in the web ACL."}) # fmt: skip + captcha: Optional[AwsWafCaptchaAction] = field(default=None, metadata={"description": "Instructs WAF to run a CAPTCHA check against the web request."}) # fmt: skip + challenge: Optional[AwsWafChallengeAction] = field(default=None, metadata={"description": "Instructs WAF to run a Challenge check against the web request."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRuleActionOverride: + kind: ClassVar[str] = "aws_waf_rule_action_override" + mapping: ClassVar[Dict[str, Bender]] = { + "name": S("Name"), + "action_to_use": S("ActionToUse") >> Bend(AwsWafRuleAction.mapping), + } + name: Optional[str] = field(default=None, metadata={"description": "The name of the rule to override."}) # fmt: skip + action_to_use: Optional[AwsWafRuleAction] = field(default=None, metadata={"description": "The override action to use, in place of the configured action of the rule in the rule group."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRuleGroupReferenceStatement: + kind: ClassVar[str] = "aws_waf_rule_group_reference_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "arn": S("ARN"), + "excluded_rules": S("ExcludedRules", default=[]) >> ForallBend(S("Name")), + "rule_action_overrides": S("RuleActionOverrides", default=[]) >> ForallBend(AwsWafRuleActionOverride.mapping), + } + arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) of the entity."}) # fmt: skip + excluded_rules: Optional[List[str]] = field(factory=list, metadata={"description": "Rules in the referenced rule group whose actions are set to Count. 
Instead of this option, use RuleActionOverrides."}) # fmt: skip + rule_action_overrides: Optional[List[AwsWafRuleActionOverride]] = field(factory=list, metadata={"description": "Action settings to use in the place of the rule actions that are configured inside the rule group."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafIPSetForwardedIPConfig: + kind: ClassVar[str] = "aws_waf_ip_set_forwarded_ip_config" + mapping: ClassVar[Dict[str, Bender]] = { + "header_name": S("HeaderName"), + "fallback_behavior": S("FallbackBehavior"), + "position": S("Position"), + } + header_name: Optional[str] = field(default=None, metadata={"description": "The name of the HTTP header to use for the IP address."}) # fmt: skip + fallback_behavior: Optional[str] = field(default=None, metadata={"description": "The match status to assign to the web request if the request doesn't have a valid IP address in the specified position."}) # fmt: skip + position: Optional[str] = field(default=None, metadata={"description": "The position in the header to search for the IP address. 
"}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafIPSetReferenceStatement: + kind: ClassVar[str] = "aws_waf_ip_set_reference_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "arn": S("ARN"), + "ip_set_forwarded_ip_config": S("IPSetForwardedIPConfig") >> Bend(AwsWafIPSetForwardedIPConfig.mapping), + } + arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) of the IPSet that this statement references."}) # fmt: skip + ip_set_forwarded_ip_config: Optional[AwsWafIPSetForwardedIPConfig] = field(default=None, metadata={"description": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRegexPatternSetReferenceStatement: + kind: ClassVar[str] = "aws_waf_regex_pattern_set_reference_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "arn": S("ARN"), + "field_to_match": S("FieldToMatch") >> Bend(AwsWafFieldToMatch.mapping), + "text_transformations": S("TextTransformations", default=[]) >> ForallBend(AwsWafTextTransformation.mapping), + } + arn: Optional[str] = field(default=None, metadata={"description": "The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references."}) # fmt: skip + field_to_match: Optional[AwsWafFieldToMatch] = field(default=None, metadata={"description": "The part of the web request that you want WAF to inspect."}) # fmt: skip + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRateLimit: + kind: ClassVar[str] = "aws_waf_rate_limit" + mapping: ClassVar[Dict[str, Bender]] = { + "name": S("Name"), + "text_transformations": S("TextTransformations", 
default=[]) >> ForallBend(AwsWafTextTransformation.mapping), + } + name: Optional[str] = field(default=None, metadata={"description": "The name of the header to use."}) # fmt: skip + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRateLimitUriPath: + kind: ClassVar[str] = "aws_waf_rate_limit_uri_path" + mapping: ClassVar[Dict[str, Bender]] = { + "text_transformations": S("TextTransformations", default=[]) >> ForallBend(AwsWafTextTransformation.mapping) + } + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRateBasedStatementCustomKey: + kind: ClassVar[str] = "aws_waf_rate_based_statement_custom_key" + mapping: ClassVar[Dict[str, Bender]] = { + "header": S("Header") >> Bend(AwsWafRateLimit.mapping), + "cookie": S("Cookie") >> Bend(AwsWafRateLimit.mapping), + "query_argument": S("QueryArgument") >> Bend(AwsWafRateLimit.mapping), + "query_string": S("QueryString") >> Bend(AwsWafRateLimit.mapping), + "http_method": S("HTTPMethod"), + "forwarded_ip": S("ForwardedIP"), + "ip": S("IP"), + "label_namespace": S("LabelNamespace", "Namespace"), + "uri_path": S("UriPath") >> Bend(AwsWafRateLimitUriPath.mapping), + } + header: Optional[AwsWafRateLimit] = field(default=None, metadata={"description": "Use the value of a header in the request as an aggregate key. 
Each distinct value in the header contributes to the aggregation instance."}) # fmt: skip + cookie: Optional[AwsWafRateLimit] = field(default=None, metadata={"description": "Use the value of a cookie in the request as an aggregate key. Each distinct value in the cookie contributes to the aggregation instance."}) # fmt: skip + query_argument: Optional[AwsWafRateLimit] = field(default=None, metadata={"description": "Use the specified query argument as an aggregate key. Each distinct value for the named query argument contributes to the aggregation instance."}) # fmt: skip + query_string: Optional[AwsWafRateLimit] = field(default=None, metadata={"description": "Use the request's query string as an aggregate key. Each distinct string contributes to the aggregation instance."}) # fmt: skip + http_method: Optional[Json] = field(default=None, metadata={"description": "Use the request's HTTP method as an aggregate key. Each distinct HTTP method contributes to the aggregation instance."}) # fmt: skip + forwarded_ip: Optional[Json] = field(default=None, metadata={"description": "Use the first IP address in an HTTP header as an aggregate key. Each distinct forwarded IP address contributes to the aggregation instance."}) # fmt: skip + ip: Optional[Json] = field(default=None, metadata={"description": "Use the request's originating IP address as an aggregate key. Each distinct IP address contributes to the aggregation instance."}) # fmt: skip + label_namespace: Optional[str] = field(default=None, metadata={"description": "Use the specified label namespace as an aggregate key. Each distinct fully qualified label name that has the specified label namespace contributes to the aggregation instance."}) # fmt: skip + uri_path: Optional[AwsWafRateLimitUriPath] = field(default=None, metadata={"description": "Use the request's URI path as an aggregate key. 
Each distinct URI path contributes to the aggregation instance."}) # fmt: skip + + +def aws_waf_statement() -> Bender: + # original: return AwsWafStatement.mapping, which leads to circular references + return ParseJson(keys_to_snake=True) + + +@define(eq=False, slots=False) +class AwsWafRateBasedStatement: + kind: ClassVar[str] = "aws_waf_rate_based_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "limit": S("Limit"), + "aggregate_key_type": S("AggregateKeyType"), + "scope_down_statement": S("ScopeDownStatement") >> Bend(aws_waf_statement()), + "forwarded_ip_config": S("ForwardedIPConfig") >> Bend(AwsWafForwardedIPConfig.mapping), + "custom_keys": S("CustomKeys", default=[]) >> ForallBend(AwsWafRateBasedStatementCustomKey.mapping), + } + limit: Optional[int] = field(default=None, metadata={"description": "The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule."}) # fmt: skip + aggregate_key_type: Optional[str] = field(default=None, metadata={"description": "Setting that indicates how to aggregate the request counts."}) # fmt: skip + scope_down_statement: Optional[AwsWafStatement] = field(default=None, metadata={"description": "An optional nested statement that narrows the scope of the web requests that are evaluated and managed by the rate-based statement."}) # fmt: skip + forwarded_ip_config: Optional[AwsWafForwardedIPConfig] = field(default=None, metadata={"description": "The configuration for inspecting IP addresses in an HTTP header that you specify, instead of using the IP address that's reported by the web request origin."}) # fmt: skip + custom_keys: Optional[List[AwsWafRateBasedStatementCustomKey]] = field(factory=list, metadata={"description": "Specifies the aggregate keys to use in a rate-base rule."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafAndStatement: + kind: ClassVar[str] = "aws_waf_and_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "statements": S("Statements", 
default=[]) >> ForallBend(aws_waf_statement()) + } + statements: Optional[List[AwsWafStatement]] = field(factory=list, metadata={"description": "The statements to combine with AND logic. You can use any statements that can be nested."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafOrStatement: + kind: ClassVar[str] = "aws_waf_or_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "statements": S("Statements", default=[]) >> ForallBend(aws_waf_statement()) + } + statements: Optional[List[AwsWafStatement]] = field(factory=list, metadata={"description": "The statements to combine with OR logic. You can use any statements that can be nested."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafNotStatement: + kind: ClassVar[str] = "aws_waf_not_statement" + mapping: ClassVar[Dict[str, Bender]] = {"statement": S("Statement") >> Bend(aws_waf_statement())} + statement: Optional[AwsWafStatement] = field(default=None, metadata={"description": "The statement to negate. You can use any statement that can be nested."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafAWSManagedRulesBotControlRuleSet: + kind: ClassVar[str] = "aws_waf_aws_managed_rules_bot_control_rule_set" + mapping: ClassVar[Dict[str, Bender]] = { + "inspection_level": S("InspectionLevel"), + "enable_machine_learning": S("EnableMachineLearning"), + } + inspection_level: Optional[str] = field(default=None, metadata={"description": "The inspection level to use for the Bot Control rule group. 
The common level is the least expensive."}) # fmt: skip + enable_machine_learning: Optional[bool] = field(default=None, metadata={"description": "Applies only to the targeted inspection level."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRequestInspection: + kind: ClassVar[str] = "aws_waf_request_inspection" + mapping: ClassVar[Dict[str, Bender]] = { + "payload_type": S("PayloadType"), + "username_field": S("UsernameField", "Identifier"), + "password_field": S("PasswordField", "Identifier"), + } + payload_type: Optional[str] = field(default=None, metadata={"description": "The payload type for your login endpoint, either JSON or form encoded."}) # fmt: skip + username_field: Optional[str] = field(default=None, metadata={"description": "The name of the field in the request payload that contains your customer's username."}) # fmt: skip + password_field: Optional[str] = field(default=None, metadata={"description": "The name of the field in the request payload that contains your customer's password."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafResponseInspectionPart: + kind: ClassVar[str] = "aws_waf_response_inspection" + mapping: ClassVar[Dict[str, Bender]] = { + "name": S("Name"), + "identifier": S("Identifier"), + "success_values": S("SuccessValues", default=[]), + "failure_values": S("FailureValues", default=[]), + } + name: Optional[str] = field(default=None, metadata={"description": "The name of the header to match against."}) # fmt: skip + identifier: Optional[str] = field(default=None, metadata={"description": "The identifier for the value to match against in the JSON."}) # fmt: skip + success_values: Optional[List[str]] = field(factory=list, metadata={"description": "Values for the specified identifier in the response JSON that indicate a successful login or account creation attempt."}) # fmt: skip + failure_values: Optional[List[str]] = field(factory=list, metadata={"description": "Values for the specified identifier in 
the response JSON that indicate a failed login or account creation attempt."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafResponseInspection: + kind: ClassVar[str] = "aws_waf_response_inspection" + mapping: ClassVar[Dict[str, Bender]] = { + "status_code": S("StatusCode") >> Bend(AwsWafResponseInspectionPart.mapping), + "header": S("Header") >> Bend(AwsWafResponseInspectionPart.mapping), + "body_contains": S("BodyContains") >> Bend(AwsWafResponseInspectionPart.mapping), + "json": S("Json") >> Bend(AwsWafResponseInspectionPart.mapping), + } + status_code: Optional[AwsWafResponseInspectionPart] = field(default=None, metadata={"description": "Configures inspection of the response status code for success and failure indicators."}) # fmt: skip + header: Optional[AwsWafResponseInspectionPart] = field(default=None, metadata={"description": "Configures inspection of the response header for success and failure indicators."}) # fmt: skip + body_contains: Optional[AwsWafResponseInspectionPart] = field(default=None, metadata={"description": "Configures inspection of the response body for success and failure indicators."}) # fmt: skip + json: Optional[AwsWafResponseInspectionPart] = field(default=None, metadata={"description": "Configures inspection of the response JSON for success and failure indicators."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafAWSManagedRulesATPRuleSet: + kind: ClassVar[str] = "aws_waf_aws_managed_rules_atp_rule_set" + mapping: ClassVar[Dict[str, Bender]] = { + "login_path": S("LoginPath"), + "request_inspection": S("RequestInspection") >> Bend(AwsWafRequestInspection.mapping), + "response_inspection": S("ResponseInspection") >> Bend(AwsWafResponseInspection.mapping), + "enable_regex_in_path": S("EnableRegexInPath"), + } + login_path: Optional[str] = field(default=None, metadata={"description": "The path of the login endpoint for your application."}) # fmt: skip + request_inspection: Optional[AwsWafRequestInspection] = 
field(default=None, metadata={"description": "The criteria for inspecting login requests, used by the ATP rule group to validate credentials usage."}) # fmt: skip + response_inspection: Optional[AwsWafResponseInspection] = field(default=None, metadata={"description": "The criteria for inspecting responses to login requests, used by the ATP rule group to track login failure rates."}) # fmt: skip + enable_regex_in_path: Optional[bool] = field(default=None, metadata={"description": "Allow the use of regular expressions in the login page path."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRequestInspectionACFP: + kind: ClassVar[str] = "aws_waf_request_inspection_acfp" + mapping: ClassVar[Dict[str, Bender]] = { + "payload_type": S("PayloadType"), + "username_field": S("UsernameField", "Identifier"), + "password_field": S("PasswordField", "Identifier"), + "email_field": S("EmailField", "Identifier"), + "phone_number_fields": S("PhoneNumberFields", default=[]) >> ForallBend(S("Identifier")), + "address_fields": S("AddressFields", default=[]) >> ForallBend(S("Identifier")), + } + payload_type: Optional[str] = field(default=None, metadata={"description": "The payload type for your account creation endpoint, either JSON or form encoded."}) # fmt: skip + username_field: Optional[str] = field(default=None, metadata={"description": "The name of the field in the request payload that contains your customer's username."}) # fmt: skip + password_field: Optional[str] = field(default=None, metadata={"description": "The name of the field in the request payload that contains your customer's password."}) # fmt: skip + email_field: Optional[str] = field(default=None, metadata={"description": "The name of the field in the request payload that contains your customer's email."}) # fmt: skip + phone_number_fields: Optional[List[str]] = field(factory=list, metadata={"description": "The names of the fields in the request payload that contain your customer's primary phone 
number."}) # fmt: skip + address_fields: Optional[List[str]] = field(factory=list, metadata={"description": "The names of the fields in the request payload that contain your customer's primary physical address."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafAWSManagedRulesACFPRuleSet: + kind: ClassVar[str] = "aws_waf_aws_managed_rules_acfp_rule_set" + mapping: ClassVar[Dict[str, Bender]] = { + "creation_path": S("CreationPath"), + "registration_page_path": S("RegistrationPagePath"), + "request_inspection": S("RequestInspection") >> Bend(AwsWafRequestInspectionACFP.mapping), + "response_inspection": S("ResponseInspection") >> Bend(AwsWafResponseInspection.mapping), + "enable_regex_in_path": S("EnableRegexInPath"), + } + creation_path: Optional[str] = field(default=None, metadata={"description": "The path of the account creation endpoint for your application."}) # fmt: skip + registration_page_path: Optional[str] = field(default=None, metadata={"description": "The path of the account registration endpoint for your application."}) # fmt: skip + request_inspection: Optional[AwsWafRequestInspectionACFP] = field(default=None, metadata={"description": "The criteria for inspecting account creation requests, used by the ACFP rule group to validate and track account creation attempts."}) # fmt: skip + response_inspection: Optional[AwsWafResponseInspection] = field(default=None, metadata={"description": "The criteria for inspecting responses to account creation requests, used by the ACFP rule group to track account creation success rates. 
"}) # fmt: skip + enable_regex_in_path: Optional[bool] = field(default=None, metadata={"description": "Allow the use of regular expressions in the registration page path and the account creation path."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafManagedRuleGroupConfig: + kind: ClassVar[str] = "aws_waf_managed_rule_group_config" + mapping: ClassVar[Dict[str, Bender]] = { + "login_path": S("LoginPath"), + "payload_type": S("PayloadType"), + "username_field": S("UsernameField", "Identifier"), + "password_field": S("PasswordField", "Identifier"), + "aws_managed_rules_bot_control_rule_set": S("AWSManagedRulesBotControlRuleSet") + >> Bend(AwsWafAWSManagedRulesBotControlRuleSet.mapping), + "aws_managed_rules_atp_rule_set": S("AWSManagedRulesATPRuleSet") + >> Bend(AwsWafAWSManagedRulesATPRuleSet.mapping), + "aws_managed_rules_acfp_rule_set": S("AWSManagedRulesACFPRuleSet") + >> Bend(AwsWafAWSManagedRulesACFPRuleSet.mapping), + } + login_path: Optional[str] = field(default=None, metadata={"description": "Instead of this setting, provide your configuration under AWSManagedRulesATPRuleSet."}) # fmt: skip + payload_type: Optional[str] = field(default=None, metadata={"description": "Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet."}) # fmt: skip + username_field: Optional[str] = field(default=None, metadata={"description": "Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet."}) # fmt: skip + password_field: Optional[str] = field(default=None, metadata={"description": "Instead of this setting, provide your configuration under the request inspection configuration for AWSManagedRulesATPRuleSet or AWSManagedRulesACFPRuleSet."}) # fmt: skip + aws_managed_rules_bot_control_rule_set: Optional[AwsWafAWSManagedRulesBotControlRuleSet] = field(default=None, 
metadata={"description": "Additional configuration for using the Bot Control managed rule group."}) # fmt: skip + aws_managed_rules_atp_rule_set: Optional[AwsWafAWSManagedRulesATPRuleSet] = field(default=None, metadata={"description": "Additional configuration for using the account takeover prevention (ATP) managed rule group, AWSManagedRulesATPRuleSet."}) # fmt: skip + aws_managed_rules_acfp_rule_set: Optional[AwsWafAWSManagedRulesACFPRuleSet] = field(default=None, metadata={"description": "Additional configuration for using the account creation fraud prevention (ACFP) managed rule group, AWSManagedRulesACFPRuleSet."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafManagedRuleGroupStatement: + kind: ClassVar[str] = "aws_waf_managed_rule_group_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "vendor_name": S("VendorName"), + "name": S("Name"), + "version": S("Version"), + "excluded_rules": S("ExcludedRules", default=[]) >> ForallBend(S("Name")), + "scope_down_statement": S("ScopeDownStatement") >> Bend(aws_waf_statement()), + "managed_rule_group_configs": S("ManagedRuleGroupConfigs", default=[]) + >> ForallBend(AwsWafManagedRuleGroupConfig.mapping), + "rule_action_overrides": S("RuleActionOverrides", default=[]) >> ForallBend(AwsWafRuleActionOverride.mapping), + } + vendor_name: Optional[str] = field(default=None, metadata={"description": "The name of the managed rule group vendor."}) # fmt: skip + name: Optional[str] = field(default=None, metadata={"description": "The name of the managed rule group."}) # fmt: skip + version: Optional[str] = field(default=None, metadata={"description": "The version of the managed rule group to use."}) # fmt: skip + excluded_rules: Optional[List[str]] = field(factory=list, metadata={"description": "Rules in the referenced rule group whose actions are set to Count."}) # fmt: skip + scope_down_statement: Optional[AwsWafStatement] = field(default=None, metadata={"description": "An optional nested statement that 
narrows the scope of the web requests that are evaluated by the managed rule group."}) # fmt: skip + managed_rule_group_configs: Optional[List[AwsWafManagedRuleGroupConfig]] = field(factory=list, metadata={"description": "Additional information that's used by a managed rule group. Many managed rule groups don't require this."}) # fmt: skip + rule_action_overrides: Optional[List[AwsWafRuleActionOverride]] = field(factory=list, metadata={"description": "Action settings to use in the place of the rule actions that are configured inside the rule group."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafLabelMatchStatement: + kind: ClassVar[str] = "aws_waf_label_match_statement" + mapping: ClassVar[Dict[str, Bender]] = {"scope": S("Scope"), "key": S("Key")} + scope: Optional[str] = field(default=None, metadata={"description": "Specify whether you want to match using the label name or just the namespace."}) # fmt: skip + key: Optional[str] = field(default=None, metadata={"description": "The string to match against. 
The setting you provide for this depends on the match statement's Scope setting."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRegexMatchStatement: + kind: ClassVar[str] = "aws_waf_regex_match_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "regex_string": S("RegexString"), + "field_to_match": S("FieldToMatch") >> Bend(AwsWafFieldToMatch.mapping), + "text_transformations": S("TextTransformations", default=[]) >> ForallBend(AwsWafTextTransformation.mapping), + } + regex_string: Optional[str] = field(default=None, metadata={"description": "The string representing the regular expression."}) # fmt: skip + field_to_match: Optional[AwsWafFieldToMatch] = field(default=None, metadata={"description": "The part of the web request that you want WAF to inspect."}) # fmt: skip + text_transformations: Optional[List[AwsWafTextTransformation]] = field(factory=list, metadata={"description": "Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafStatement: + kind: ClassVar[str] = "aws_waf_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "byte_match_statement": S("ByteMatchStatement") >> Bend(AwsWafByteMatchStatement.mapping), + "sqli_match_statement": S("SqliMatchStatement") >> Bend(AwsWafSqliMatchStatement.mapping), + "xss_match_statement": S("XssMatchStatement") >> Bend(AwsWafXssMatchStatement.mapping), + "size_constraint_statement": S("SizeConstraintStatement") >> Bend(AwsWafSizeConstraintStatement.mapping), + "geo_match_statement": S("GeoMatchStatement") >> Bend(AwsWafGeoMatchStatement.mapping), + "rule_group_reference_statement": S("RuleGroupReferenceStatement") + >> Bend(AwsWafRuleGroupReferenceStatement.mapping), + "ip_set_reference_statement": S("IPSetReferenceStatement") >> Bend(AwsWafIPSetReferenceStatement.mapping), + "regex_pattern_set_reference_statement": S("RegexPatternSetReferenceStatement") + >> 
Bend(AwsWafRegexPatternSetReferenceStatement.mapping), + "rate_based_statement": S("RateBasedStatement") >> Bend(AwsWafRateBasedStatement.mapping), + "and_statement": S("AndStatement") >> Bend(AwsWafAndStatement.mapping), + "or_statement": S("OrStatement") >> Bend(AwsWafOrStatement.mapping), + "not_statement": S("NotStatement") >> Bend(AwsWafNotStatement.mapping), + "managed_rule_group_statement": S("ManagedRuleGroupStatement") >> Bend(AwsWafManagedRuleGroupStatement.mapping), + "label_match_statement": S("LabelMatchStatement") >> Bend(AwsWafLabelMatchStatement.mapping), + "regex_match_statement": S("RegexMatchStatement") >> Bend(AwsWafRegexMatchStatement.mapping), + } + byte_match_statement: Optional[AwsWafByteMatchStatement] = field(default=None, metadata={"description": "A rule statement that defines a string match search for WAF to apply to web requests."}) # fmt: skip + sqli_match_statement: Optional[AwsWafSqliMatchStatement] = field(default=None, metadata={"description": "A rule statement that inspects for malicious SQL code."}) # fmt: skip + xss_match_statement: Optional[AwsWafXssMatchStatement] = field(default=None, metadata={"description": "A rule statement that inspects for cross-site scripting (XSS) attacks."}) # fmt: skip + size_constraint_statement: Optional[AwsWafSizeConstraintStatement] = field(default=None, metadata={"description": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). 
"}) # fmt: skip + geo_match_statement: Optional[AwsWafGeoMatchStatement] = field(default=None, metadata={"description": "A rule statement that labels web requests by country and region and that matches against web requests based on country code."}) # fmt: skip + rule_group_reference_statement: Optional[AwsWafRuleGroupReferenceStatement] = field(default=None, metadata={"description": "A rule statement used to run the rules that are defined in a RuleGroup."}) # fmt: skip + ip_set_reference_statement: Optional[AwsWafIPSetReferenceStatement] = field(default=None, metadata={"description": "A rule statement used to detect web requests coming from particular IP addresses or address ranges."}) # fmt: skip + regex_pattern_set_reference_statement: Optional[AwsWafRegexPatternSetReferenceStatement] = field(default=None, metadata={"description": "A rule statement used to search web request components for matches with regular expressions."}) # fmt: skip + rate_based_statement: Optional[AwsWafRateBasedStatement] = field(default=None, metadata={"description": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate."}) # fmt: skip + and_statement: Optional[AwsWafAndStatement] = field(default=None, metadata={"description": "A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement."}) # fmt: skip + or_statement: Optional[AwsWafOrStatement] = field(default=None, metadata={"description": "A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement."}) # fmt: skip + not_statement: Optional[AwsWafNotStatement] = field(default=None, metadata={"description": "A logical rule statement used to negate the results of another rule statement. 
You provide one Statement within the NotStatement."}) # fmt: skip + managed_rule_group_statement: Optional[AwsWafManagedRuleGroupStatement] = field(default=None, metadata={"description": "A rule statement used to run the rules that are defined in a managed rule group."}) # fmt: skip + label_match_statement: Optional[AwsWafLabelMatchStatement] = field(default=None, metadata={"description": "A rule statement to match against labels that have been added to the web request by rules that have already run in the web ACL."}) # fmt: skip + regex_match_statement: Optional[AwsWafRegexMatchStatement] = field(default=None, metadata={"description": "A rule statement used to search web request components for a match against a single regular expression."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafOverrideAction: + kind: ClassVar[str] = "aws_waf_override_action" + mapping: ClassVar[Dict[str, Bender]] = {"count": S("Count") >> Bend(AwsWafCountAction.mapping), "none": S("None")} + count: Optional[AwsWafCountAction] = field(default=None, metadata={"description": "Override the rule group evaluation result to count only."}) # fmt: skip + none: Optional[Json] = field(default=None, metadata={"description": "Don't override the rule group evaluation result. 
This is the most common setting."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafVisibilityConfig: + kind: ClassVar[str] = "aws_waf_visibility_config" + mapping: ClassVar[Dict[str, Bender]] = { + "sampled_requests_enabled": S("SampledRequestsEnabled"), + "cloud_watch_metrics_enabled": S("CloudWatchMetricsEnabled"), + "metric_name": S("MetricName"), + } + sampled_requests_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether WAF should store a sampling of the web requests that match the rules."}) # fmt: skip + cloud_watch_metrics_enabled: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the associated resource sends metrics to Amazon CloudWatch."}) # fmt: skip + metric_name: Optional[str] = field(default=None, metadata={"description": "A name of the Amazon CloudWatch metric dimension."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCaptchaConfig: + kind: ClassVar[str] = "aws_waf_captcha_config" + mapping: ClassVar[Dict[str, Bender]] = {"immunity_time_property": S("ImmunityTimeProperty", "ImmunityTime")} + immunity_time_property: Optional[int] = field(default=None, metadata={"description": "Determines how long a CAPTCHA timestamp in the token remains valid after the client successfully solves a CAPTCHA puzzle."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafChallengeConfig: + kind: ClassVar[str] = "aws_waf_challenge_config" + mapping: ClassVar[Dict[str, Bender]] = {"immunity_time_property": S("ImmunityTimeProperty", "ImmunityTime")} + immunity_time_property: Optional[int] = field(default=None, metadata={"description": "Determines how long a challenge timestamp in the token remains valid after the client successfully responds to a challenge."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafRule: + kind: ClassVar[str] = "aws_waf_rule" + mapping: ClassVar[Dict[str, Bender]] = { + "name": S("Name"), + "priority": S("Priority"), + "statement": 
S("Statement") >> Bend(AwsWafStatement.mapping), + "action": S("Action") >> Bend(AwsWafRuleAction.mapping), + "override_action": S("OverrideAction") >> Bend(AwsWafOverrideAction.mapping), + "rule_labels": S("RuleLabels", default=[]) >> ForallBend(S("Name")), + "visibility_config": S("VisibilityConfig") >> Bend(AwsWafVisibilityConfig.mapping), + "captcha_config": S("CaptchaConfig") >> Bend(AwsWafCaptchaConfig.mapping), + "challenge_config": S("ChallengeConfig") >> Bend(AwsWafChallengeConfig.mapping), + } + name: Optional[str] = field(default=None, metadata={"description": "The name of the rule."}) # fmt: skip + priority: Optional[int] = field(default=None, metadata={"description": "If you define more than one Rule in a WebACL, WAF evaluates each request against the Rules in order based on the value of Priority."}) # fmt: skip + statement: Optional[AwsWafStatement] = field(default=None, metadata={"description": "The WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement."}) # fmt: skip + action: Optional[AwsWafRuleAction] = field(default=None, metadata={"description": "The action that WAF should take on a web request when it matches the rule statement."}) # fmt: skip + override_action: Optional[AwsWafOverrideAction] = field(default=None, metadata={"description": "The action to use in the place of the action that results from the rule group evaluation."}) # fmt: skip + rule_labels: Optional[List[str]] = field(factory=list, metadata={"description": "Labels to apply to web requests that match the rule match statement."}) # fmt: skip + visibility_config: Optional[AwsWafVisibilityConfig] = field(default=None, metadata={"description": "Defines and enables Amazon CloudWatch metrics and web request sample collection."}) # fmt: skip + captcha_config: Optional[AwsWafCaptchaConfig] = field(default=None, metadata={"description": "Specifies how WAF should handle CAPTCHA evaluations."}) # fmt: skip + challenge_config: 
Optional[AwsWafChallengeConfig] = field(default=None, metadata={"description": "Specifies how WAF should handle Challenge evaluations."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafFirewallManagerStatement: + kind: ClassVar[str] = "aws_waf_firewall_manager_statement" + mapping: ClassVar[Dict[str, Bender]] = { + "managed_rule_group_statement": S("ManagedRuleGroupStatement") >> Bend(AwsWafManagedRuleGroupStatement.mapping), + "rule_group_reference_statement": S("RuleGroupReferenceStatement") + >> Bend(AwsWafRuleGroupReferenceStatement.mapping), + } + managed_rule_group_statement: Optional[AwsWafManagedRuleGroupStatement] = field(default=None, metadata={"description": "A statement used by Firewall Manager to run the rules that are defined in a managed rule group."}) # fmt: skip + rule_group_reference_statement: Optional[AwsWafRuleGroupReferenceStatement] = field(default=None, metadata={"description": "A statement used by Firewall Manager to run the rules that are defined in a rule group."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafFirewallManagerRuleGroup: + kind: ClassVar[str] = "aws_waf_firewall_manager_rule_group" + mapping: ClassVar[Dict[str, Bender]] = { + "name": S("Name"), + "priority": S("Priority"), + "firewall_manager_statement": S("FirewallManagerStatement") >> Bend(AwsWafFirewallManagerStatement.mapping), + "override_action": S("OverrideAction") >> Bend(AwsWafOverrideAction.mapping), + "visibility_config": S("VisibilityConfig") >> Bend(AwsWafVisibilityConfig.mapping), + } + name: Optional[str] = field(default=None, metadata={"description": "The name of the rule group. 
You cannot change the name of a rule group after you create it."}) # fmt: skip + priority: Optional[int] = field(default=None, metadata={"description": "If you define more than one rule group in the first or last Firewall Manager rule groups, WAF evaluates each request against the rule groups in order, starting from the lowest priority setting."}) # fmt: skip + firewall_manager_statement: Optional[AwsWafFirewallManagerStatement] = field(default=None, metadata={"description": "The processing guidance for an Firewall Manager rule"}) # fmt: skip + override_action: Optional[AwsWafOverrideAction] = field(default=None, metadata={"description": "The action to use in the place of the action that results from the rule group evaluation."}) # fmt: skip + visibility_config: Optional[AwsWafVisibilityConfig] = field(default=None, metadata={"description": "Defines and enables Amazon CloudWatch metrics and web request sample collection."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCustomResponseBody: + kind: ClassVar[str] = "aws_waf_custom_response_body" + mapping: ClassVar[Dict[str, Bender]] = {"content_type": S("ContentType"), "content": S("Content")} + content_type: Optional[str] = field(default=None, metadata={"description": "The type of content in the payload that you are defining in the Content string."}) # fmt: skip + content: Optional[str] = field(default=None, metadata={"description": "The payload of the custom response."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafCondition: + kind: ClassVar[str] = "aws_waf_condition" + mapping: ClassVar[Dict[str, Bender]] = { + "action_condition": S("ActionCondition", "Action"), + "label_name_condition": S("LabelNameCondition", "LabelName"), + } + action_condition: Optional[str] = field(default=None, metadata={"description": "A single action condition. 
This is the action setting that a log record must contain in order to meet the condition."}) # fmt: skip + label_name_condition: Optional[str] = field(default=None, metadata={"description": "A single label name condition. This is the fully qualified label name that a log record must contain in order to meet the condition. Fully qualified labels have a prefix, optional namespaces, and label name. The prefix identifies the rule group or web ACL context of the rule that added the label."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafFilter: + kind: ClassVar[str] = "aws_waf_filter" + mapping: ClassVar[Dict[str, Bender]] = { + "behavior": S("Behavior"), + "requirement": S("Requirement"), + "conditions": S("Conditions", default=[]) >> ForallBend(AwsWafCondition.mapping), + } + behavior: Optional[str] = field(default=None, metadata={"description": "How to handle logs that satisfy the filter's conditions and requirement."}) # fmt: skip + requirement: Optional[str] = field(default=None, metadata={"description": "Logic to apply to the filtering conditions. 
You can specify that, in order to satisfy the filter, a log must match all conditions or must match at least one condition."}) # fmt: skip + conditions: Optional[List[AwsWafCondition]] = field(factory=list, metadata={"description": "Match conditions for the filter."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafLoggingFilter: + kind: ClassVar[str] = "aws_waf_logging_filter" + mapping: ClassVar[Dict[str, Bender]] = { + "filters": S("Filters", default=[]) >> ForallBend(AwsWafFilter.mapping), + "default_behavior": S("DefaultBehavior"), + } + filters: Optional[List[AwsWafFilter]] = field(factory=list, metadata={"description": "The filters that you want to apply to the logs."}) # fmt: skip + default_behavior: Optional[str] = field(default=None, metadata={"description": "Default handling for logs that don't match any of the specified filtering conditions."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafLoggingConfiguration: + kind: ClassVar[str] = "aws_waf_logging_configuration" + mapping: ClassVar[Dict[str, Bender]] = { + "log_destination_configs": S("LogDestinationConfigs", default=[]), + "redacted_fields": S("RedactedFields", default=[]) >> ForallBend(AwsWafFieldToMatch.mapping), + "managed_by_firewall_manager": S("ManagedByFirewallManager"), + "logging_filter": S("LoggingFilter") >> Bend(AwsWafLoggingFilter.mapping), + } + log_destination_configs: Optional[List[str]] = field(factory=list, metadata={"description": "The logging destination configuration that you want to associate with the web ACL. You can associate one logging destination to a web ACL."}) # fmt: skip + redacted_fields: Optional[List[AwsWafFieldToMatch]] = field(factory=list, metadata={"description": "The parts of the request that you want to keep out of the logs. For example, if you redact the SingleHeader field, the HEADER field in the logs will be REDACTED for all rules that use the SingleHeader FieldToMatch setting. 
Redaction applies only to the component that's specified in the rule's FieldToMatch setting, so the SingleHeader redaction doesn't apply to rules that use the Headers FieldToMatch. You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, and Method."}) # fmt: skip + managed_by_firewall_manager: Optional[bool] = field(default=None, metadata={"description": "Indicates whether the logging configuration was created by Firewall Manager, as part of an WAF policy configuration. If true, only Firewall Manager can modify or delete the configuration."}) # fmt: skip + logging_filter: Optional[AwsWafLoggingFilter] = field(default=None, metadata={"description": "Filtering that specifies which web requests are kept in the logs and which are dropped. You can filter on the rule action and on the web request labels that were applied by matching rules during web ACL evaluation."}) # fmt: skip + + +@define(eq=False, slots=False) +class AwsWafWebACL(AwsResource): + kind: ClassVar[str] = "aws_waf_web_acl" + api_spec: ClassVar[AwsApiSpec] = AwsApiSpec("wafv2", "get-web-acl", "WebACL") + reference_kinds: ClassVar[ModelReference] = { + "successors": {"default": ["aws_alb", "aws_apigateway_rest_api", "aws_cognito_user_pool"]} + } + mapping: ClassVar[Dict[str, Bender]] = { + "id": S("Id"), + "name": S("Name"), + "tags": S("Tags", default=[]) >> ToDict(), + "arn": S("ARN"), + "default_action": S("DefaultAction") >> Bend(AwsWafDefaultAction.mapping), + "description": S("Description"), + "waf_rules": S("Rules", default=[]) >> ForallBend(AwsWafRule.mapping), + "visibility_config": S("VisibilityConfig") >> Bend(AwsWafVisibilityConfig.mapping), + "capacity": S("Capacity"), + "pre_process_firewall_manager_rule_groups": S("PreProcessFirewallManagerRuleGroups", default=[]) + >> ForallBend(AwsWafFirewallManagerRuleGroup.mapping), + "post_process_firewall_manager_rule_groups": S("PostProcessFirewallManagerRuleGroups", default=[]) + >> 
ForallBend(AwsWafFirewallManagerRuleGroup.mapping), + "managed_by_firewall_manager": S("ManagedByFirewallManager"), + "label_namespace": S("LabelNamespace"), + "custom_response_bodies": S("CustomResponseBodies") + >> MapDict(value_bender=Bend(AwsWafCustomResponseBody.mapping)), + "captcha_config": S("CaptchaConfig") >> Bend(AwsWafCaptchaConfig.mapping), + "challenge_config": S("ChallengeConfig") >> Bend(AwsWafChallengeConfig.mapping), + "token_domains": S("TokenDomains", default=[]), + "association_inspection_limit": S("AssociationConfig", "RequestBody", "DefaultSizeInspectionLimit"), + } + default_action: Optional[AwsWafDefaultAction] = field(default=None, metadata={"description": "The action to perform if none of the Rules contained in the WebACL match."}) # fmt: skip + description: Optional[str] = field(default=None, metadata={"description": "A description of the web ACL that helps with identification."}) # fmt: skip + waf_rules: Optional[List[AwsWafRule]] = field(factory=list, metadata={"description": "The Rule statements used to identify the web requests that you want to manage. Each rule includes one top-level statement that WAF uses to identify matching web requests, and parameters that govern how WAF handles them."}) # fmt: skip + visibility_config: Optional[AwsWafVisibilityConfig] = field(default=None, metadata={"description": "Defines and enables Amazon CloudWatch metrics and web request sample collection."}) # fmt: skip + capacity: Optional[int] = field(default=None, metadata={"description": "The web ACL capacity units (WCUs) currently being used by this web ACL. 
WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs."}) # fmt: skip + pre_process_firewall_manager_rule_groups: Optional[List[AwsWafFirewallManagerRuleGroup]] = field(factory=list, metadata={"description": "The first set of rules for WAF to process in the web ACL."}) # fmt: skip + post_process_firewall_manager_rule_groups: Optional[List[AwsWafFirewallManagerRuleGroup]] = field(factory=list, metadata={"description": "The last set of rules for WAF to process in the web ACL."}) # fmt: skip + managed_by_firewall_manager: Optional[bool] = field(default=None, metadata={"description": "Indicates whether this web ACL is managed by Firewall Manager. If true, then only Firewall Manager can delete the web ACL or any Firewall Manager rule groups in the web ACL."}) # fmt: skip + label_namespace: Optional[str] = field(default=None, metadata={"description": "The label namespace prefix for this web ACL. All labels added by rules in this web ACL have this prefix."}) # fmt: skip + custom_response_bodies: Optional[Dict[str, AwsWafCustomResponseBody]] = field(default=None, metadata={"description": "A map of custom response keys and content bodies."}) # fmt: skip + captcha_config: Optional[AwsWafCaptchaConfig] = field(default=None, metadata={"description": "Specifies how WAF should handle CAPTCHA evaluations for rules that don't have their own CaptchaConfig settings."}) # fmt: skip + challenge_config: Optional[AwsWafChallengeConfig] = field(default=None, metadata={"description": "Specifies how WAF should handle challenge evaluations for rules that don't have their own ChallengeConfig settings."}) # fmt: skip + token_domains: Optional[List[str]] = field(factory=list, metadata={"description": "Specifies the domains that WAF should accept in a web request token. 
This enables the use of tokens across multiple protected websites."}) # fmt: skip + association_inspection_limit: Optional[str] = field(default=None, metadata={"description": "Specifies the maximum size of the web request body component that an associated CloudFront distribution should send to WAF for inspection."}) # fmt: skip + logging_configuration: Optional[AwsWafLoggingConfiguration] = None + _associated_resources: Optional[List[str]] = None + + @classmethod + def collect_resources(cls: Type[AwsResource], builder: GraphBuilder) -> None: + def fetch_acl_resources(acl: AwsWafWebACL) -> None: + with builder.suppress(f"{service_name}.list-resources-for-web-acl"): + acl._associated_resources = builder.client.list( + service_name, "list-resources-for-web-acl", "ResourceArns", WebACLArn=acl.arn + ) + + def fetch_logging_configuration(acl: AwsWafWebACL) -> None: + with builder.suppress(f"{service_name}.get-logging-configuration"): + if logging_configuration := builder.client.get( + aws_service=service_name, + action="get-logging-configuration", + result_name="LoggingConfiguration", + expected_errors=["WAFNonexistentItemException"], + ResourceArn=acl.arn, + ): + acl.logging_configuration = parse_json( + logging_configuration, AwsWafLoggingConfiguration, builder, AwsWafLoggingConfiguration.mapping + ) + + def fetch_web_acl(entry: Json, scope: str) -> None: + if web_acl := builder.client.get( + aws_service=service_name, + action="get-web-acl", + result_name="WebACL", + Scope=scope, + Id=entry["Id"], + Name=entry["Name"], + ): + if instance := AwsWafWebACL.from_api(web_acl, builder): + builder.add_node(instance) + builder.submit_work(service_name, fetch_logging_configuration, instance) + if scope == "REGIONAL": # only regional ACLs have associated resources + builder.submit_work(service_name, fetch_acl_resources, instance) + + # Default behavior: in case the class has an ApiSpec, call the api and call collect. 
+ log.debug(f"Collecting {cls.__name__} in region {builder.region.name}") + try: + for entry in builder.client.list( + aws_service=service_name, + action="list-web-acls", + result_name="WebACLs", + Scope="REGIONAL", + ): + builder.submit_work(service_name, fetch_web_acl, entry, "REGIONAL") + for entry in builder.client.list( + aws_service=service_name, + action="list-web-acls", + result_name="WebACLs", + Scope="CLOUDFRONT", + ): + builder.submit_work(service_name, fetch_web_acl, entry, "CLOUDFRONT") + except Boto3Error as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.error(msg, log) + raise + except Exception as e: + msg = f"Error while collecting {cls.__name__} in region {builder.region.name}: {e}" + builder.core_feedback.info(msg, log) + raise + + def connect_in_graph(self, builder: GraphBuilder, source: Json) -> None: + for arn in self._associated_resources or []: + builder.add_edge(self, arn=arn) + if (lc := self.logging_configuration) and (lcdcs := lc.log_destination_configs): + for arn in lcdcs: + builder.add_edge(self, arn=arn) + + @classmethod + def called_collect_apis(cls) -> List[AwsApiSpec]: + return [ + AwsApiSpec(service_name, "list-web-acls"), + AwsApiSpec(service_name, "get-web-acl"), + AwsApiSpec(service_name, "list-resources-for-web-acl"), + AwsApiSpec(service_name, "get-logging-configuration"), + ] + + +resources: List[Type[AwsResource]] = [AwsWafWebACL] diff --git a/plugins/aws/test/collector_test.py b/plugins/aws/test/collector_test.py index 3551e0c5a7..380cccc2f4 100644 --- a/plugins/aws/test/collector_test.py +++ b/plugins/aws/test/collector_test.py @@ -33,8 +33,8 @@ def count_kind(clazz: Type[AwsResource]) -> int: # make sure all threads have been joined assert len(threading.enumerate()) == 1 # ensure the correct number of nodes and edges - assert count_kind(AwsResource) == 213 - assert len(account_collector.graph.edges) == 491 + assert count_kind(AwsResource) == 226 + assert 
len(account_collector.graph.edges) == 516 assert len(account_collector.graph.deferred_edges) == 2 diff --git a/plugins/aws/test/resources/__init__.py b/plugins/aws/test/resources/__init__.py index ef9197ab79..35b8d2f1dc 100644 --- a/plugins/aws/test/resources/__init__.py +++ b/plugins/aws/test/resources/__init__.py @@ -73,7 +73,7 @@ def call_action(*args: Any, **kwargs: Any) -> Any: with open(path) as f: return json.load(f) else: - # print(f"Not found: {path}") + print(f"Not found: {path}") return {} return call_action diff --git a/plugins/aws/test/resources/acm_test.py b/plugins/aws/test/resources/acm_test.py new file mode 100644 index 0000000000..b78f41af23 --- /dev/null +++ b/plugins/aws/test/resources/acm_test.py @@ -0,0 +1,6 @@ +from resoto_plugin_aws.resource.acm import AwsAcmCertificate +from test.resources import round_trip_for + + +def test_certificates() -> None: + round_trip_for(AwsAcmCertificate) diff --git a/plugins/aws/test/resources/apigateway_test.py b/plugins/aws/test/resources/apigateway_test.py index 26b82fa3df..1f16c3c3aa 100644 --- a/plugins/aws/test/resources/apigateway_test.py +++ b/plugins/aws/test/resources/apigateway_test.py @@ -15,10 +15,10 @@ def test_rest_apis() -> None: type_count: Dict[str, int] = defaultdict(int) for node in builder.graph.nodes: type_count[node.kind] += 1 - assert type_count["aws_api_gateway_deployment"] == 2 - assert type_count["aws_api_gateway_stage"] == 1 - assert type_count["aws_api_gateway_authorizer"] == 1 - assert type_count["aws_api_gateway_resource"] == 1 + assert type_count["aws_apigateway_deployment"] == 2 + assert type_count["aws_apigateway_stage"] == 1 + assert type_count["aws_apigateway_authorizer"] == 1 + assert type_count["aws_apigateway_resource"] == 1 def test_api_tagging() -> None: diff --git a/plugins/aws/test/resources/cloudtrail_test.py b/plugins/aws/test/resources/cloudtrail_test.py index f6b3e29c69..f209735ed6 100644 --- a/plugins/aws/test/resources/cloudtrail_test.py +++ 
b/plugins/aws/test/resources/cloudtrail_test.py @@ -15,6 +15,7 @@ def test_trails() -> None: AwsS3Bucket.collect_resources(builder) AwsKmsKey.collect_resources(builder) AwsSnsTopic.collect_resources(builder) + builder.executor.wait_for_submitted_work() data = builder.graph.nodes(data=True)[first] first.connect_in_graph(builder, data["source"]) assert len(builder.edges_of(AwsCloudTrail, AwsS3Bucket)) == 1 diff --git a/plugins/aws/test/resources/ec2_test.py b/plugins/aws/test/resources/ec2_test.py index 111fd5db6c..371ae4f821 100644 --- a/plugins/aws/test/resources/ec2_test.py +++ b/plugins/aws/test/resources/ec2_test.py @@ -21,6 +21,8 @@ AwsEc2RouteTable, AwsEc2Host, AwsEc2FlowLog, + AwsEc2Image, + AwsEc2LaunchTemplate, ) from resotolib.graph import Graph from test.resources import round_trip_for, build_graph, check_single_node @@ -30,6 +32,10 @@ def test_flow_logs() -> None: round_trip_for(AwsEc2FlowLog) +def test_launch_data() -> None: + round_trip_for(AwsEc2LaunchTemplate) + + def test_instance_types() -> None: builder = build_graph(AwsEc2InstanceType) for it in builder.global_instance_types.values(): @@ -45,6 +51,10 @@ def test_volumes() -> None: round_trip_for(AwsEc2Volume) +def test_images() -> None: + round_trip_for(AwsEc2Image) + + def test_delete_volumes() -> None: volume, _ = round_trip_for(AwsEc2Volume) diff --git a/plugins/aws/test/resources/files/acm/describe-certificate__foo.json b/plugins/aws/test/resources/files/acm/describe-certificate__foo.json new file mode 100644 index 0000000000..9a0e6bb8c2 --- /dev/null +++ b/plugins/aws/test/resources/files/acm/describe-certificate__foo.json @@ -0,0 +1,88 @@ +{ + "Certificate": { + "CertificateArn": "arn:aws:acm:us-east-2:test:certificate/7d16bbb2-99c5-4388-b6f4-d97eee909827", + "DomainName": "foo", + "SubjectAlternativeNames": [ + "demo.some.engineering", + "*.demo.some.engineering" + ], + "DomainValidationOptions": [ + { + "DomainName": "demo.some.engineering", + "ValidationEmails": [ + 
"admin@some.engineering", + "administrator@some.engineering", + "hostmaster@some.engineering", + "postmaster@some.engineering", + "webmaster@some.engineering" + ], + "ValidationDomain": "some.engineering", + "ValidationStatus": "SUCCESS", + "ValidationMethod": "EMAIL" + }, + { + "DomainName": "*.demo.some.engineering", + "ValidationEmails": [ + "admin@some.engineering", + "administrator@some.engineering", + "hostmaster@some.engineering", + "postmaster@some.engineering", + "webmaster@some.engineering" + ], + "ValidationDomain": "some.engineering", + "ValidationStatus": "SUCCESS", + "ValidationMethod": "EMAIL" + } + ], + "Serial": "03:ad:40:93:b4:d2:0a:ee:1d:6f:12:23:9b:57:64:42", + "Subject": "CN=demo.some.engineering", + "Issuer": "Amazon", + "CreatedAt": "2021-09-09T12:51:32+02:00", + "IssuedAt": "2021-09-09T21:39:50+02:00", + "Status": "EXPIRED", + "NotBefore": "2021-09-09T02:00:00+02:00", + "NotAfter": "2022-10-09T01:59:59+02:00", + "KeyAlgorithm": "RSA-2048", + "SignatureAlgorithm": "SHA256WITHRSA", + "InUseBy": [], + "Type": "AMAZON_ISSUED", + "KeyUsages": [ + { + "Name": "DIGITAL_SIGNATURE" + }, + { + "Name": "KEY_ENCIPHERMENT" + } + ], + "ExtendedKeyUsages": [ + { + "Name": "TLS_WEB_SERVER_AUTHENTICATION", + "OID": "1.3.6.1.5.5.7.3.1" + }, + { + "Name": "TLS_WEB_CLIENT_AUTHENTICATION", + "OID": "1.3.6.1.5.5.7.3.2" + } + ], + "RenewalEligibility": "INELIGIBLE", + "Options": { + "CertificateTransparencyLoggingPreference": "ENABLED" + }, + "ImportedAt": "2021-09-09T12:51:32+02:00", + "RevokedAt": "2021-09-09T12:51:32+02:00", + "RevocationReason": "UNSPECIFIED", + "FailureReason": "NO_AVAILABLE_CONTACTS", + "RenewalSummary": { + "RenewalStatus": "PENDING_AUTO_RENEWAL", + "DomainValidationOptions": [ + { + "DomainName": "demo.some.engineering", + "ValidationEmails": [ + "test@bla.de" + ] + } + ] + }, + "CertificateAuthorityArn": "arn:aws:acm-pca:us-east-2:test:certificate-authority/7d16bbb2-99c5-4388-b6f4-d97eee909827" + } +} diff --git 
a/plugins/aws/test/resources/files/acm/list-certificates.json b/plugins/aws/test/resources/files/acm/list-certificates.json new file mode 100644 index 0000000000..cbb8bd6355 --- /dev/null +++ b/plugins/aws/test/resources/files/acm/list-certificates.json @@ -0,0 +1,37 @@ +{ + "NextToken": "foo", + "CertificateSummaryList": [ + { + "CertificateArn": "foo", + "DomainName": "foo", + "SubjectAlternativeNameSummaries": [ + "foo", + "foo", + "foo" + ], + "HasAdditionalSubjectAlternativeNames": true, + "Status": "ISSUED", + "Type": "AMAZON_ISSUED", + "KeyAlgorithm": "RSA_2048", + "KeyUsages": [ + "NON_REPUDIATION", + "NON_REPUDIATION", + "NON_REPUDIATION" + ], + "ExtendedKeyUsages": [ + "TLS_WEB_CLIENT_AUTHENTICATION", + "TLS_WEB_CLIENT_AUTHENTICATION", + "TLS_WEB_CLIENT_AUTHENTICATION" + ], + "InUse": true, + "Exported": true, + "RenewalEligibility": "INELIGIBLE", + "NotBefore": "2024-01-02T08:55:12Z", + "NotAfter": "2024-01-02T08:55:12Z", + "CreatedAt": "2024-01-02T08:55:12Z", + "IssuedAt": "2024-01-02T08:55:12Z", + "ImportedAt": "2024-01-02T08:55:12Z", + "RevokedAt": "2024-01-02T08:55:12Z" + } + ] +} diff --git a/plugins/aws/test/resources/files/cloudfront/get-distribution__EDFDVBD632BHDS5.json b/plugins/aws/test/resources/files/cloudfront/get-distribution__EDFDVBD632BHDS5.json new file mode 100644 index 0000000000..926bde5b14 --- /dev/null +++ b/plugins/aws/test/resources/files/cloudfront/get-distribution__EDFDVBD632BHDS5.json @@ -0,0 +1,311 @@ +{ + "Distribution": { + "Id": "EALU522D0KVDX", + "ARN": "arn:aws:cloudfront::625596817853:distribution/EALU522D0KVDX", + "Status": "Deployed", + "LastModifiedTime": "2024-01-03T08:46:15.182000+00:00", + "InProgressInvalidationBatches": 0, + "DomainName": "dpgl4a7x6i7kh.cloudfront.net", + "ActiveTrustedSigners": { + "Enabled": false, + "Quantity": 0 + }, + "ActiveTrustedKeyGroups": { + "Enabled": false, + "Quantity": 0 + }, + "DistributionConfig": { + "CallerReference": "string", + "Aliases": { + "Quantity": 123, + "Items": [ + 
"string" + ] + }, + "DefaultRootObject": "string", + "Origins": { + "Quantity": 123, + "Items": [ + { + "Id": "stuff.s3.region.amazonaws.com", + "DomainName": "stuff.s3.region.amazonaws.com", + "OriginPath": "string", + "CustomHeaders": { + "Quantity": 123, + "Items": [ + { + "HeaderName": "string", + "HeaderValue": "string" + } + ] + }, + "S3OriginConfig": { + "OriginAccessIdentity": "string" + }, + "CustomOriginConfig": { + "HTTPPort": 123, + "HTTPSPort": 123, + "OriginProtocolPolicy": "http-only or match-viewer or https-only", + "OriginSslProtocols": { + "Quantity": 123, + "Items": [ + "SSLv3 or TLSv1 or TLSv1.1 or TLSv1.2" + ] + }, + "OriginReadTimeout": 123, + "OriginKeepaliveTimeout": 123 + }, + "ConnectionAttempts": 123, + "ConnectionTimeout": 123, + "OriginShield": { + "Enabled": true, + "OriginShieldRegion": "string" + }, + "OriginAccessControlId": "string" + } + ] + }, + "OriginGroups": { + "Quantity": 123, + "Items": [ + { + "Id": "string", + "FailoverCriteria": { + "StatusCodes": { + "Quantity": 123, + "Items": [ + 123 + ] + } + }, + "Members": { + "Quantity": 123, + "Items": [ + { + "OriginId": "string" + } + ] + } + } + ] + }, + "DefaultCacheBehavior": { + "TargetOriginId": "string", + "TrustedSigners": { + "Enabled": true, + "Quantity": 123, + "Items": [ + "string" + ] + }, + "TrustedKeyGroups": { + "Enabled": true, + "Quantity": 123, + "Items": [ + "string" + ] + }, + "ViewerProtocolPolicy": "allow-all or https-only or redirect-to-https", + "AllowedMethods": { + "Quantity": 123, + "Items": [ + "GET or HEAD or POST or PUT or PATCH or OPTIONS or DELETE" + ], + "CachedMethods": { + "Quantity": 123, + "Items": [ + "GET or HEAD or POST or PUT or PATCH or OPTIONS or DELETE" + ] + } + }, + "SmoothStreaming": true, + "Compress": true, + "LambdaFunctionAssociations": { + "Quantity": 123, + "Items": [ + { + "LambdaFunctionARN": "some-other-lambda-arn", + "EventType": "viewer-request", + "IncludeBody": true + } + ] + }, + "FunctionAssociations": { + 
"Quantity": 123, + "Items": [ + { + "FunctionARN": "string", + "EventType": "viewer-request or viewer-response or origin-request or origin-response" + } + ] + }, + "FieldLevelEncryptionId": "string", + "RealtimeLogConfigArn": "string", + "CachePolicyId": "string", + "OriginRequestPolicyId": "string", + "ResponseHeadersPolicyId": "string", + "ForwardedValues": { + "QueryString": true, + "Cookies": { + "Forward": "none or whitelist or all", + "WhitelistedNames": { + "Quantity": 123, + "Items": [ + "string" + ] + } + }, + "Headers": { + "Quantity": 123, + "Items": [ + "string" + ] + }, + "QueryStringCacheKeys": { + "Quantity": 123, + "Items": [ + "string" + ] + } + }, + "MinTTL": 123, + "DefaultTTL": 123, + "MaxTTL": 123 + }, + "CacheBehaviors": { + "Quantity": 123, + "Items": [ + { + "PathPattern": "string", + "TargetOriginId": "string", + "TrustedSigners": { + "Enabled": true, + "Quantity": 123, + "Items": [ + "string" + ] + }, + "TrustedKeyGroups": { + "Enabled": true, + "Quantity": 123, + "Items": [ + "string" + ] + }, + "ViewerProtocolPolicy": "allow-all or https-only or redirect-to-https", + "AllowedMethods": { + "Quantity": 123, + "Items": [ + "GET or HEAD or POST or PUT or PATCH or OPTIONS or DELETE" + ], + "CachedMethods": { + "Quantity": 123, + "Items": [ + "GET or HEAD or POST or PUT or PATCH or OPTIONS or DELETE" + ] + } + }, + "SmoothStreaming": true, + "Compress": true, + "LambdaFunctionAssociations": { + "Quantity": 123, + "Items": [ + { + "LambdaFunctionARN": "string", + "EventType": "viewer-request or viewer-response or origin-request or origin-response", + "IncludeBody": true + } + ] + }, + "FunctionAssociations": { + "Quantity": 123, + "Items": [ + { + "FunctionARN": "string", + "EventType": "viewer-request or viewer-response or origin-request or origin-response" + } + ] + }, + "FieldLevelEncryptionId": "string", + "RealtimeLogConfigArn": "string", + "CachePolicyId": "string", + "OriginRequestPolicyId": "string", + "ResponseHeadersPolicyId": 
"string", + "ForwardedValues": { + "QueryString": true, + "Cookies": { + "Forward": "none or whitelist or all", + "WhitelistedNames": { + "Quantity": 123, + "Items": [ + "string" + ] + } + }, + "Headers": { + "Quantity": 123, + "Items": [ + "string" + ] + }, + "QueryStringCacheKeys": { + "Quantity": 123, + "Items": [ + "string" + ] + } + }, + "MinTTL": 123, + "DefaultTTL": 123, + "MaxTTL": 123 + } + ] + }, + "CustomErrorResponses": { + "Quantity": 123, + "Items": [ + { + "ErrorCode": 123, + "ResponsePagePath": "string", + "ResponseCode": "string", + "ErrorCachingMinTTL": 123 + } + ] + }, + "Comment": "string", + "Logging": { + "Enabled": true, + "IncludeCookies": true, + "Bucket": "string", + "Prefix": "string" + }, + "PriceClass": "PriceClass_100 or PriceClass_200 or PriceClass_All", + "Enabled": true, + "ViewerCertificate": { + "CloudFrontDefaultCertificate": true, + "IAMCertificateId": "string", + "ACMCertificateArn": "string", + "SSLSupportMethod": "sni-only or vip or static-ip", + "MinimumProtocolVersion": "SSLv3 or TLSv1 or TLSv1_2016 or TLSv1.1_2016 or TLSv1.2_2018 or TLSv1.2_2019 or TLSv1.2_2021", + "Certificate": "string", + "CertificateSource": "cloudfront or iam or acm" + }, + "Restrictions": { + "GeoRestriction": { + "RestrictionType": "blacklist or whitelist or none", + "Quantity": 123, + "Items": [ + "string" + ] + } + }, + "WebACLId": "string", + "HttpVersion": "http1.1 or http2 or http3 or http2and3", + "IsIPV6Enabled": true, + "ContinuousDeploymentPolicyId": "string", + "Staging": true + }, + "ETag": "string" + } +} + diff --git a/plugins/aws/test/resources/files/cloudtrail/get-event-selectors__arn_aws_cloudtrail_us_east_1_test_trail_management_events.json b/plugins/aws/test/resources/files/cloudtrail/get-event-selectors__arn_aws_cloudtrail_us_east_1_test_trail_management_events.json index 7b1fd620ea..65265a609b 100644 --- 
a/plugins/aws/test/resources/files/cloudtrail/get-event-selectors__arn_aws_cloudtrail_us_east_1_test_trail_management_events.json +++ b/plugins/aws/test/resources/files/cloudtrail/get-event-selectors__arn_aws_cloudtrail_us_east_1_test_trail_management_events.json @@ -12,5 +12,20 @@ } ] } + ], + "EventSelectors": [ + { + "ReadWriteType": "All", + "IncludeManagementEvents": false, + "DataResources": [ + { + "Type": "AWS::Lambda::Function", + "Values": [ + "arn:aws:lambda" + ] + } + ], + "ExcludeManagementEventSources": [] + } ] } diff --git a/plugins/aws/test/resources/files/ec2/describe-images__self.json b/plugins/aws/test/resources/files/ec2/describe-images__self.json new file mode 100644 index 0000000000..7ea19242c5 --- /dev/null +++ b/plugins/aws/test/resources/files/ec2/describe-images__self.json @@ -0,0 +1,83 @@ +{ + "Images": [ + { + "Architecture": "x86_64", + "CreationDate": "2023-08-31T21:00:27.000Z", + "ImageId": "ami-1123", + "ImageLocation": "958347557273/pom-tmpst-20230831t204042-3888579", + "ImageType": "machine", + "Public": true, + "OwnerId": "958347557273", + "PlatformDetails": "Linux/UNIX", + "UsageOperation": "RunInstances", + "State": "available", + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "DeleteOnTermination": true, + "SnapshotId": "snap-070a3cd53ddfc3a8b", + "VolumeSize": 10, + "VolumeType": "gp2", + "Encrypted": false + } + } + ], + "EnaSupport": true, + "Hypervisor": "xen", + "Name": "pom-tmpst-20230831t204042-3888579", + "RootDeviceName": "/dev/xvda", + "RootDeviceType": "ebs", + "SriovNetSupport": "simple", + "VirtualizationType": "hvm", + "DeprecationTime": "2025-08-31T21:00:27.000Z", + "SourceInstanceId": "i-041649455685d795f", + "KernelId": "aki-919dcaf8", + "Platform": "windows", + "RamdiskId": "ari-8f9dcae6", + "Description": "Windows_Server-2019-English-Full-Base-2021.08.11", + "ImageOwnerAlias": "amazon", + "StateReason": { + "Code": "Client.UserInitiatedShutdown", + "Message": 
"Client.UserInitiatedShutdown: User initiated shutdown" + }, + "BootMode": "legacy-bios", + "TpmSupport": true, + "ImdsSupport": true + }, + { + "Architecture": "x86_64", + "CreationDate": "2022-12-23T16:15:33.000Z", + "ImageId": "ami-123", + "ImageLocation": "amazon/amazon-eks-gpu-node-1.23-v20221222", + "ImageType": "machine", + "Public": true, + "OwnerId": "602401143452", + "PlatformDetails": "Linux/UNIX", + "UsageOperation": "RunInstances", + "State": "available", + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvda", + "Ebs": { + "DeleteOnTermination": true, + "SnapshotId": "snap-04685fe771b7f9b84", + "VolumeSize": 20, + "VolumeType": "gp2", + "Encrypted": false + } + } + ], + "Description": "EKS Kubernetes Worker AMI for Machine Learning Accelerated Workloads on AmazonLinux2 image (k8s: 1.23.13, docker: 20.10.17-1.amzn2.0.1, containerd: 1.6.6-1.amzn2.0.2)", + "EnaSupport": true, + "Hypervisor": "xen", + "ImageOwnerAlias": "amazon", + "Name": "amazon-eks-gpu-node-1.23-v20221222", + "RootDeviceName": "/dev/xvda", + "RootDeviceType": "ebs", + "SriovNetSupport": "simple", + "VirtualizationType": "hvm", + "DeprecationTime": "2024-12-23T16:15:33.000Z" + } + ] +} diff --git a/plugins/aws/test/resources/files/ec2/describe-launch-template-versions___Default__Latest.json b/plugins/aws/test/resources/files/ec2/describe-launch-template-versions___Default__Latest.json new file mode 100644 index 0000000000..34e23bb3b2 --- /dev/null +++ b/plugins/aws/test/resources/files/ec2/describe-launch-template-versions___Default__Latest.json @@ -0,0 +1,572 @@ +{ + "LaunchTemplateVersions": [ + { + "LaunchTemplateId": "foo", + "LaunchTemplateName": "foo", + "VersionNumber": 123, + "VersionDescription": "foo", + "CreateTime": "2024-01-04T10:02:16Z", + "CreatedBy": "foo", + "DefaultVersion": true, + "LaunchTemplateData": { + "KernelId": "foo", + "EbsOptimized": true, + "IamInstanceProfile": { + "Arn": "foo", + "Name": "foo" + }, + "BlockDeviceMappings": [ + { + "DeviceName": "foo", 
+ "VirtualName": "foo", + "Ebs": { + "Encrypted": true, + "DeleteOnTermination": true, + "Iops": 123, + "KmsKeyId": "foo", + "SnapshotId": "foo", + "VolumeSize": 123, + "VolumeType": "io1", + "Throughput": 123 + }, + "NoDevice": "foo" + }, + { + "DeviceName": "foo", + "VirtualName": "foo", + "Ebs": { + "Encrypted": true, + "DeleteOnTermination": true, + "Iops": 123, + "KmsKeyId": "foo", + "SnapshotId": "foo", + "VolumeSize": 123, + "VolumeType": "io1", + "Throughput": 123 + }, + "NoDevice": "foo" + }, + { + "DeviceName": "foo", + "VirtualName": "foo", + "Ebs": { + "Encrypted": true, + "DeleteOnTermination": true, + "Iops": 123, + "KmsKeyId": "foo", + "SnapshotId": "foo", + "VolumeSize": 123, + "VolumeType": "io1", + "Throughput": 123 + }, + "NoDevice": "foo" + } + ], + "NetworkInterfaces": [ + { + "AssociateCarrierIpAddress": true, + "AssociatePublicIpAddress": true, + "DeleteOnTermination": true, + "Description": "foo", + "DeviceIndex": 123, + "Groups": [ + "foo", + "foo", + "foo" + ], + "InterfaceType": "foo", + "Ipv6AddressCount": 123, + "Ipv6Addresses": [ + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + }, + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + }, + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + } + ], + "NetworkInterfaceId": "foo", + "PrivateIpAddress": "foo", + "PrivateIpAddresses": [ + { + "Primary": true, + "PrivateIpAddress": "foo" + }, + { + "Primary": true, + "PrivateIpAddress": "foo" + }, + { + "Primary": true, + "PrivateIpAddress": "foo" + } + ], + "SecondaryPrivateIpAddressCount": 123, + "SubnetId": "foo", + "NetworkCardIndex": 123, + "Ipv4Prefixes": [ + { + "Ipv4Prefix": "foo" + }, + { + "Ipv4Prefix": "foo" + }, + { + "Ipv4Prefix": "foo" + } + ], + "Ipv4PrefixCount": 123, + "Ipv6Prefixes": [ + { + "Ipv6Prefix": "foo" + }, + { + "Ipv6Prefix": "foo" + }, + { + "Ipv6Prefix": "foo" + } + ], + "Ipv6PrefixCount": 123, + "PrimaryIpv6": true, + "EnaSrdSpecification": { + "EnaSrdEnabled": true, + "EnaSrdUdpSpecification": { + 
"EnaSrdUdpEnabled": true + } + }, + "ConnectionTrackingSpecification": { + "TcpEstablishedTimeout": 123, + "UdpTimeout": 123, + "UdpStreamTimeout": 123 + } + }, + { + "AssociateCarrierIpAddress": true, + "AssociatePublicIpAddress": true, + "DeleteOnTermination": true, + "Description": "foo", + "DeviceIndex": 123, + "Groups": [ + "foo", + "foo", + "foo" + ], + "InterfaceType": "foo", + "Ipv6AddressCount": 123, + "Ipv6Addresses": [ + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + }, + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + }, + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + } + ], + "NetworkInterfaceId": "foo", + "PrivateIpAddress": "foo", + "PrivateIpAddresses": [ + { + "Primary": true, + "PrivateIpAddress": "foo" + }, + { + "Primary": true, + "PrivateIpAddress": "foo" + }, + { + "Primary": true, + "PrivateIpAddress": "foo" + } + ], + "SecondaryPrivateIpAddressCount": 123, + "SubnetId": "foo", + "NetworkCardIndex": 123, + "Ipv4Prefixes": [ + { + "Ipv4Prefix": "foo" + }, + { + "Ipv4Prefix": "foo" + }, + { + "Ipv4Prefix": "foo" + } + ], + "Ipv4PrefixCount": 123, + "Ipv6Prefixes": [ + { + "Ipv6Prefix": "foo" + }, + { + "Ipv6Prefix": "foo" + }, + { + "Ipv6Prefix": "foo" + } + ], + "Ipv6PrefixCount": 123, + "PrimaryIpv6": true, + "EnaSrdSpecification": { + "EnaSrdEnabled": true, + "EnaSrdUdpSpecification": { + "EnaSrdUdpEnabled": true + } + }, + "ConnectionTrackingSpecification": { + "TcpEstablishedTimeout": 123, + "UdpTimeout": 123, + "UdpStreamTimeout": 123 + } + }, + { + "AssociateCarrierIpAddress": true, + "AssociatePublicIpAddress": true, + "DeleteOnTermination": true, + "Description": "foo", + "DeviceIndex": 123, + "Groups": [ + "foo", + "foo", + "foo" + ], + "InterfaceType": "foo", + "Ipv6AddressCount": 123, + "Ipv6Addresses": [ + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + }, + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + }, + { + "Ipv6Address": "foo", + "IsPrimaryIpv6": true + } + ], + "NetworkInterfaceId": "foo", + 
"PrivateIpAddress": "foo", + "PrivateIpAddresses": [ + { + "Primary": true, + "PrivateIpAddress": "foo" + }, + { + "Primary": true, + "PrivateIpAddress": "foo" + }, + { + "Primary": true, + "PrivateIpAddress": "foo" + } + ], + "SecondaryPrivateIpAddressCount": 123, + "SubnetId": "foo", + "NetworkCardIndex": 123, + "Ipv4Prefixes": [ + { + "Ipv4Prefix": "foo" + }, + { + "Ipv4Prefix": "foo" + }, + { + "Ipv4Prefix": "foo" + } + ], + "Ipv4PrefixCount": 123, + "Ipv6Prefixes": [ + { + "Ipv6Prefix": "foo" + }, + { + "Ipv6Prefix": "foo" + }, + { + "Ipv6Prefix": "foo" + } + ], + "Ipv6PrefixCount": 123, + "PrimaryIpv6": true, + "EnaSrdSpecification": { + "EnaSrdEnabled": true, + "EnaSrdUdpSpecification": { + "EnaSrdUdpEnabled": true + } + }, + "ConnectionTrackingSpecification": { + "TcpEstablishedTimeout": 123, + "UdpTimeout": 123, + "UdpStreamTimeout": 123 + } + } + ], + "ImageId": "foo", + "InstanceType": "a1.large", + "KeyName": "foo", + "Monitoring": { + "Enabled": true + }, + "Placement": { + "AvailabilityZone": "foo", + "Affinity": "foo", + "GroupName": "foo", + "HostId": "foo", + "Tenancy": "dedicated", + "SpreadDomain": "foo", + "HostResourceGroupArn": "foo", + "PartitionNumber": 123, + "GroupId": "foo" + }, + "RamDiskId": "foo", + "DisableApiTermination": true, + "InstanceInitiatedShutdownBehavior": "terminate", + "UserData": "foo", + "TagSpecifications": [ + { + "ResourceType": "client-vpn-endpoint", + "Tags": [ + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + } + ] + }, + { + "ResourceType": "client-vpn-endpoint", + "Tags": [ + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + } + ] + }, + { + "ResourceType": "client-vpn-endpoint", + "Tags": [ + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + } + ] + } + ], + "ElasticGpuSpecifications": [ + { + 
"Type": "foo" + }, + { + "Type": "foo" + }, + { + "Type": "foo" + } + ], + "ElasticInferenceAccelerators": [ + { + "Type": "foo", + "Count": 123 + }, + { + "Type": "foo", + "Count": 123 + }, + { + "Type": "foo", + "Count": 123 + } + ], + "SecurityGroupIds": [ + "foo", + "foo", + "foo" + ], + "SecurityGroups": [ + "foo", + "foo", + "foo" + ], + "InstanceMarketOptions": { + "MarketType": "capacity-block", + "SpotOptions": { + "MaxPrice": "foo", + "SpotInstanceType": "persistent", + "BlockDurationMinutes": 123, + "ValidUntil": "2024-01-04T10:02:16Z", + "InstanceInterruptionBehavior": "stop" + } + }, + "CreditSpecification": { + "CpuCredits": "foo" + }, + "CpuOptions": { + "CoreCount": 123, + "ThreadsPerCore": 123, + "AmdSevSnp": "disabled" + }, + "CapacityReservationSpecification": { + "CapacityReservationPreference": "none", + "CapacityReservationTarget": { + "CapacityReservationId": "foo", + "CapacityReservationResourceGroupArn": "foo" + } + }, + "LicenseSpecifications": [ + { + "LicenseConfigurationArn": "foo" + }, + { + "LicenseConfigurationArn": "foo" + }, + { + "LicenseConfigurationArn": "foo" + } + ], + "HibernationOptions": { + "Configured": true + }, + "MetadataOptions": { + "State": "applied", + "HttpTokens": "required", + "HttpPutResponseHopLimit": 123, + "HttpEndpoint": "enabled", + "HttpProtocolIpv6": "enabled", + "InstanceMetadataTags": "enabled" + }, + "EnclaveOptions": { + "Enabled": true + }, + "InstanceRequirements": { + "VCpuCount": { + "Min": 123, + "Max": 123 + }, + "MemoryMiB": { + "Min": 123, + "Max": 123 + }, + "CpuManufacturers": [ + "amd", + "amd", + "amd" + ], + "MemoryGiBPerVCpu": { + "Min": 1.234, + "Max": 1.234 + }, + "ExcludedInstanceTypes": [ + "foo", + "foo", + "foo" + ], + "InstanceGenerations": [ + "previous", + "previous", + "previous" + ], + "SpotMaxPricePercentageOverLowestPrice": 123, + "OnDemandMaxPricePercentageOverLowestPrice": 123, + "BareMetal": "required", + "BurstablePerformance": "required", + "RequireHibernateSupport": 
true, + "NetworkInterfaceCount": { + "Min": 123, + "Max": 123 + }, + "LocalStorage": "required", + "LocalStorageTypes": [ + "ssd", + "ssd", + "ssd" + ], + "TotalLocalStorageGB": { + "Min": 1.234, + "Max": 1.234 + }, + "BaselineEbsBandwidthMbps": { + "Min": 123, + "Max": 123 + }, + "AcceleratorTypes": [ + "fpga", + "fpga", + "fpga" + ], + "AcceleratorCount": { + "Min": 123, + "Max": 123 + }, + "AcceleratorManufacturers": [ + "amd", + "amd", + "amd" + ], + "AcceleratorNames": [ + "inferentia", + "inferentia", + "inferentia" + ], + "AcceleratorTotalMemoryMiB": { + "Min": 123, + "Max": 123 + }, + "NetworkBandwidthGbps": { + "Min": 1.234, + "Max": 1.234 + }, + "AllowedInstanceTypes": [ + "foo", + "foo", + "foo" + ] + }, + "PrivateDnsNameOptions": { + "HostnameType": "resource-name", + "EnableResourceNameDnsARecord": true, + "EnableResourceNameDnsAAAARecord": true + }, + "MaintenanceOptions": { + "AutoRecovery": "disabled" + }, + "DisableApiStop": true + } + } + ], + "NextToken": "foo" +} diff --git a/plugins/aws/test/resources/files/ecr/get-lifecycle-policy__ubuntu.json b/plugins/aws/test/resources/files/ecr/get-lifecycle-policy__ubuntu.json new file mode 100644 index 0000000000..9e57760f86 --- /dev/null +++ b/plugins/aws/test/resources/files/ecr/get-lifecycle-policy__ubuntu.json @@ -0,0 +1,6 @@ +{ + "registryId": "test", + "repositoryName": "ubuntu", + "lifecyclePolicyText": "{\"rules\":[{\"rulePriority\":1,\"description\":\"hjh\",\"selection\":{\"tagStatus\":\"tagged\",\"tagPatternList\":[\"prod\"],\"countType\":\"sinceImagePushed\",\"countUnit\":\"days\",\"countNumber\":1},\"action\":{\"type\":\"expire\"}}]}", + "lastEvaluatedAt": "1970-01-01T01:00:00+01:00" +} diff --git a/plugins/aws/test/resources/files/efs/describe-file-system-policy__fs_1.json b/plugins/aws/test/resources/files/efs/describe-file-system-policy__fs_1.json new file mode 100644 index 0000000000..b0135ae585 --- /dev/null +++ 
b/plugins/aws/test/resources/files/efs/describe-file-system-policy__fs_1.json @@ -0,0 +1,4 @@ +{ + "FileSystemId": "fs-1", + "Policy" : "{\"Version\":\"2012-10-17\",\"Id\":\"epw\",\"Statement\":[{\"Sid\":\"esi\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"*\"},\"Action\":\"\",\"Resource\":\"arn:aws:elasticfilesystem:us-east-1:test:file-system/fs-0d91ea540dcdc083f\",\"Condition\":{\"Bool\":{\"elasticfilesystem:AccessedViaMountTarget\":\"true\"}}},{\"Sid\":\"efs-statement-b4b0917a-5793-4a6c-b9ce-9cad2b94d9df\",\"Effect\":\"Deny\",\"Principal\":{\"AWS\":\"*\"},\"Action\":\"*\",\"Resource\":\"arn:aws:elasticfilesystem:us-east-1:test:file-system/fs-2\",\"Condition\":{\"Bool\":{\"aws:SecureTransport\":\"false\"}}}]}" +} diff --git a/plugins/aws/test/resources/files/elb/describe-load-balancer-attributes__elb_123.json b/plugins/aws/test/resources/files/elb/describe-load-balancer-attributes__elb_123.json new file mode 100644 index 0000000000..096903cf62 --- /dev/null +++ b/plugins/aws/test/resources/files/elb/describe-load-balancer-attributes__elb_123.json @@ -0,0 +1,34 @@ +{ + "LoadBalancerAttributes": { + "CrossZoneLoadBalancing": { + "Enabled": true + }, + "AccessLog": { + "Enabled": true, + "S3BucketName": "foo", + "EmitInterval": 123, + "S3BucketPrefix": "foo" + }, + "ConnectionDraining": { + "Enabled": true, + "Timeout": 123 + }, + "ConnectionSettings": { + "IdleTimeout": 123 + }, + "AdditionalAttributes": [ + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + } + ] + } +} diff --git a/plugins/aws/test/resources/files/glacier/get-vault-access-policy__nice_vault.json b/plugins/aws/test/resources/files/glacier/get-vault-access-policy__nice_vault.json new file mode 100644 index 0000000000..6f60e1267c --- /dev/null +++ b/plugins/aws/test/resources/files/glacier/get-vault-access-policy__nice_vault.json @@ -0,0 +1 @@ +{"policy": { "Policy": "{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": 
\"allow-time-based-deletes\", \"Principal\": { \"AWS\": \"999999999999\" }, \"Effect\": \"Allow\", \"Action\": \"glacier:Delete*\", \"Resource\": [ \"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\" ], \"Condition\": { \"DateGreaterThan\": { \"aws:CurrentTime\": \"2018-12-31T00:00:00Z\" } } } ] }" }} diff --git a/plugins/aws/test/resources/files/kms/get-key-policy__32b03fb9_default.json b/plugins/aws/test/resources/files/kms/get-key-policy__32b03fb9_default.json new file mode 100644 index 0000000000..f3183b47de --- /dev/null +++ b/plugins/aws/test/resources/files/kms/get-key-policy__32b03fb9_default.json @@ -0,0 +1,4 @@ +{ + "Policy": "{\n \"Version\" : \"2012-10-17\",\n \"Id\" : \"auto-es-2\",\n \"Statement\" : [ {\n \"Sid\" : \"Allow access through OpenSearch Service for all principals in the account that are authorized to use OpenSearch Service\",\n \"Effect\" : \"Allow\",\n \"Principal\" : {\n \"AWS\" : \"*\"\n },\n \"Action\" : [ \"kms:Encrypt\", \"kms:Decrypt\", \"kms:ReEncrypt*\", \"kms:GenerateDataKey*\", \"kms:CreateGrant\", \"kms:DescribeKey\" ],\n \"Resource\" : \"*\",\n \"Condition\" : {\n \"StringEquals\" : {\n \"kms:CallerAccount\" : \"625596817853\",\n \"kms:ViaService\" : \"es.us-east-1.amazonaws.com\"\n }\n }\n }, {\n \"Sid\" : \"Allow direct access to key metadata to the account\",\n \"Effect\" : \"Allow\",\n \"Principal\" : {\n \"AWS\" : \"arn:aws:iam::625596817853:root\"\n },\n \"Action\" : [ \"kms:Describe*\", \"kms:Get*\", \"kms:List*\", \"kms:RevokeGrant\" ],\n \"Resource\" : \"*\"\n }, {\n \"Sid\" : \"Allow OpenSearch service principals to describe the key directly\",\n \"Effect\" : \"Allow\",\n \"Principal\" : {\n \"Service\" : \"es.amazonaws.com\"\n },\n \"Action\" : [ \"kms:Describe*\", \"kms:Get*\", \"kms:List*\" ],\n \"Resource\" : \"*\"\n } ]\n}" +} + diff --git a/plugins/aws/test/resources/files/opensearch/describe-domains__foo.json b/plugins/aws/test/resources/files/opensearch/describe-domains__foo.json new file mode 
100644 index 0000000000..c821c5b6c4 --- /dev/null +++ b/plugins/aws/test/resources/files/opensearch/describe-domains__foo.json @@ -0,0 +1,154 @@ +{ + "DomainStatusList": [ + { + "DomainId": "foo", + "DomainName": "foo", + "ARN": "foo", + "Created": true, + "Deleted": true, + "Endpoint": "foo", + "EndpointV2": "foo", + "Endpoints": { + "0": "foo", + "1": "foo", + "2": "foo" + }, + "Processing": true, + "UpgradeProcessing": true, + "EngineVersion": "foo", + "ClusterConfig": { + "InstanceType": "m3.large.search", + "InstanceCount": 123, + "DedicatedMasterEnabled": true, + "ZoneAwarenessEnabled": true, + "ZoneAwarenessConfig": { + "AvailabilityZoneCount": 123 + }, + "DedicatedMasterType": "m3.large.search", + "DedicatedMasterCount": 123, + "WarmEnabled": true, + "WarmType": "ultrawarm1.large.search", + "WarmCount": 123, + "ColdStorageOptions": { + "Enabled": true + }, + "MultiAZWithStandbyEnabled": true + }, + "EBSOptions": { + "EBSEnabled": true, + "VolumeType": "gp2", + "VolumeSize": 123, + "Iops": 123, + "Throughput": 123 + }, + "AccessPolicies": "{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": \"allow-foo\", \"Principal\": { \"AWS\": \"999999999999\" }, \"Effect\": \"Allow\", \"Action\": \"glacier:Delete*\", \"Resource\": [ \"arn:aws:glacier:us-west-2:999999999999:vaults/examplevault\" ], \"Condition\": { \"DateGreaterThan\": { \"aws:CurrentTime\": \"2018-12-31T00:00:00Z\" } } } ] }", + "IPAddressType": "dualstack", + "SnapshotOptions": { + "AutomatedSnapshotStartHour": 123 + }, + "VPCOptions": { + "VPCId": "vpc-123", + "SubnetIds": [ + "subnet-123", + "foo" + ], + "AvailabilityZones": [ + "az-123", + "foo", + "foo" + ], + "SecurityGroupIds": [ + "sg-123", + "foo", + "foo" + ] + }, + "CognitoOptions": { + "Enabled": true, + "UserPoolId": "123", + "IdentityPoolId": "foo", + "RoleArn": "foo" + }, + "EncryptionAtRestOptions": { + "Enabled": true, + "KmsKeyId": "foo" + }, + "NodeToNodeEncryptionOptions": { + "Enabled": true + }, + "AdvancedOptions": { + "0": 
"foo", + "1": "foo", + "2": "foo" + }, + "LogPublishingOptions": { + "0": { + "CloudWatchLogsLogGroupArn": "foo", + "Enabled": true + }, + "1": { + "CloudWatchLogsLogGroupArn": "foo", + "Enabled": true + }, + "2": { + "CloudWatchLogsLogGroupArn": "foo", + "Enabled": true + } + }, + "ServiceSoftwareOptions": { + "CurrentVersion": "foo", + "NewVersion": "foo", + "UpdateAvailable": true, + "Cancellable": true, + "UpdateStatus": "IN_PROGRESS", + "Description": "foo", + "AutomatedUpdateDate": "2023-12-21T11:30:19Z", + "OptionalDeployment": true + }, + "DomainEndpointOptions": { + "EnforceHTTPS": true, + "TLSSecurityPolicy": "Policy-Min-TLS-1-2-2019-07", + "CustomEndpointEnabled": true, + "CustomEndpoint": "https://example.com", + "CustomEndpointCertificateArn": "foo" + }, + "AdvancedSecurityOptions": { + "Enabled": true, + "InternalUserDatabaseEnabled": true, + "SAMLOptions": { + "Enabled": true, + "Idp": { + "MetadataContent": "foo", + "EntityId": "foo" + }, + "SubjectKey": "foo", + "RolesKey": "foo", + "SessionTimeoutMinutes": 123 + }, + "AnonymousAuthDisableDate": "2023-12-21T11:30:19Z", + "AnonymousAuthEnabled": true + }, + "AutoTuneOptions": { + "State": "DISABLED", + "ErrorMessage": "foo", + "UseOffPeakWindow": true + }, + "ChangeProgressDetails": { + "ChangeId": "foo", + "Message": "foo" + }, + "OffPeakWindowOptions": { + "Enabled": true, + "OffPeakWindow": { + "WindowStartTime": { + "Hours": 123, + "Minutes": 123 + } + } + }, + "SoftwareUpdateOptions": { + "AutoSoftwareUpdateEnabled": true + } + } + ] +} diff --git a/plugins/aws/test/resources/files/opensearch/list-domain-names.json b/plugins/aws/test/resources/files/opensearch/list-domain-names.json new file mode 100644 index 0000000000..f3c04f7721 --- /dev/null +++ b/plugins/aws/test/resources/files/opensearch/list-domain-names.json @@ -0,0 +1,9 @@ +{ + "DomainNames": [ + { + "DomainName": "foo", + "EngineType": "Elasticsearch" + } + ] +} + diff --git 
a/plugins/aws/test/resources/files/rds/describe-db-cluster-snapshot-attributes__deleteme.json b/plugins/aws/test/resources/files/rds/describe-db-cluster-snapshot-attributes__deleteme.json new file mode 100644 index 0000000000..12061cde91 --- /dev/null +++ b/plugins/aws/test/resources/files/rds/describe-db-cluster-snapshot-attributes__deleteme.json @@ -0,0 +1,15 @@ +{ + "DBClusterSnapshotAttributesResult": { + "DBClusterSnapshotIdentifier": "deleteme", + "DBClusterSnapshotAttributes": [ + { + "AttributeName": "restore", + "AttributeValues": [] + }, + { + "AttributeName": "foo", + "AttributeValues": ["foo","foo","foo"] + } + ] + } +} diff --git a/plugins/aws/test/resources/files/rds/describe-db-cluster-snapshots.json b/plugins/aws/test/resources/files/rds/describe-db-cluster-snapshots.json new file mode 100644 index 0000000000..fa9eeae391 --- /dev/null +++ b/plugins/aws/test/resources/files/rds/describe-db-cluster-snapshots.json @@ -0,0 +1,63 @@ +{ + "DBClusterSnapshots": [ + { + "AvailabilityZones": [ + "us-east-1a", + "us-east-1c", + "us-east-1d" + ], + "DBClusterSnapshotIdentifier": "deleteme", + "DBClusterIdentifier": "database-1", + "SnapshotCreateTime": "2024-01-03T14:26:54.023000+00:00", + "Engine": "aurora-postgresql", + "EngineMode": "provisioned", + "AllocatedStorage": 0, + "Status": "available", + "Port": 0, + "VpcId": "vpc-09981817e8ae09ee3", + "ClusterCreateTime": "2024-01-03T14:02:35.703000+00:00", + "MasterUsername": "postgres", + "EngineVersion": "15.3", + "LicenseModel": "postgresql-license", + "SnapshotType": "manual", + "PercentProgress": 100, + "StorageEncrypted": true, + "KmsKeyId": "arn:aws:kms:us-east-1:test:key/68958049-2091-4482-b0a1-9ce9c2b3a940", + "DBClusterSnapshotArn": "arn:aws:rds:us-east-1:test:cluster-snapshot:deleteme", + "IAMDatabaseAuthenticationEnabled": false, + "TagList": [], + "DbClusterResourceId": "cluster-54M44OD3CYK3R6SSWMGW22BIS4", + "SourceDBClusterSnapshotArn": "arn:aws:rds:us-east-1:test:cluster-snapshot:take2", + 
"DBSystemId": "rds:cluster-54M44OD3CYK3R6SSWMGW22BIS4", + "StorageType": "aurora" + }, + { + "AvailabilityZones": [ + "us-east-1a", + "us-east-1c", + "us-east-1d" + ], + "DBClusterSnapshotIdentifier": "take2", + "DBClusterIdentifier": "database-1", + "SnapshotCreateTime": "2024-01-03T14:34:15.234000+00:00", + "Engine": "aurora-postgresql", + "EngineMode": "provisioned", + "AllocatedStorage": 1, + "Status": "creating", + "Port": 0, + "VpcId": "vpc-09981817e8ae09ee3", + "ClusterCreateTime": "2024-01-03T14:02:35.703000+00:00", + "MasterUsername": "postgres", + "EngineVersion": "15.3", + "LicenseModel": "postgresql-license", + "SnapshotType": "manual", + "PercentProgress": 0, + "StorageEncrypted": true, + "KmsKeyId": "arn:aws:kms:us-east-1:test:key/68958049-2091-4482-b0a1-9ce9c2b3a940", + "DBClusterSnapshotArn": "arn:aws:rds:us-east-1:test:cluster-snapshot:take2", + "IAMDatabaseAuthenticationEnabled": false, + "TagList": [], + "DbClusterResourceId": "cluster-54M44OD3CYK3R6SSWMGW22BIS4" + } + ] +} diff --git a/plugins/aws/test/resources/files/rds/describe-db-snapshot-attributes__foo.json b/plugins/aws/test/resources/files/rds/describe-db-snapshot-attributes__foo.json new file mode 100644 index 0000000000..1bcbf5f14c --- /dev/null +++ b/plugins/aws/test/resources/files/rds/describe-db-snapshot-attributes__foo.json @@ -0,0 +1,15 @@ +{ + "DBSnapshotAttributesResult": { + "DBSnapshotIdentifier": "foo", + "DBSnapshotAttributes": [ + { + "AttributeName": "foo", + "AttributeValues": [ + "foo", + "foo", + "foo" + ] + } + ] + } +} diff --git a/plugins/aws/test/resources/files/rds/describe-db-snapshots.json b/plugins/aws/test/resources/files/rds/describe-db-snapshots.json new file mode 100644 index 0000000000..812a9153bd --- /dev/null +++ b/plugins/aws/test/resources/files/rds/describe-db-snapshots.json @@ -0,0 +1,69 @@ +{ + "Marker": "foo", + "DBSnapshots": [ + { + "DBSnapshotIdentifier": "foo", + "DBInstanceIdentifier": "foo", + "SnapshotCreateTime": "2024-01-03T13:45:34Z", + 
"Engine": "foo", + "AllocatedStorage": 123, + "Status": "foo", + "Port": 123, + "AvailabilityZone": "foo", + "VpcId": "foo", + "InstanceCreateTime": "2024-01-03T13:45:34Z", + "MasterUsername": "foo", + "EngineVersion": "foo", + "LicenseModel": "foo", + "SnapshotType": "foo", + "Iops": 123, + "OptionGroupName": "foo", + "PercentProgress": 123, + "SourceRegion": "foo", + "SourceDBSnapshotIdentifier": "foo", + "StorageType": "foo", + "TdeCredentialArn": "foo", + "Encrypted": true, + "KmsKeyId": "foo", + "DBSnapshotArn": "foo", + "Timezone": "foo", + "IAMDatabaseAuthenticationEnabled": true, + "ProcessorFeatures": [ + { + "Name": "foo", + "Value": "foo" + }, + { + "Name": "foo", + "Value": "foo" + }, + { + "Name": "foo", + "Value": "foo" + } + ], + "DbiResourceId": "foo", + "TagList": [ + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + }, + { + "Key": "foo", + "Value": "foo" + } + ], + "OriginalSnapshotCreateTime": "2024-01-03T13:45:34Z", + "SnapshotDatabaseTime": "2024-01-03T13:45:34Z", + "SnapshotTarget": "foo", + "StorageThroughput": 123, + "DBSystemId": "foo", + "DedicatedLogVolume": true, + "MultiTenant": true + } + ] +} diff --git a/plugins/aws/test/resources/files/redshift/describe-logging-status__resoto_delete_me.json b/plugins/aws/test/resources/files/redshift/describe-logging-status__resoto_delete_me.json new file mode 100644 index 0000000000..73881fa9e8 --- /dev/null +++ b/plugins/aws/test/resources/files/redshift/describe-logging-status__resoto_delete_me.json @@ -0,0 +1,14 @@ +{ + "LoggingEnabled": true, + "BucketName": "foo", + "S3KeyPrefix": "foo", + "LastSuccessfulDeliveryTime": "2024-01-04T09:19:15Z", + "LastFailureTime": "2024-01-04T09:19:15Z", + "LastFailureMessage": "foo", + "LogDestinationType": "cloudwatch", + "LogExports": [ + "foo", + "foo", + "foo" + ] +} diff --git a/plugins/aws/test/resources/files/route53/list-query-logging-configs__test_zone.json 
b/plugins/aws/test/resources/files/route53/list-query-logging-configs__test_zone.json new file mode 100644 index 0000000000..8373486184 --- /dev/null +++ b/plugins/aws/test/resources/files/route53/list-query-logging-configs__test_zone.json @@ -0,0 +1,20 @@ +{ + "QueryLoggingConfigs": [ + { + "Id": "foo", + "HostedZoneId": "foo", + "CloudWatchLogsLogGroupArn": "foo" + }, + { + "Id": "foo", + "HostedZoneId": "foo", + "CloudWatchLogsLogGroupArn": "foo" + }, + { + "Id": "foo", + "HostedZoneId": "foo", + "CloudWatchLogsLogGroupArn": "foo" + } + ], + "NextToken": "foo" +} diff --git a/plugins/aws/test/resources/files/sns/get-topic-attributes__arn_aws_sns_us_west_2_123456789012_my_topic.json b/plugins/aws/test/resources/files/sns/get-topic-attributes__arn_aws_sns_us_west_2_123456789012_my_topic.json index ca7ca87baf..6775ad7b85 100644 --- a/plugins/aws/test/resources/files/sns/get-topic-attributes__arn_aws_sns_us_west_2_123456789012_my_topic.json +++ b/plugins/aws/test/resources/files/sns/get-topic-attributes__arn_aws_sns_us_west_2_123456789012_my_topic.json @@ -6,7 +6,7 @@ "DeliveryPolicy": "{http:{defaultHealthyRetryPolicy:{minDelayTarget:20,maxDelayTarget:20,numRetries:3,numMaxDelayRetries:0,numNoDelayRetries:0,numMinDelayRetries:0,backoffFunction:linear},disableSubscriptionOverrides:false}}", "EffectiveDeliveryPolicy": "{http:{defaultHealthyRetryPolicy:{minDelayTarget:20,maxDelayTarget:20,numRetries:3,numMaxDelayRetries:0,numNoDelayRetries:0,numMinDelayRetries:0,backoffFunction:linear},disableSubscriptionOverrides:false}}", "Owner": "123456789012", - "Policy": "{Version:2008-10-17,Id:__default_policy_ID,Statement:[{Sid:__default_statement_ID,Effect:Allow,Principal:{AWS:*},Action:[SNS:Subscribe,SNS:ListSubscriptionsByTopic,SNS:DeleteTopic,SNS:GetTopicAttributes,SNS:Publish,SNS:RemovePermission,SNS:AddPermission,SNS:SetTopicAttributes],Resource:arn:aws:sns:us-west-2:123456789012:my-topic,Condition:{StringEquals:{AWS:SourceOwner:0123456789012}}}]}", + "Policy": 
"{\"Version\":\"2008-10-17\",\"Id\":\"__default_policy_ID\",\"Statement\":[{\"Sid\":\"__default_statement_ID\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"*\"},\"Action\":[\"SNS:Subscribe\",\"SNS:ListSubscriptionsByTopic\"],\"Resource\":\"arn:aws:sns:us-west-2:123456789012:my-topic\",\"Condition\":{\"StringEquals\":{\"AWS\":\"SourceOwner:0123456789012\"}}}]}", "TopicArn": "arn:aws:sns:us-west-2:123456789012:my-topic", "SubscriptionsPending": "0", "KmsMasterKeyId": "abc", diff --git a/plugins/aws/test/resources/files/ssm/describe-document-permission__Test123_Share.json b/plugins/aws/test/resources/files/ssm/describe-document-permission__Test123_Share.json new file mode 100644 index 0000000000..77796ca837 --- /dev/null +++ b/plugins/aws/test/resources/files/ssm/describe-document-permission__Test123_Share.json @@ -0,0 +1,17 @@ +{ + "AccountIds": [ "a", "b", "c" ], + "AccountSharingInfoList": [ + { + "AccountId": "a", + "SharedDocumentVersion": "v1" + }, + { + "AccountId": "b", + "SharedDocumentVersion": "v1" + }, + { + "AccountId": "c", + "SharedDocumentVersion": "v2" + } + ] +} diff --git a/plugins/aws/test/resources/files/ssm/describe-document__Test123.json b/plugins/aws/test/resources/files/ssm/describe-document__Test123.json new file mode 100644 index 0000000000..b86cd6b6d9 --- /dev/null +++ b/plugins/aws/test/resources/files/ssm/describe-document__Test123.json @@ -0,0 +1,40 @@ +{ + "Document": { + "Hash": "3f70de0ad8888f25e9de8221d4adec8d48240e3c0abdb629d84272f8ad8bc67a", + "HashType": "Sha256", + "Name": "arn:aws:ssm:us-east-1:506325616308:document/updatesimulatortest", + "Owner": "506325616308", + "CreatedDate": "2020-06-09T19:55:58.880000+02:00", + "StatusInformation": "Successfully created document", + "Author": "Batman", + "ApprovedVersion": "2", + "PendingReviewVersion": "2", + "ReviewStatus": "APPROVED", + "Status": "Active", + "Sha1": "foo", + "DisplayName": "updatesimulatortest", + "VersionName": "2", + "DocumentVersion": "2", + "Description": 
"updatesimulator", + "Parameters": [ + { + "Name": "Message", + "Type": "String", + "Description": "updatesimulator", + "DefaultValue": "updatesimulator" + } + ], + "PlatformTypes": [ + "Linux" + ], + "DocumentType": "Command", + "SchemaVersion": "2.2", + "LatestVersion": "2", + "DefaultVersion": "2", + "DocumentFormat": "JSON", + "TargetType": "/AWS::SSM::ManagedInstance", + "Tags": [], + "Category": [], + "CategoryEnum": [] + } +} diff --git a/plugins/aws/test/resources/files/ssm/get-document__Test123.json b/plugins/aws/test/resources/files/ssm/get-document__Test123.json new file mode 100644 index 0000000000..543dda47d3 --- /dev/null +++ b/plugins/aws/test/resources/files/ssm/get-document__Test123.json @@ -0,0 +1,9 @@ +{ + "Name": "arn:aws:ssm:us-east-1:506325616308:document/updatesimulatortest", + "CreatedDate": "2020-06-09T19:55:58.880000+02:00", + "DocumentVersion": "2", + "Status": "Active", + "Content": "{\"test\": \"replaced\"}", + "DocumentType": "Command", + "DocumentFormat": "JSON" +} diff --git a/plugins/aws/test/resources/files/ssm/list-documents__Owner_Self.json b/plugins/aws/test/resources/files/ssm/list-documents__Owner_Self.json new file mode 100644 index 0000000000..3dac04926c --- /dev/null +++ b/plugins/aws/test/resources/files/ssm/list-documents__Owner_Self.json @@ -0,0 +1,36 @@ +{ + "DocumentIdentifiers": [ + { + "Name": "Test123", + "CreatedDate": "2018-02-15T02:15:27.509000+01:00", + "Owner": "Amazon", + "PlatformTypes": [ + "Windows", + "Linux", + "MacOS" + ], + "DocumentVersion": "1", + "DocumentType": "Automation", + "SchemaVersion": "0.3", + "DocumentFormat": "JSON", + "TargetType": "/AWS::EC2::Instance", + "Tags": [] + }, + { + "Name": "t123", + "CreatedDate": "2018-02-15T02:20:01.405000+01:00", + "Owner": "Amazon", + "PlatformTypes": [ + "Windows", + "Linux", + "MacOS" + ], + "DocumentVersion": "1", + "DocumentType": "Automation", + "SchemaVersion": "0.3", + "DocumentFormat": "JSON", + "TargetType": "/AWS::EC2::Instance", + "Tags": [] + 
} + ] +} diff --git a/plugins/aws/test/resources/files/ssm/list-resource-compliance-summaries.json b/plugins/aws/test/resources/files/ssm/list-resource-compliance-summaries.json new file mode 100644 index 0000000000..ce93307dbc --- /dev/null +++ b/plugins/aws/test/resources/files/ssm/list-resource-compliance-summaries.json @@ -0,0 +1,107 @@ +{ + "ResourceComplianceSummaryItems": [ + { + "ComplianceType": "foo", + "ResourceType": "foo", + "ResourceId": "foo", + "Status": "NON_COMPLIANT", + "OverallSeverity": "HIGH", + "ExecutionSummary": { + "ExecutionTime": "2024-01-04T14:41:17Z", + "ExecutionId": "foo", + "ExecutionType": "foo" + }, + "CompliantSummary": { + "CompliantCount": 123, + "SeveritySummary": { + "CriticalCount": 123, + "HighCount": 123, + "MediumCount": 123, + "LowCount": 123, + "InformationalCount": 123, + "UnspecifiedCount": 123 + } + }, + "NonCompliantSummary": { + "NonCompliantCount": 123, + "SeveritySummary": { + "CriticalCount": 123, + "HighCount": 123, + "MediumCount": 123, + "LowCount": 123, + "InformationalCount": 123, + "UnspecifiedCount": 123 + } + } + }, + { + "ComplianceType": "foo", + "ResourceType": "foo", + "ResourceId": "foo", + "Status": "NON_COMPLIANT", + "OverallSeverity": "HIGH", + "ExecutionSummary": { + "ExecutionTime": "2024-01-04T14:41:17Z", + "ExecutionId": "foo", + "ExecutionType": "foo" + }, + "CompliantSummary": { + "CompliantCount": 123, + "SeveritySummary": { + "CriticalCount": 123, + "HighCount": 123, + "MediumCount": 123, + "LowCount": 123, + "InformationalCount": 123, + "UnspecifiedCount": 123 + } + }, + "NonCompliantSummary": { + "NonCompliantCount": 123, + "SeveritySummary": { + "CriticalCount": 123, + "HighCount": 123, + "MediumCount": 123, + "LowCount": 123, + "InformationalCount": 123, + "UnspecifiedCount": 123 + } + } + }, + { + "ComplianceType": "foo", + "ResourceType": "foo", + "ResourceId": "foo", + "Status": "NON_COMPLIANT", + "OverallSeverity": "HIGH", + "ExecutionSummary": { + "ExecutionTime": 
"2024-01-04T14:41:17Z", + "ExecutionId": "foo", + "ExecutionType": "foo" + }, + "CompliantSummary": { + "CompliantCount": 123, + "SeveritySummary": { + "CriticalCount": 123, + "HighCount": 123, + "MediumCount": 123, + "LowCount": 123, + "InformationalCount": 123, + "UnspecifiedCount": 123 + } + }, + "NonCompliantSummary": { + "NonCompliantCount": 123, + "SeveritySummary": { + "CriticalCount": 123, + "HighCount": 123, + "MediumCount": 123, + "LowCount": 123, + "InformationalCount": 123, + "UnspecifiedCount": 123 + } + } + } + ], + "NextToken": "foo" +} diff --git a/plugins/aws/test/resources/files/wafv2/get-logging-configuration__arn_aws_wafv2_us_east_1_test_regional_deleteme.json b/plugins/aws/test/resources/files/wafv2/get-logging-configuration__arn_aws_wafv2_us_east_1_test_regional_deleteme.json new file mode 100644 index 0000000000..7cc335ab99 --- /dev/null +++ b/plugins/aws/test/resources/files/wafv2/get-logging-configuration__arn_aws_wafv2_us_east_1_test_regional_deleteme.json @@ -0,0 +1,9 @@ +{ + "LoggingConfiguration": { + "ResourceArn": "arn:aws:wafv2:us-east-1:test:regional/deleteme", + "LogDestinationConfigs": [ + "arn:aws:logs:us-east-1:625596817853:log-group:aws-waf-logs-deleteme" + ], + "ManagedByFirewallManager": false + } +} diff --git a/plugins/aws/test/resources/files/wafv2/get-web-acl__acl1_deleteme_REGIONAL.json b/plugins/aws/test/resources/files/wafv2/get-web-acl__acl1_deleteme_REGIONAL.json new file mode 100644 index 0000000000..52e23494b2 --- /dev/null +++ b/plugins/aws/test/resources/files/wafv2/get-web-acl__acl1_deleteme_REGIONAL.json @@ -0,0 +1,74 @@ +{ + "WebACL": { + "Name": "deleteme", + "Id": "123", + "ARN": "arn:aws:wafv2:us-east-1:test:regional/deleteme", + "DefaultAction": { + "Allow": {} + }, + "Description": "", + "Rules": [ + { + "Name": "ksjdhf", + "Priority": 0, + "Statement": { + "ByteMatchStatement": { + "SearchString": "c2Rmc2Rm", + "FieldToMatch": { + "Cookies": { + "MatchPattern": { + "All": {} + }, + "MatchScope": "ALL", + 
"OversizeHandling": "NO_MATCH" + } + }, + "TextTransformations": [ + { + "Priority": 0, + "Type": "NONE" + } + ], + "PositionalConstraint": "STARTS_WITH" + } + }, + "Action": { + "Block": {} + }, + "VisibilityConfig": { + "SampledRequestsEnabled": true, + "CloudWatchMetricsEnabled": true, + "MetricName": "ksjdhf" + } + } + ], + "VisibilityConfig": { + "SampledRequestsEnabled": true, + "CloudWatchMetricsEnabled": true, + "MetricName": "deleteme" + }, + "Capacity": 2, + "ManagedByFirewallManager": false, + "LabelNamespace": "awswaf:test:webacl:deleteme:", + "CustomResponseBodies": { + "test": { + "ContentType": "text/plain", + "Content": "test" + } + }, + "CaptchaConfig": { + "FailOpen": "ENABLED", + "BehaviorOnFail": "CAPTCHA" + }, + "ChallengeConfig": { + "FallbackBehavior": "ALLOW", + "UntrustedIPAction": "BLOCK" + }, + "AssociationConfig": { + "RequestBody": { + "DefaultSizeInspectionLimit": 23 + } + } + }, + "LockToken": "04e465af-501f-45b8-9d8a-39668cfc6674" +} diff --git a/plugins/aws/test/resources/files/wafv2/list-resources-for-web-acl__arn_aws_wafv2_us_east_1_test_regional_deleteme.json b/plugins/aws/test/resources/files/wafv2/list-resources-for-web-acl__arn_aws_wafv2_us_east_1_test_regional_deleteme.json new file mode 100644 index 0000000000..ea97d82e82 --- /dev/null +++ b/plugins/aws/test/resources/files/wafv2/list-resources-for-web-acl__arn_aws_wafv2_us_east_1_test_regional_deleteme.json @@ -0,0 +1,6 @@ +{ + "ResourceArns": [ + "lb1", + "arn:aws:elasticloadbalancing:us-west-2:test:loadbalancer/net/t2/l2" + ] +} diff --git a/plugins/aws/test/resources/files/wafv2/list-web-acls__REGIONAL.json b/plugins/aws/test/resources/files/wafv2/list-web-acls__REGIONAL.json new file mode 100644 index 0000000000..cb2968d852 --- /dev/null +++ b/plugins/aws/test/resources/files/wafv2/list-web-acls__REGIONAL.json @@ -0,0 +1,12 @@ +{ + "NextMarker": "deleteme", + "WebACLs": [ + { + "Name": "deleteme", + "Id": "acl1", + "Description": "", + "LockToken": 
"04e465af-501f-45b8-9d8a-39668cfc6674", + "ARN": "arn:aws:wafv2:us-east-1:test:regional/deleteme" + } + ] +} diff --git a/plugins/aws/test/resources/opensearch_test.py b/plugins/aws/test/resources/opensearch_test.py new file mode 100644 index 0000000000..48215bc2ed --- /dev/null +++ b/plugins/aws/test/resources/opensearch_test.py @@ -0,0 +1,6 @@ +from resoto_plugin_aws.resource.opensearch import AwsOpenSearchDomain +from test.resources import round_trip_for + + +def test_opensearch_domains() -> None: + round_trip_for(AwsOpenSearchDomain) diff --git a/plugins/aws/test/resources/rds_test.py b/plugins/aws/test/resources/rds_test.py index 1d4cdcd0ff..dba7216f40 100644 --- a/plugins/aws/test/resources/rds_test.py +++ b/plugins/aws/test/resources/rds_test.py @@ -1,9 +1,10 @@ -from resotolib.graph import Graph -from test.resources import round_trip_for from types import SimpleNamespace from typing import cast, Any + from resoto_plugin_aws.aws_client import AwsClient -from resoto_plugin_aws.resource.rds import AwsRdsInstance, AwsRdsCluster +from resoto_plugin_aws.resource.rds import AwsRdsInstance, AwsRdsCluster, AwsRdsSnapshot, AwsRdsClusterSnapshot +from resotolib.graph import Graph +from test.resources import round_trip_for def test_rds_instances() -> None: @@ -16,6 +17,16 @@ def test_rds_cluster() -> None: round_trip_for(AwsRdsCluster) +def test_rds_snapshots() -> None: + first, _ = round_trip_for(AwsRdsSnapshot, "description", "volume_id", "owner_id", "owner_alias") + first.rds_attributes = {"foo": ["foo", "foo", "foo"]} + + +def test_rds_cluster_snapshots() -> None: + first, _ = round_trip_for(AwsRdsClusterSnapshot, "description", "volume_id", "owner_id", "owner_alias") + first.rds_attributes = {"foo": ["foo", "foo", "foo"]} + + def test_tagging() -> None: instance, _ = round_trip_for(AwsRdsInstance) diff --git a/plugins/aws/test/resources/ssm_test.py b/plugins/aws/test/resources/ssm_test.py index 6e4c15e269..f38075fb91 100644 --- 
a/plugins/aws/test/resources/ssm_test.py +++ b/plugins/aws/test/resources/ssm_test.py @@ -1,7 +1,27 @@ -from resoto_plugin_aws.resource.ssm import AwsSSMInstanceInformation +from resoto_plugin_aws.resource.ssm import ( + AwsSSMInstance, + AwsSSMDocument, + AwsSSMAccountSharingInfo, + AwsSSMResourceCompliance, +) from test.resources import round_trip_for -def test_queues() -> None: - first, builder = round_trip_for(AwsSSMInstanceInformation) - assert len(builder.resources_of(AwsSSMInstanceInformation)) == 2 +def test_instances() -> None: + first, builder = round_trip_for(AwsSSMInstance) + assert len(builder.resources_of(AwsSSMInstance)) == 2 + + +def test_resource_compliance() -> None: + round_trip_for(AwsSSMResourceCompliance) + + +def test_documents() -> None: + first, builder = round_trip_for(AwsSSMDocument) + assert len(builder.resources_of(AwsSSMDocument)) == 1 + first.document_shared_with_accounts = ["a", "b", "c"] + first.document_sharing_info = [ + AwsSSMAccountSharingInfo("a", "v1"), + AwsSSMAccountSharingInfo("b", "v1"), + AwsSSMAccountSharingInfo("c", "v2"), + ] diff --git a/plugins/aws/test/resources/waf_test.py b/plugins/aws/test/resources/waf_test.py new file mode 100644 index 0000000000..73d3a8eaa1 --- /dev/null +++ b/plugins/aws/test/resources/waf_test.py @@ -0,0 +1,6 @@ +from resoto_plugin_aws.resource.waf import AwsWafWebACL +from test.resources import round_trip_for + + +def test_acls() -> None: + round_trip_for(AwsWafWebACL) diff --git a/plugins/aws/tools/aws_model_gen.py b/plugins/aws/tools/aws_model_gen.py index 862b318eda..3093633c3e 100644 --- a/plugins/aws/tools/aws_model_gen.py +++ b/plugins/aws/tools/aws_model_gen.py @@ -31,7 +31,7 @@ def assignment(self) -> str: def type_string(self) -> str: if self.is_array: - return f"List[{self.type}]" + return f"Optional[List[{self.type}]]" else: return f"Optional[{self.type}]" @@ -118,6 +118,7 @@ def to_snake(name: str) -> str: "Timestamp": "datetime", "TagsMap": "Dict[str, str]", 
"MillisecondDateTime": "datetime", + "SearchString": "str", } simple_type_map |= {k.lower(): v for k, v in simple_type_map.items()} @@ -261,9 +262,9 @@ def all_models() -> List[AwsModel]: return result -def create_test_response(service: str, function: str) -> JsonElement: +def create_test_response(service: str, function: str, is_pascal: bool = False) -> JsonElement: sm = service_model(service) - op = sm.operation_model(pascalcase(function)) + op = sm.operation_model(function if is_pascal else pascalcase(function)) def sample(shape: Shape) -> JsonElement: if isinstance(shape, StringShape) and shape.enum: @@ -314,9 +315,6 @@ def default_imports() -> str: "accessanalyzer": [ # AwsResotoModel("list-analyzers", "analyzers", "AnalyzerSummary", prefix="AccessAnalyzer"), ], - "acm": [ - # AwsResotoModel("list-certificates", "CertificateSummaryList", "CertificateSummary", prefix="ACM"), - ], "acm-pca": [ # AwsResotoModel( # "list-certificate-authorities", "CertificateAuthorities", "CertificateAuthority", prefix="ACMPCA" @@ -396,13 +394,7 @@ def default_imports() -> str: # AwsResotoModel("list-data-catalogs", "DataCatalogs", "DataCatalog", prefix="Athena"), ], "autoscaling": [ - # AwsResotoModel( - # "describe-auto-scaling-groups", - # "AutoScalingGroupName", - # "AutoScalingGroup", - # prefix="AutoScaling", - # prop_prefix="autoscaling_", - # ), + # AwsResotoModel( "describe-auto-scaling-groups", "AutoScalingGroupName", "AutoScalingGroup", prefix="AutoScaling", prop_prefix="autoscaling_"), ], "cloudformation": [ # AwsResotoModel("describe-stacks", "Stacks", "Stack", prefix="CloudFormation", prop_prefix="stack_"), @@ -418,6 +410,13 @@ def default_imports() -> str: # ), ], "cloudfront": [ + # AwsResotoModel( + # "get-distribution", + # "Distribution", + # "Distribution", + # prefix="CloudFront", + # prop_prefix="distribution_", + # ), # AwsResotoModel( # "list-distributions", # "DistributionSummary", @@ -515,95 +514,27 @@ def default_imports() -> str: ], "ec2": [ # 
AwsResotoModel("describe-hosts", "Hosts", "Host", prefix="Ec2", prop_prefix="host_") - # AwsResotoModel( - # "describe-route-tables", - # "RouteTables", - # "RouteTable", - # base="BaseRoutingTable", - # prefix="Ec2", - # prop_prefix="route_table_", - # ), - # AwsResotoModel( - # "describe-vpc-endpoints", - # "VpcEndpoints", - # "VpcEndpoint", - # base="BaseEndpoint", - # prefix="Ec2", - # prop_prefix="endpoint_", - # ), - # AwsResotoModel( - # "describe-vpc-peering-connections", - # "VpcPeeringConnections", - # "VpcPeeringConnection", - # base="BasePeeringConnection", - # prefix="Ec2", - # prop_prefix="connection_", - # ), - # AwsResotoModel( - # "describe-snapshots", "Snapshots", "Snapshot", base="BaseSnapshot", prefix="Ec2", prop_prefix="snapshot_" - # ), - # AwsResotoModel( - # "describe-internet-gateways", - # "InternetGateways", - # "InternetGateway", - # base="BaseGateway", - # prefix="Ec2", - # prop_prefix="gateway_", - # ), - # AwsResotoModel( - # "describe-nat-gateways", "NatGateways", "NatGateway", base="BaseGateway", prefix="Ec2", prop_prefix="nat_" - # ), - # AwsResotoModel( - # "describe-security-groups", - # "SecurityGroups", - # "SecurityGroup", - # base="BaseSecurityGroup", - # prefix="Ec2", - # prop_prefix="group_", - # ), - # AwsResotoModel( - # "describe-subnets", - # "Subnets", - # "Subnet", - # base="BaseSubnet", - # prefix="Ec2", - # prop_prefix="subnet_", - # ), + # AwsResotoModel( "describe-route-tables", "RouteTables", "RouteTable", base="BaseRoutingTable", prefix="Ec2", prop_prefix="route_table_", ), + # AwsResotoModel( "describe-vpc-endpoints", "VpcEndpoints", "VpcEndpoint", base="BaseEndpoint", prefix="Ec2", prop_prefix="endpoint_", ), + # AwsResotoModel( "describe-vpc-peering-connections", "VpcPeeringConnections", "VpcPeeringConnection", base="BasePeeringConnection", prefix="Ec2", prop_prefix="connection_", ), + # AwsResotoModel( "describe-snapshots", "Snapshots", "Snapshot", base="BaseSnapshot", prefix="Ec2", prop_prefix="snapshot_" 
), + # AwsResotoModel( "describe-internet-gateways", "InternetGateways", "InternetGateway", base="BaseGateway", prefix="Ec2", prop_prefix="gateway_", ), + # AwsResotoModel( "describe-nat-gateways", "NatGateways", "NatGateway", base="BaseGateway", prefix="Ec2", prop_prefix="nat_" ), + # AwsResotoModel( "describe-security-groups", "SecurityGroups", "SecurityGroup", base="BaseSecurityGroup", prefix="Ec2", prop_prefix="group_", ), + # AwsResotoModel( "describe-subnets", "Subnets", "Subnet", base="BaseSubnet", prefix="Ec2", prop_prefix="subnet_", ), # AwsResotoModel("describe-vpcs", "Vpcs", "Vpc", base="BaseNetwork", prefix="Ec2", prop_prefix="vpc_"), - # AwsResotoModel( - # "describe-addresses", "Addresses", "Address", base="BaseIPAddress", prefix="Ec2", prop_prefix="ip_" - # ), - # AwsResotoModel( - # "describe-network-interfaces", - # "NetworkInterfaces", - # "NetworkInterface", - # base="BaseNetworkInterface", - # prefix="Ec2", - # prop_prefix="nic_", - # ), - # AwsResotoModel( - # "describe-instances", - # "Reservations", - # "Instance", - # base="BaseInstance", - # prefix="Ec2", - # prop_prefix="instance_", - # ), + # AwsResotoModel( "describe-addresses", "Addresses", "Address", base="BaseIPAddress", prefix="Ec2", prop_prefix="ip_" ), + # AwsResotoModel( "describe-network-interfaces", "NetworkInterfaces", "NetworkInterface", base="BaseNetworkInterface", prefix="Ec2", prop_prefix="nic_", ), + # AwsResotoModel( "describe-instances", "Reservations", "Instance", base="BaseInstance", prefix="Ec2", prop_prefix="instance_", ), # AwsResotoModel("describe-key-pairs", "KeyPairs", "KeyPairInfo", prefix="Ec2"), # AwsResotoModel("describe-volumes", "Volumes", "Volume", base="BaseVolume", prefix="Ec2"), # AwsResotoModel("describe_addresses", "Addresses", "Address", prefix="Ec2"), - # AwsResotoModel( - # "describe-instance-types", "InstanceTypes", "InstanceTypeInfo", prefix="Ec2", prop_prefix="reservation_" - # ), - # AwsResotoModel( - # "describe_reserved_instances", - # 
"ReservedInstances", - # "ReservedInstances", - # prefix="Ec2", - # prop_prefix="reservation_", - # ), + # AwsResotoModel( "describe-instance-types", "InstanceTypes", "InstanceTypeInfo", prefix="Ec2", prop_prefix="reservation_" ), + # AwsResotoModel( "describe_reserved_instances", "ReservedInstances", "ReservedInstances", prefix="Ec2", prop_prefix="reservation_", ), # AwsResotoModel("describe-network-acls", "NetworkAcls", "NetworkAcl", prefix="Ec2"), # AwsResotoModel("describe-flow-logs", "FlowLogs", "FlowLog", prefix="Ec2"), + # AwsResotoModel("describe-images", "Images", "Image", prefix="Ec2"), + # AwsResotoModel( "describe-launch-template-versions", "LaunchTemplateVersions", "LaunchTemplateVersion", prefix="LaunchTemplate", ), ], "ecs": [ # AwsResotoModel( @@ -677,13 +608,8 @@ def default_imports() -> str: # ), ], "elb": [ - # AwsResotoModel( - # "describe-load-balancers", - # "LoadBalancerDescriptions", - # "LoadBalancerDescription", - # prefix="Elb", - # prop_prefix="elb_", - # ), + # AwsResotoModel( "describe-load-balancers", "LoadBalancerDescriptions", "LoadBalancerDescription", prefix="Elb", prop_prefix="elb_", ), + # AwsResotoModel( "describe-load-balancer-attributes", "DescribeLoadBalancerAttributesResult", "LoadBalancerAttributes", prefix="Elb" ), ], "elbv2": [ # AwsResotoModel( @@ -835,27 +761,19 @@ def default_imports() -> str: # AwsResotoModel("get-products", "PriceList", "PriceListItemJSON", prefix="Price", prop_prefix="price_") ], "redshift": [ - # AwsResotoModel( - # "describe-clusters", - # "Clusters", - # "Cluster", - # prefix="Redshift", - # prop_prefix="redshift_", - # ), + # AwsResotoModel( "describe-clusters", "Clusters", "Cluster", prefix="Redshift", prop_prefix="redshift_"), + # AwsResotoModel("describe-logging-status", "DescribeLoggingStatusResponse", prefix="Redshift"), ], "rds": [ - # AwsResotoModel("describe-db-instances", "Instances", "DBInstance", prefix="Rds", prop_prefix="rds_") - # AwsResotoModel("describe-db-clusters", "Clusters", 
"DBCluster", prefix="Rds", prop_prefix="rds_") + # # AwsResotoModel("describe-db-instances", "Instances", "DBInstance", prefix="Rds", prop_prefix="rds_") + # # AwsResotoModel("describe-db-clusters", "Clusters", "DBCluster", prefix="Rds", prop_prefix="rds_") + # # AwsResotoModel("describe-db-snapshots", "DBSnapshots", "DBSnapshot", prefix="Rds", prop_prefix="rds_") + # AwsResotoModel( "describe-db-cluster-snapshots", "DBClusterSnapshots", "DBClusterSnapshot", prefix="Rds", prop_prefix="rds_") ], "route53": [ # AwsResotoModel("list_hosted_zones", "HostedZones", "HostedZone", prefix="Route53", prop_prefix="zone_"), - # AwsResotoModel( - # "list_resource_record_sets", - # "ResourceRecordSets", - # "ResourceRecordSet", - # prefix="Route53", - # prop_prefix="record_", - # ), + # AwsResotoModel( "list_resource_record_sets", "ResourceRecordSets", "ResourceRecordSet", prefix="Route53", prop_prefix="record_", ), + # AwsResotoModel("list-query-logging-configs", "QueryLoggingConfigs", "QueryLoggingConfig", prefix="Route53"), ], "s3": [ # AwsResotoModel("list-buckets", "Buckets", "Bucket", prefix="S3", prop_prefix="s3_"), @@ -996,20 +914,32 @@ def default_imports() -> str: ], "ssm": [ # AwsResotoModel("describe-instance-information", "InstanceInformationList", "InstanceInformation", prefix="SSM"), + # AwsResotoModel("list-documents", "DocumentIdentifiers", "DocumentIdentifier", prefix="SSM"), + # AwsResotoModel("list-documents", "DocumentIdentifiers", "DescribeDocumentPermissionResponse", prefix="SSM"), + # AwsResotoModel( "list-resource-compliance-summaries", "ResourceComplianceSummaryItems", "ResourceComplianceSummaryItem", prefix="SSM", ), ], "secretsmanager": [ # AwsResotoModel( "list-secrets", "SecretList", "SecretListEntry", prefix="SecretsManager", name="AwsSecretsManagerSecret" ), # AwsResotoModel("list-secrets", "SecretList", "SecretVersionStagesType", prefix="SecretsManager"), ], + "opensearch": [ + # AwsResotoModel("describe-domains", "DomainStatusList", 
"DomainStatus", prefix="OpenSearch", name="AwsOpenSearchDomain"), + ], + "acm": [ + # AwsResotoModel("describe-certificate", "Certificate", "CertificateDetail", prefix="Acm", name="AcmCertificate") + ], + "wafv2": [ + # AwsResotoModel("get-logging-configuration", "LoggingConfigurations", "LoggingConfiguration", prefix="Waf") + ], } if __name__ == "__main__": """print some test data""" - print(json.dumps(create_test_response("secretsmanager", "list-secrets"), indent=2)) + print(json.dumps(create_test_response("ssm", "list-resource-compliance-summaries"), indent=2)) """print the class models""" # print(default_imports()) for model in all_models(): - pass - # print(model.to_class()) + # pass + print(model.to_class()) diff --git a/plugins/azure/resoto_plugin_azure/resource/compute.py b/plugins/azure/resoto_plugin_azure/resource/compute.py index 5d7ffeb648..4bbe7857ac 100644 --- a/plugins/azure/resoto_plugin_azure/resource/compute.py +++ b/plugins/azure/resoto_plugin_azure/resource/compute.py @@ -1215,7 +1215,7 @@ class AzureResourceSku(AzureResource): "atime": K(None), "api_versions": S("apiVersions"), "capabilities": S("capabilities") >> ForallBend(AzureResourceSkuCapabilities.mapping), - "capacity": S("capacity") >> Bend(AzureResourceSkuCapacity.mapping), + "sku_capacity": S("capacity") >> Bend(AzureResourceSkuCapacity.mapping), "costs": S("costs") >> ForallBend(AzureResourceSkuCosts.mapping), "family": S("family"), "sku_kind": S("kind"), @@ -1228,7 +1228,7 @@ class AzureResourceSku(AzureResource): } api_versions: Optional[List[str]] = field(default=None, metadata={'description': 'The api versions that support this sku.'}) # fmt: skip capabilities: Optional[List[AzureResourceSkuCapabilities]] = field(default=None, metadata={'description': 'A name value pair to describe the capability.'}) # fmt: skip - capacity: Optional[AzureResourceSkuCapacity] = field(default=None, metadata={'description': 'Describes scaling information of a sku.'}) # fmt: skip + sku_capacity: 
Optional[AzureResourceSkuCapacity] = field(default=None, metadata={'description': 'Describes scaling information of a sku.'}) # fmt: skip costs: Optional[List[AzureResourceSkuCosts]] = field(default=None, metadata={'description': 'Metadata for retrieving price info.'}) # fmt: skip family: Optional[str] = field(default=None, metadata={"description": "The family of this particular sku."}) sku_kind: Optional[str] = field(default=None, metadata={'description': 'The kind of resources that are supported in this sku.'}) # fmt: skip diff --git a/plugins/azure/resoto_plugin_azure/resource/network.py b/plugins/azure/resoto_plugin_azure/resource/network.py index ed6a521a36..88c2779f64 100644 --- a/plugins/azure/resoto_plugin_azure/resource/network.py +++ b/plugins/azure/resoto_plugin_azure/resource/network.py @@ -4734,20 +4734,17 @@ class AzureRouteFilter(AzureResource): "id": S("id"), "tags": S("tags", default={}), "name": S("name"), - "ctime": K(None), - "mtime": K(None), - "atime": K(None), "etag": S("etag"), "ipv6_peerings": S("properties", "ipv6Peerings") >> ForallBend(AzureExpressRouteCircuitPeering.mapping), "filter_peerings": S("properties", "peerings") >> ForallBend(AzureExpressRouteCircuitPeering.mapping), "provisioning_state": S("properties", "provisioningState"), - "rules": S("properties", "rules") >> ForallBend(AzureRouteFilterRule.mapping), + "filter_rules": S("properties", "rules") >> ForallBend(AzureRouteFilterRule.mapping), } etag: Optional[str] = field(default=None, metadata={'description': 'A unique read-only string that changes whenever the resource is updated.'}) # fmt: skip ipv6_peerings: Optional[List[AzureExpressRouteCircuitPeering]] = field(default=None, metadata={'description': 'A collection of references to express route circuit ipv6 peerings.'}) # fmt: skip filter_peerings: Optional[List[AzureExpressRouteCircuitPeering]] = field(default=None, metadata={'description': 'A collection of references to express route circuit peerings.'}) # fmt: skip 
provisioning_state: Optional[str] = field(default=None, metadata={'description': 'The current provisioning state.'}) # fmt: skip - rules: Optional[List[AzureRouteFilterRule]] = field(default=None, metadata={'description': 'Collection of RouteFilterRules contained within a route filter.'}) # fmt: skip + filter_rules: Optional[List[AzureRouteFilterRule]] = field(default=None, metadata={'description': 'Collection of RouteFilterRules contained within a route filter.'}) # fmt: skip @define(eq=False, slots=False) diff --git a/resotocore/resotocore/config/config_handler_service.py b/resotocore/resotocore/config/config_handler_service.py index ddd86317ea..995118f29c 100644 --- a/resotocore/resotocore/config/config_handler_service.py +++ b/resotocore/resotocore/config/config_handler_service.py @@ -184,7 +184,7 @@ async def update_configs_model(self, kinds: List[Kind]) -> Model: model = await self.get_configs_model() # make sure the update is valid, but ignore overlapping property paths, so the same name can # have different types in different sections - updated = model.update_kinds(kinds, check_overlap=False) + updated = model.update_kinds(kinds, check_overlap=False, replace=False) # store all updated kinds await self.model_db.update_many(kinds) return updated diff --git a/resotocore/resotocore/db/arango_query.py b/resotocore/resotocore/db/arango_query.py index 91ff6b9186..e52e026b05 100644 --- a/resotocore/resotocore/db/arango_query.py +++ b/resotocore/resotocore/db/arango_query.py @@ -303,9 +303,9 @@ def predicate(cursor: str, p: Predicate, context_path: Optional[str] = None) -> bvn = ctx.next_bind_var_name() op = lgt_ops[p.op] if prop.simple_kind.reverse_order and p.op in lgt_ops else p.op if op in ["in", "not in"] and isinstance(p.value, list): - ctx.bind_vars[bvn] = [prop.kind.coerce(a) for a in p.value] + ctx.bind_vars[bvn] = [prop.kind.coerce(a, array_creation=False) for a in p.value] else: - ctx.bind_vars[bvn] = prop.kind.coerce(p.value) + ctx.bind_vars[bvn] = 
prop.kind.coerce(p.value, array_creation=False) var_name = f"{cursor}.{prop_name}" if op == "=~": # use regex_test to do case-insensitive matching p_term = f"REGEX_TEST({var_name}, @{bvn}, true)" @@ -319,6 +319,7 @@ def predicate(cursor: str, p: Predicate, context_path: Optional[str] = None) -> def context_term(cursor: str, aep: ContextTerm, context_path: Optional[str] = None) -> Tuple[Optional[str], str]: predicate_statement = "" filter_statement = "" + ars_stmts = [] path_cursor = cursor context_path = f"{context_path}.{aep.name}" if context_path else aep.name # unfold only, if random access is required @@ -329,7 +330,9 @@ def context_term(cursor: str, aep: ContextTerm, context_path: Optional[str] = No spath = spath[:-1] for ar in [a.lstrip(".") for a in spath]: nxt_crs = ctx.next_crs("pre") - predicate_statement += f" FOR {nxt_crs} IN TO_ARRAY({path_cursor}.{ar})" + # see predicate for explanation + ars_stmts.append(f"{nxt_crs}._internal!=true") + predicate_statement += f" FOR {nxt_crs} IN APPEND(TO_ARRAY({path_cursor}.{ar}), {{_internal: true}})" path_cursor = nxt_crs ps, fs = term(path_cursor, aep.term, context_path) else: @@ -341,6 +344,8 @@ def context_term(cursor: str, aep: ContextTerm, context_path: Optional[str] = No predicate_statement += ps if fs: filter_statement += fs + if ars_stmts: + filter_statement = f"({filter_statement} AND {' AND '.join(ars_stmts)})" return predicate_statement, filter_statement def with_id(cursor: str, t: IdTerm) -> str: diff --git a/resotocore/resotocore/db/entitydb.py b/resotocore/resotocore/db/entitydb.py index 68fdb7e726..dbe492ebca 100644 --- a/resotocore/resotocore/db/entitydb.py +++ b/resotocore/resotocore/db/entitydb.py @@ -48,6 +48,10 @@ async def delete(self, key: K) -> bool: async def delete_value(self, value: T) -> None: pass + @abstractmethod + async def delete_many(self, keys: List[K]) -> None: + pass + @abstractmethod async def create_update_schema(self) -> None: pass @@ -109,6 +113,9 @@ async def 
delete_value(self, value: T) -> None: key = self.key_of(value) await self.db.delete(self.collection_name, key, ignore_missing=True) + async def delete_many(self, keys: List[K]) -> None: + await self.db.delete_many(self.collection_name, [{"_key": k} for k in keys]) + async def create_update_schema(self) -> None: name = self.collection_name db = self.db @@ -157,6 +164,10 @@ async def delete_value(self, value: T) -> None: await self.db.delete_value(value) await self.event_sender.core_event(f"{self.entity_name}-deleted") + async def delete_many(self, keys: List[K]) -> None: + await self.db.delete_many(keys) + await self.event_sender.core_event(f"{self.entity_name}-deleted") + async def create_update_schema(self) -> None: return await self.db.create_update_schema() diff --git a/resotocore/resotocore/db/graphdb.py b/resotocore/resotocore/db/graphdb.py index 094a85c077..43454260dc 100644 --- a/resotocore/resotocore/db/graphdb.py +++ b/resotocore/resotocore/db/graphdb.py @@ -346,7 +346,9 @@ async def update_node_with( # call adjuster on resulting node ctime = value_in_path_get(node, NodePath.reported_ctime, utc_str()) - adjusted = self.adjust_node(model, GraphAccess.dump_direct(node_id, updated, kind, recompute=True), ctime) + adjusted = self.adjust_node( + model, GraphAccess.dump_direct(node_id, updated, kind, recompute=True), ctime, utc_str() + ) update = {"_key": node["_key"], "hash": adjusted["hash"], "kinds": adjusted["kinds"], "flat": adjusted["flat"]} # copy relevant sections into update node for sec in [section] if section else Section.content_ordered: @@ -876,13 +878,20 @@ async def delete_marked_update(self, change_id: str, tx: Optional[AsyncArangoTra doc = {"_key": str(uuid.uuid5(uuid.NAMESPACE_DNS, change_id))} await db.delete(self.in_progress, doc, ignore_missing=True) - def adjust_node(self, model: Model, json: Json, created_at: Any) -> Json: + def adjust_node( + self, model: Model, json: Json, created_at: str, updated_at: str, *, mtime_from_ctime: bool = 
False + ) -> Json: reported = json[Section.reported] # preserve ctime in reported: if it is not set, use the creation time of the object if not reported.get("ctime", None): kind = model[reported] if isinstance(kind, ComplexKind) and "ctime" in kind: reported["ctime"] = created_at + # if no mtime is reported, we set updated_at as modification time + if not reported.get("mtime", None): + kind = model[reported] + if isinstance(kind, ComplexKind) and "mtime" in kind: + reported["mtime"] = reported.get("ctime", updated_at) if mtime_from_ctime else updated_at # adjuster has the option to manipulate the resulting json return self.node_adjuster.adjust(json) @@ -899,7 +908,7 @@ def prepare_nodes( optional_properties = [*Section.all_ordered, "refs", "kinds", "flat", "hash"] def insert_node(node: Json) -> None: - elem = self.adjust_node(model, node, access.at_json) + elem = self.adjust_node(model, node, access.at_json, access.at_json, mtime_from_ctime=True) js_doc: Json = {"_key": elem["id"], "created": access.at_json, "updated": access.at_json} for prop in optional_properties: value = node.get(prop, None) @@ -918,7 +927,7 @@ def update_or_delete_node(node: Json) -> None: info.nodes_deleted += 1 elif elem["hash"] != hash_string: # node is in db and in the graph, content is different - adjusted: Json = self.adjust_node(model, elem, node["created"]) + adjusted: Json = self.adjust_node(model, elem, node["created"], access.at_json) js = {"_key": key, "created": node["created"], "updated": access.at_json} for prop in optional_properties: value = adjusted.get(prop, None) diff --git a/resotocore/resotocore/model/db_updater.py b/resotocore/resotocore/model/db_updater.py index dbe2db570c..ca0ec8cda5 100644 --- a/resotocore/resotocore/model/db_updater.py +++ b/resotocore/resotocore/model/db_updater.py @@ -188,7 +188,7 @@ async def merge_graph(self, db: DbAccess) -> GraphUpdate: # type: ignore await graphdb.insert_usage_data(builder.usage) _, result = await 
graphdb.merge_graph(builder.graph, model, nxt.change_id, nxt.is_batch) # sizes of model entries have been adjusted during the merge. Update the model in the db. - await model_handler.update_model(graphdb.name, list(model.kinds.values())) + await model_handler.update_model(graphdb.name, list(model.kinds.values()), False) if nxt.task_id and builder.deferred_edges: await outer_edge_db.update( DeferredOuterEdges(uuid_str(), nxt.change_id, nxt.task_id, utc(), nxt.graph, builder.deferred_edges) diff --git a/resotocore/resotocore/model/model.py b/resotocore/resotocore/model/model.py index 40dd07cb66..d0ddd663ef 100644 --- a/resotocore/resotocore/model/model.py +++ b/resotocore/resotocore/model/model.py @@ -40,7 +40,7 @@ from resotocore.util import if_set, utc, duration, first from resotolib.core.model_check import check_overlap_for from resotolib.durations import duration_parser, DurationRe -from resotolib.parse_util import make_parser, variable_dp_backtick, dot_dp +from resotolib.parse_util import make_parser, variable_dp_backtick, dot_dp, l_bracket_p, r_bracket_p from resotolib.utils import is_env_var_string T = TypeVar("T") @@ -155,7 +155,7 @@ def any_prop() -> Property: # Split a variable path into its path parts. 
# foo.bla -> [foo, bla] # foo.`bla.bar` -> [foo, bla.bar] -prop_path_parser = (regex("[^`.]+") | variable_dp_backtick).sep_by(dot_dp) +prop_path_parser = (regex("[^`.]+") | (variable_dp_backtick + (l_bracket_p + r_bracket_p).optional(""))).sep_by(dot_dp) array_index_re = re.compile(r"\[(\d+|\*)]") @@ -186,7 +186,7 @@ def child(self, part: Optional[str]) -> PropertyPath: return PropertyPath(update) def unescaped_parts(self) -> List[str]: - return [p.strip("`") for p in self.path if p is not None] + return [p.rstrip("[]").strip("`") for p in self.path if p is not None] @property def last_part(self) -> Optional[str]: @@ -812,16 +812,9 @@ def check_valid(self, obj: JsonElement, **kwargs: bool) -> ValidationResult: self.inner.check_valid(elem, **kwargs) return coerced - def coerce_if_required(self, value: JsonElement, **kwargs: bool) -> Optional[List[JsonElement]]: + def coerce_if_required(self, value: JsonElement, **kwargs: bool) -> Optional[JsonElement]: has_coerced = False - if value is None: - return None - elif isinstance(value, dict): - return None - elif not isinstance(value, list): - # in case of simple type, we can make it an array - value = [value] - has_coerced = True + mapped: Optional[JsonElement] = None # noqa def check(item: Any) -> ValidationResult: nonlocal has_coerced @@ -832,7 +825,19 @@ def check(item: Any) -> ValidationResult: has_coerced = True return res - mapped = [check(elem) for elem in value] + if value is None: + return None + elif isinstance(value, dict): + return None + elif not isinstance(value, list): + value = check(value) + if kwargs.get("array_creation", True): + mapped = [value] + has_coerced = True + else: # in case only the inner kind should be coerced (see arango_query) + mapped = value + else: + mapped = [check(elem) for elem in value] return mapped if has_coerced else None @staticmethod @@ -948,11 +953,16 @@ def copy( def resolve(self, model: Dict[str, Kind]) -> None: if not self.__resolved: self.__resolved = True + kinds = [] # 
resolve properties for prop in self.properties: kind = prop.resolve(model) - kind.resolve(model) self.__resolved_kinds[prop.name] = (prop, kind) + kinds.append(kind) + + # resolve property kinds + for kind in kinds: + kind.resolve(model) # make sure all successor kinds can be resolved for names in self.successor_kinds.values(): @@ -1468,7 +1478,13 @@ def handle_complex(cx: ComplexKind) -> None: return graph - def update_kinds(self, kinds: List[Kind], check_overlap: bool = True) -> Model: + def update_kinds(self, kinds: List[Kind], check_overlap: bool = True, replace: bool = False) -> Model: + """ + Update the model with the given kinds. The kinds are merged with the existing model. + :param kinds: the kinds to update. + :param check_overlap: true if paths with different kinds should be avoided, otherwise false. + :param replace: if true, the existing model is replaced with the new kinds. + """ # Create a list of kinds that have changed to the existing model to_update = [] @@ -1505,8 +1521,14 @@ def update_is_valid(from_kind: Kind, to_kind: Kind) -> None: raise AttributeError(f"Update {from_kind.fqn} changes an existing property type {from_kind.fqn}") # resolve and build dict + new_kinds = {kind.fqn: kind for kind in kinds} updates = {elem.fqn: elem for elem in to_update} - updated = {**self.kinds, **updates} + filtered_kinds = ( + {k: v for k, v in self.kinds.items() if k in new_kinds or k in predefined_kinds_by_name} + if replace + else self.kinds + ) + updated = {**filtered_kinds, **updates} for elem in to_update: elem.resolve(updated) diff --git a/resotocore/resotocore/model/model_handler.py b/resotocore/resotocore/model/model_handler.py index f10ced1346..b5e8241373 100644 --- a/resotocore/resotocore/model/model_handler.py +++ b/resotocore/resotocore/model/model_handler.py @@ -67,7 +67,7 @@ async def uml_image( """ @abstractmethod - async def update_model(self, graph_name: GraphName, kinds: List[Kind]) -> Model: + async def update_model(self, graph_name: 
GraphName, kinds: List[Kind], replace: bool) -> Model: pass @@ -229,14 +229,17 @@ def add_visible(fn: Callable[[ComplexKind], Set[str]]) -> None: plant_uml = PlantUML(f"{self.plantuml_server}/{output}/") return await run_async(plant_uml.processes, puml) - async def update_model(self, graph_name: GraphName, kinds: List[Kind]) -> Model: + async def update_model(self, graph_name: GraphName, kinds: List[Kind], replace: bool) -> Model: # load existing model model = await self.load_model(graph_name) # make sure the update is valid - updated = model.update_kinds(kinds) + updated = model.update_kinds(kinds, replace=replace) # store all updated kinds db = await self.db_access.get_graph_model_db(graph_name) await db.update_many(kinds) + if deleted := set(model.kinds) - set(updated.kinds): + log.info(f"Deleted kinds: {deleted}") + await db.delete_many(list(deleted)) # unset loaded model self.__loaded_model[graph_name] = updated return updated diff --git a/resotocore/resotocore/report/inspector_service.py b/resotocore/resotocore/report/inspector_service.py index df9fbb462e..c063b11e73 100644 --- a/resotocore/resotocore/report/inspector_service.py +++ b/resotocore/resotocore/report/inspector_service.py @@ -411,14 +411,29 @@ async def validate_check_collection_config(self, json: Json) -> Optional[Json]: for check in ReportCheckCollectionConfig.from_config(ConfigEntity(ResotoReportCheck, json)): try: env = check.default_values or {} + detect = "" if search := check.detect.get("resoto"): + detect = search await self.template_expander.parse_query(search, on_section="reported", env=env) elif cmd := check.detect.get("resoto_cmd"): + detect = cmd await self.cli.evaluate_cli_command(cmd, CLIContext(env=env)) elif check.detect.get("manual"): - pass + continue else: errors.append(f"Check {check.id} neither has a resoto, resoto_cmd or manual defined") + if not check.result_kinds: + errors.append(f"Check {check.id} does not define any result kind") + for rk in check.result_kinds: + if rk 
not in detect: + errors.append(f"Check {check.id} does not detect result kind {rk}") + if not check.remediation.text: + errors.append(f"Check {check.id} does not define any remediation text") + if not check.remediation.url: + errors.append(f"Check {check.id} does not define any remediation url") + for prop in ["id", "title", "risk", "severity"]: + if not getattr(check, prop, None): + errors.append(f"Check {check.id} does not define prop {prop}") except Exception as e: errors.append(f"Check {check.id} is invalid: {e}") if errors: diff --git a/resotocore/resotocore/static/report/benchmark/aws/aws_cis_1_5.json b/resotocore/resotocore/static/report/benchmark/aws/aws_cis_1_5.json index 6d36608c11..b4b3c6dc93 100644 --- a/resotocore/resotocore/static/report/benchmark/aws/aws_cis_1_5.json +++ b/resotocore/resotocore/static/report/benchmark/aws/aws_cis_1_5.json @@ -512,7 +512,7 @@ "title": "5.6 Ensure that EC2 Metadata Service only allows IMDSv2", "description": "When enabling the Metadata Service on AWS EC2 instances, users have the option of using either Instance Metadata Service Version 1 (IMDSv1; a request/response method) or Instance Metadata Service Version 2 (IMDSv2; a session-oriented method).", "checks": [ - "aws_ec2_instance_imdsv2_enabled" + "aws_ec2_instance_uses_imdsv2" ] } ] diff --git a/resotocore/resotocore/static/report/benchmark/aws/aws_well_architected_framework_security_pillar.json b/resotocore/resotocore/static/report/benchmark/aws/aws_well_architected_framework_security_pillar.json new file mode 100644 index 0000000000..e1690bc6ee --- /dev/null +++ b/resotocore/resotocore/static/report/benchmark/aws/aws_well_architected_framework_security_pillar.json @@ -0,0 +1,615 @@ +{ + "title": "AWS Well-Architected Framework Security Pillar", + "framework": "WAF", + "clouds": ["aws"], + "version": "1.0", + "description": "The AWS Well-Architected Framework describes key concepts, design principles, and architectural best practices for designing and running 
workloads in the cloud. The security pillar focuses on protecting information and systems. Key topics include confidentiality and integrity of data, managing user permissions, and establishing controls to detect security events.", + "children": [ + { + "title": "Security", + "description": "The security pillar focuses on protecting information and systems. Key topics include confidentiality and integrity of data, managing user permissions, and establishing controls to detect security events.", + "children": [ + { + "title": "SEC01 How do you securely operate your workload?", + "description": "To operate your workload securely, you must apply overarching best practices to every area of security. Take requirements and processes that you have defined in operational excellence at an organizational and workload level, and apply them to all areas. Staying up to date with AWS and industry recommendations and threat intelligence helps you evolve your threat model and control objectives. Automating security processes, testing, and validation allow you to scale your security operations.", + "children": [ + { + "title": "SEC01-BP01 Separate workloads using accounts", + "description": "Establish common guardrails and isolation between environments (such as production, development, and test) and workloads through a multi-account strategy. Account-level separation is strongly recommended, as it provides a strong isolation boundary for security, billing, and access.", + "checks": [ + "aws_organizations_account_part_of_organizations" + ] + }, + { + "title": "SEC01-BP02 Secure account root user and properties", + "description": "The root user is the most privileged user in an AWS account, with full administrative access to all resources within the account, and in some cases cannot be constrained by security policies. 
Disabling programmatic access to the root user, establishing appropriate controls for the root user, and avoiding routine use of the root user helps reduce the risk of inadvertent exposure of the root credentials and subsequent compromise of the cloud environment.", + "checks": [ + "aws_iam_root_hardware_mfa_enabled", + "aws_iam_root_mfa_enabled", + "aws_iam_no_root_access_key" + ] + }, + { + "title": "SEC01-BP03 Identify and validate control objectives", + "description": "Based on your compliance requirements and risks identified from your threat model, derive and validate the control objectives and controls that you need to apply to your workload. Ongoing validation of control objectives and controls help you measure the effectiveness of risk mitigation.", + "checks": [] + }, + { + "title": "SEC01-BP04 Keep up-to-date with security threats", + "description": "To help you define and implement appropriate controls, recognize attack vectors by staying up to date with the latest security threats. Consume AWS Managed Services to make it easier to receive notification of unexpected or unusual behavior in your AWS accounts. Investigate using AWS Partner tools or third-party threat information feeds as part of your security information flow. The Common Vulnerabilities and Exposures (CVE) List contains publicly disclosed cyber security vulnerabilities that you can use to stay up to date.", + "checks": [] + }, + { + "title": "SEC01-BP05 Keep up-to-date with security recommendations", + "description": "To help you define and implement appropriate controls, recognize attack vectors by staying up to date with the latest security threats. Consume AWS Managed Services to make it easier to receive notification of unexpected or unusual behavior in your AWS accounts. Investigate using AWS Partner tools or third-party threat information feeds as part of your security information flow. 
The Common Vulnerabilities and Exposures (CVE) List contains publicly disclosed cyber security vulnerabilities that you can use to stay up to date.", + "checks": [] + }, + { + "title": "SEC01-BP06 Automate testing and validation of security controls in pipelines", + "description": "Establish secure baselines and templates for security mechanisms that are tested and validated as part of your build, pipelines, and processes. Use tools and automation to test and validate all security controls continuously.", + "checks": [ + "aws_ec2_instance_profile_attached", + "aws_ec2_instance_managed_by_ssm", + "aws_ecr_image_scan_on_push" + ] + }, + { + "title": "SEC01-BP07 Identify threats and prioritize mitigations using a threat model", + "description": "Perform threat modeling to identify and maintain an up-to-date register of potential threats and associated mitigations for your workload. Prioritize your threats and adapt your security control mitigations to prevent, detect, and respond. Revisit and maintain this in the context of your workload, and the evolving security landscape.", + "checks": [] + }, + { + "title": "SEC01-BP08 Evaluate and implement new security services and features regularly", + "description": "Evaluate and implement security services and features from AWS and AWS Partners that allow you to evolve the security posture of your workload. The AWS Security Blog highlights new AWS services and features, implementation guides, and general security guidance.", + "checks": [ + "aws_config_enabled_in_all_regions", + "aws_config_remediation_enabled" + ] + } + ] + }, + { + "title": "SEC02 How do you manage identities for people and machines?", + "description": "There are two types of identities you need to manage when approaching operating secure AWS workloads. Understanding the type of identity you need to manage and grant access helps you ensure the right identities have access to the right resources under the right conditions. 
Human Identities: Your administrators, developers, operators, and end users require an identity to access your AWS environments and applications. These are members of your organization, or external users with whom you collaborate, and who interact with your AWS resources via a web browser, client application, or interactive command-line tools. Machine Identities: Your service applications, operational tools, and workloads require an identity to make requests to AWS services - for example, to read data. These identities include machines running in your AWS environment such as Amazon EC2 instances or AWS Lambda functions. You may also manage machine identities for external parties who need access. Additionally, you may also have machines outside of AWS that need access to your AWS environment.", + "children": [ + { + "title": "SEC02-BP01 Use strong sign-in mechanisms", + "description": "Sign-ins (authentication using sign-in credentials) can present risks when not using mechanisms like multi-factor authentication (MFA), especially in situations where sign-in credentials have been inadvertently disclosed or are easily guessed. 
Use strong sign-in mechanisms to reduce these risks by requiring MFA and strong password policies.", + "checks": [ + "aws_ec2_instance_profile_attached", + "aws_iam_avoid_root_usage", + "aws_iam_no_root_access_key", + "aws_iam_password_policy_lowercase", + "aws_iam_password_policy_minimum_length_14", + "aws_iam_password_policy_number", + "aws_iam_password_policy_reuse_24", + "aws_iam_password_policy_symbol", + "aws_iam_password_policy_uppercase", + "aws_iam_root_mfa_enabled", + "aws_iam_user_hardware_mfa_enabled", + "aws_iam_user_has_two_active_access_keys", + "aws_iam_user_mfa_enabled_console_access", + "aws_iam_user_uses_access_keys_console_access", + "aws_opensearch_domain_uses_cognito_authentication", + "aws_sagemaker_notebook_root_access" + ] + }, + { + "title": "SEC02-BP02 Use temporary credentials", + "description": "When doing any type of authentication, it’s best to use temporary credentials instead of long-term credentials to reduce or eliminate risks, such as credentials being inadvertently disclosed, shared, or stolen.", + "checks": [ + "aws_iam_disable_old_credentials", + "aws_iam_rotate_access_keys_after_90_days", + "aws_iam_unused_access_keys", + "aws_iam_user_has_two_active_access_keys", + "aws_secretsmanager_secret_not_used_90d", + "aws_secretsmanager_secret_not_changed_90d", + "aws_secretsmanager_secret_not_used_90d", + "aws_secretsmanager_secret_rotated_as_scheduled", + "aws_secretsmanager_secret_rotation_enabled" + ] + }, + { + "title": "SEC02-BP03 Store and use secrets securely", + "description": "A workload requires an automated capability to prove its identity to databases, resources, and third-party services. This is accomplished using secret access credentials, such as API access keys, passwords, and OAuth tokens. 
Using a purpose-built service to store, manage, and rotate these credentials helps reduce the likelihood that those credentials become compromised.", + "checks": [ + "aws_cloudformation_no_secrets_in_output", + "aws_ec2_no_secrets_in_instance_user_data", + "aws_ec2_no_secrets_in_launch_template_user_data", + "aws_ecs_no_secrets_in_task_definition_env", + "aws_lambda_no_secrets_in_variables", + "aws_ssm_no_secrets_in_content" + ] + }, + { + "title": "SEC02-BP04 Rely on a centralized identity provider", + "description": "For workforce identities, rely on an identity provider that enables you to manage identities in a centralized place. This makes it easier to manage access across multiple applications and services, because you are creating, managing, and revoking access from a single location. For example, if someone leaves your organization, you can revoke access for all applications and services (including AWS) from one location. This reduces the need for multiple credentials and provides an opportunity to integrate with existing human resources (HR) processes. For federation with individual AWS accounts, you can use centralized identities for AWS with a SAML 2.0-based provider with AWS Identity and Access Management. You can use any provider— whether hosted by you in AWS, external to AWS, or supplied by the AWS Partner—that is compatible with the SAML 2.0 protocol. You can use federation between your AWS account and your chosen provider to grant a user or application access to call AWS API operations by using a SAML assertion to get temporary security credentials. Web-based single sign-on is also supported, allowing users to sign in to the AWS Management Console from your sign in website. For federation to multiple accounts in your AWS Organizations, you can configure your identity source in AWS IAM Identity Center (successor to AWS Single Sign-On) (IAM Identity Center), and specify where your users and groups are stored. 
Once configured, your identity provider is your source of truth, and information can be synchronized using the System for Cross-domain Identity Management (SCIM) v2.0 protocol. You can then look up users or groups and grant them IAM Identity Center access to AWS accounts, cloud applications, or both. IAM Identity Center integrates with AWS Organizations, which enables you to configure your identity provider once and then grant access to existing and new accounts managed in your organization. IAM Identity Center provides you with a default store, which you can use to manage your users and groups. If you choose to use the IAM Identity Center store, create your users and groups and assign their level of access to your AWS accounts and applications, keeping in mind the best practice of least privilege. Alternatively, you can choose to Connect to Your External Identity Provider using SAML 2.0, or Connect to Your Microsoft AD Directory using AWS Directory Service. Once configured, you can sign into the AWS Management Console, or the AWS mobile app, by authenticating through your central identity provider. For managing end-users or consumers of your workloads, such as a mobile app, you can use Amazon Cognito. It provides authentication, authorization, and user management for your web and mobile apps. Your users can sign in directly with sign-in credentials, or through a third party, such as Amazon, Apple, Facebook, or Google.", + "checks": [ + ] + }, + { + "title": "SEC02-BP05 Audit and rotate credentials periodically", + "description": "Audit and rotate credentials periodically to limit how long the credentials can be used to access your resources. 
Long-term credentials create many risks, and these risks can be reduced by rotating long-term credentials regularly.", + "checks": [ + "aws_iam_rotate_access_keys_after_90_days", + "aws_kms_key_rotation_enabled", + "aws_secretsmanager_secret_rotation_enabled" + ] + }, + { + "title": "SEC02-BP06 Leverage user groups and attributes", + "description": "As the number of users you manage grows, you will need to determine ways to organize them so that you can manage them at scale. Place users with common security requirements in groups defined by your identity provider, and put mechanisms in place to ensure that user attributes that may be used for access control (for example, department or location) are correct and updated. Use these groups and attributes to control access, rather than individual users. This allows you to manage access centrally by changing a user's group membership or attributes once with a permission set, rather than updating many individual policies when a user's access needs change. You can use AWS IAM Identity Center (successor to AWS Single Sign-On) (IAM Identity Center) to manage user groups and attributes. IAM Identity Center supports most commonly used attributes whether they are entered manually during user creation or automatically provisioned using a synchronization engine, such as defined in the System for Cross-Domain Identity Management (SCIM) specification.", + "checks": [ + "aws_iam_policy_attached_only_to_group_or_roles" + ] + } + ] + }, + { + "title": "SEC03 How do you manage permissions for people and machines?", + "description": "Manage permissions to control access to people and machine identities that require access to AWS and your workload. Permissions control who can access what, and under what conditions.", + "children": [ + { + "title": "SEC03-BP01 Define access requirements", + "description": "Each component or resource of your workload needs to be accessed by administrators, end users, or other components. 
Have a clear definition of who or what should have access to each component, choose the appropriate identity type and method of authentication and authorization.", + "checks": [ + "aws_ec2_instance_uses_imdsv2", + "aws_ec2_instance_profile_attached", + "aws_cloudwatch_cross_account_sharing_enabled", + "aws_ecs_host_mode_not_privileged_nor_root" + ] + }, + { + "title": "SEC03-BP02 Grant least privilege access", + "description": "It's a best practice to grant only the access that identities require to perform specific actions on specific resources under specific conditions. Use group and identity attributes to dynamically set permissions at scale, rather than defining permissions for individual users. Users should only have the permissions required to do their job. Users should only be given access to production environments to perform a specific task within a limited time period, and access should be revoked once that task is complete. Permissions should be revoked when no longer needed, including when a user moves onto a different project or job function. Administrator privileges should be given only to a small group of trusted administrators. Permissions should be reviewed regularly to avoid permission creep. Machine or system accounts should be given the smallest set of permissions needed to complete their tasks.", + "checks": [ + "aws_ec2_instance_profile_attached", + "aws_ecs_writable_root_filesystem_in_ecs_tasks", + "aws_iam_group_inline_policy_no_star_star", + "aws_iam_policy_with_administrative_privileges_not_in_use", + "aws_iam_role_inline_policy_no_star_star", + "aws_iam_user_inline_policy_no_star_star", + "aws_opensearch_domain_does_not_use_internal_user_database" + ] + }, + { + "title": "SEC03-BP03 Establish emergency access process", + "description": "A process that allows emergency access to your workload in the unlikely event of an automated process or pipeline issue. 
This will help you rely on least privilege access, but ensure users can obtain the right level of access when they require it. For example, establish a process for administrators to verify and approve their request, such as an emergency AWS cross-account role for access, or a specific process for administrators to follow to validate and approve an emergency request.", + "checks": [ + "aws_iam_account_maintain_current_contact_details", + "aws_iam_account_security_contact_information_is_registered", + "aws_iam_account_security_questions_are_registered_in_the_aws_account" + ] + }, + { + "title": "SEC03-BP04 Reduce permissions continuously", + "description": "As your teams determine what access is required, remove unneeded permissions and establish review processes to achieve least privilege permissions. Continually monitor and remove unused identities and permissions for both human and machine access. Permission policies should adhere to the least privilege principle. As job duties and roles become better defined, your permission policies need to be reviewed to remove unnecessary permissions. This approach lessens the scope of impact should credentials be inadvertently exposed or otherwise accessed without authorization.", + "checks": [ + "aws_iam_policy_with_administrative_privileges_not_in_use" + ] + }, + { + "title": "SEC03-BP05 Define permission guardrails for your organization", + "description": "Establish common controls that restrict access to all identities in your organization. 
For example, you can restrict access to specific AWS Regions, or prevent your operators from deleting common resources, such as an IAM role used for your central security team.", + "checks": [ + "aws_iam_disable_old_credentials", + "aws_iam_unused_access_keys", + "aws_organizations_account_part_of_organizations" + ] + }, + { + "title": "SEC03-BP06 Manage access based on lifecycle", + "description": "Integrate access controls with operator and application lifecycle and your centralized federation provider. For example, remove a user's access when they leave the organization or change roles. AWS RAM, access to shared resources is automatically granted or revoked as accounts are moved in and out of the Organization or Organization Unit with which they are shared. This helps ensure that resources are only shared with the accounts that you intend.", + "checks": [ + "aws_cloudwatch_log_group_retention_days_at_least_365", + "aws_dms_public_ip_address", + "aws_ec2_unused_elastic_ip", + "aws_ecr_repository_lifecycle_policy_enabled", + "aws_elb_alb_has_no_listeners", + "aws_elb_elb_has_no_listeners", + "aws_iam_password_policy_expire_90", + "aws_iam_rotate_access_keys_after_90_days", + "aws_iam_unused_access_keys" + ] + }, + { + "title": "SEC03-BP07 Analyze public and cross-account access", + "description": "Continually monitor findings that highlight public and cross-account access. Reduce public access and cross-account access to only the specific resources that require this access. Know which of your AWS resources are shared and with whom. 
Continually monitor and audit your shared resources to verify they are shared with only authorized principals.", + "checks": [ + "aws_autoscaling_launch_template_public_ip_disabled", + "aws_cloudtrail_logs_s3_bucket_is_not_publicly_accessible", + "aws_dms_public_ip_address", + "aws_ec2_image_public", + "aws_ec2_instance_in_vpc", + "aws_ec2_public_ip_address", + "aws_ec2_snapshot_encrypted", + "aws_ec2_volume_not_encrypted", + "aws_ec2_wide_open_ipv4_security_group", + "aws_ecr_repository_prohibit_public_access", + "aws_efs_not_publicly_accessible", + "aws_eks_cluster_endpoint_restrict_public_access", + "aws_glacier_vaults_policy_public_access", + "aws_kms_cmk_policy_prohibit_public_access", + "aws_lambda_cors_policy", + "aws_lambda_function_in_vpc", + "aws_lambda_function_restrict_public_access", + "aws_opensearch_domain_is_not_publicly_accessible", + "aws_redshift_cluster_publicly_accessible", + "aws_rds_no_public_access", + "aws_rds_snapshot_not_public", + "aws_sagemaker_notebook_with_direct_internet_access", + "aws_s3_account_level_public_access_blocks", + "aws_s3_bucket_policy_public_write_access", + "aws_s3_public_bucket", + "aws_secretsmanager_secret_not_used_90d", + "aws_ssm_document_is_shared" + ] + }, + { + "title": "SEC03-BP08 Share resources securely within your organization", + "description": "As the number of workloads grows, you might need to share access to resources in those workloads or provision the resources multiple times across multiple accounts. You might have constructs to compartmentalize your environment, such as having development, testing, and production environments. However, having separation constructs does not limit you from being able to share securely. 
By sharing components that overlap, you can reduce operational overhead and allow for a consistent experience without guessing what you might have missed while creating the same resource multiple times.", + "checks": [ + "aws_dms_public_ip_address", + "aws_ec2_instance_in_vpc", + "aws_lambda_function_in_vpc", + "aws_lambda_publicly_accessible_permissions", + "aws_opensearch_domain_is_not_publicly_accessible", + "aws_sagemaker_notebook_with_direct_internet_access", + "aws_secretsmanager_secret_not_used_90d", + "aws_ssm_no_secrets_in_content" + ] + } + ] + }, + { + "title": "SEC04 How do you detect and investigate security events?", + "description": "Capture and analyze events from logs and metrics to gain visibility. Take action on security events and potential threats to help secure your workload.", + "children": [ + { + "title": "SEC04-BP01 Configure service and application logging", + "description": "Retain security event logs from services and applications. This is a fundamental principle of security for audit, investigations, and operational use cases, and a common security requirement driven by governance, risk, and compliance (GRC) standards, policies, and procedures. An organization should be able to reliably and consistently retrieve security event logs from AWS services and applications in a timely manner when required to fulfill an internal process or obligation, such as a security incident response. 
Consider centralizing logs for better operational results.", + "checks": [ + "aws_apigateway_logging_enabled", + "aws_cloudfront_distribution_logging_enabled", + "aws_cloudtrail_log_file_validation_enabled", + "aws_cloudtrail_no_logging_enabled", + "aws_cloudtrail_lambda_logging_enabled", + "aws_cloudtrail_s3_data_events_read_enabled", + "aws_cloudtrail_s3_data_events_write_enabled", + "aws_acm_certificate_transparency_logging_enabled", + "aws_ec2_vpc_flow_logs_enabled", + "aws_eks_cluster_control_plane_audit_logging_enabled", + "aws_elb_logging_enabled", + "aws_opensearch_audit_logs_enabled", + "aws_rds_db_instance_cloudwatch_logs_enabled", + "aws_redshift_cluster_audit_logging_enabled", + "aws_route53_zone_query_logging_enabled", + "aws_s3_bucket_object_logging_enabled" + ] + }, + { + "title": "SEC04-BP02 Analyze logs, findings, and metrics centrally", + "description": "Security operations teams rely on the collection of logs and the use of search tools to discover potential events of interest, which might indicate unauthorized activity or unintentional change. However, simply analyzing collected data and manually processing information is insufficient to keep up with the volume of information flowing from complex architectures. Analysis and reporting alone don’t facilitate the assignment of the right resources to work an event in a timely fashion.", + "checks": [ + "aws_cloudtrail_logging_in_all_regions_enabled", + "aws_rds_db_instance_cloudwatch_logs_enabled", + "aws_ec2_vpc_flow_logs_enabled", + "aws_wafv2_web_acl_logging_enabled", + "aws_redshift_cluster_audit_logging_enabled", + "aws_config_enabled_in_all_regions" + ] + }, + { + "title": "SEC04-BP03 Automate response to events", + "description": "Using automation to investigate and remediate events reduces human effort and error, and allows you to scale investigation capabilities. Regular reviews will help you tune automation tools, and continuously iterate. 
In AWS, investigating events of interest and information on potentially unexpected changes into an automated workflow can be achieved using Amazon EventBridge. This service provides a scalable rules engine designed to broker both native AWS event formats (such as AWS CloudTrail events), as well as custom events you can generate from your application. Amazon GuardDuty also allows you to route events to a workflow system for those building incident response systems (AWS Step Functions), or to a central Security Account, or to a bucket for further analysis.", + "checks": [ + "aws_cloudtrail_logging_in_all_regions_enabled", + "aws_ec2_vpc_flow_logs_enabled", + "aws_elb_logging_enabled", + "aws_rds_db_instance_cloudwatch_logs_enabled", + "aws_redshift_cluster_audit_logging_enabled", + "aws_wafv2_web_acl_logging_enabled" + ] + }, + { + "title": "SEC04-BP04 Implement actionable security events", + "description": "Create alerts that are sent to and can be actioned by your team. Ensure that alerts include relevant information for the team to take action. For each detective mechanism you have, you should also have a process, in the form of a runbook or playbook, to investigate. For example, when you enable Amazon GuardDuty, it generates different findings. 
You should have a runbook entry for each finding type, for example, if a trojan is discovered, your runbook has simple instructions that instruct someone to investigate and remediate.", + "checks": [] + } + ] + }, + { + "title": "SEC05 How do you protect your network resources?", + "description": "Any workload that has some form of network connectivity, whether it's the internet or a private network, requires multiple layers of defense to help protect from external and internal network-based threats.", + "children": [ + { + "title": "SEC05-BP01 Create network layers", + "description": "Group components that share sensitivity requirements into layers to minimize the potential scope of impact of unauthorized access. For example, a database cluster in a virtual private cloud (VPC) with no need for internet access should be placed in subnets with no route to or from the internet. Traffic should only flow from the adjacent next least sensitive resource. Consider a web application sitting behind a load balancer. Your database should not be accessible directly from the load balancer. Only the business logic or web server should have direct access to your database.", + "checks": [ + "aws_apigateway_waf_acl_attached", + "aws_cloudfront_distribution_waf_enabled", + "aws_ec2_instance_in_vpc", + "aws_eks_cluster_endpoint_restrict_public_access", + "aws_lambda_publicly_accessible_permissions", + "aws_opensearch_domain_is_not_publicly_accessible", + "aws_sagemaker_model_isolation_enabled", + "aws_sagemaker_model_vpc_settings_enabled", + "aws_sagemaker_notebook_vpc_settings_enabled", + "aws_sagemaker_training_job_vpc_settings_enabled", + "aws_elb_waf_enabled" + ] + }, + { + "title": "SEC05-BP02 Control traffic at all layers", + "description": "When architecting your network topology, you should examine the connectivity requirements of each component. 
For example, if a component requires internet accessibility (inbound and outbound), connectivity to VPCs, edge services, and external data centers. A VPC allows you to define your network topology that spans an AWS Region with a private IPv4 address range that you set, or an IPv6 address range AWS selects. You should apply multiple controls with a defense in depth approach for both inbound and outbound traffic, including the use of security groups (stateful inspection firewall), Network ACLs, subnets, and route tables. Within a VPC, you can create subnets in an Availability Zone. Each subnet can have an associated route table that defines routing rules for managing the paths that traffic takes within the subnet. You can define an internet routable subnet by having a route that goes to an internet or NAT gateway attached to the VPC, or through another VPC.", + "checks": [ + "aws_apigateway_authorizers_enabled", + "aws_dms_public_ip_address", + "aws_ec2_default_restrict_traffic", + "aws_ec2_public_ip_address", + "aws_ec2_snapshot_encrypted", + "aws_ec2_subnet_auto_assign_public_ip_disabled", + "aws_s3_account_level_public_access_blocks", + "aws_s3_bucket_no_mfa_delete", + "aws_sagemaker_notebook_with_direct_internet_access" + ] + }, + { + "title": "SEC05-BP03 Automate network protection", + "description": "Automate protection mechanisms to provide a self-defending network based on threat intelligence and anomaly detection. For example, intrusion detection and prevention tools that can adapt to current threats and reduce their impact. 
A web application firewall is an example of where you can automate network protection, for example, by using the AWS WAF Security Automations solution to automatically block requests originating from IP addresses associated with known threat actors.", + "checks": [ + "aws_ec2_allow_ingress_from_internet_to_any_port", + "aws_ec2_allow_ingress_from_internet_to_port_mongodb_27017_27018", + "aws_ec2_allow_ingress_from_internet_to_ssh_port_22", + "aws_ec2_allow_ingress_from_internet_to_ftp_port_20_21", + "aws_ec2_allow_ingress_from_internet_to_rdp_port_3389", + "aws_ec2_allow_ingress_from_internet_to_cassandra_ports", + "aws_ec2_allow_ingress_from_internet_to_kibana_ports", + "aws_ec2_allow_ingress_from_internet_to_kafka_ports", + "aws_ec2_allow_ingress_from_internet_to_memcached_ports", + "aws_ec2_allow_ingress_from_internet_to_mysql_ports", + "aws_ec2_allow_ingress_from_internet_to_oracledb", + "aws_ec2_allow_ingress_from_internet_to_postgresql_ports", + "aws_ec2_allow_ingress_from_internet_to_redis_ports", + "aws_ec2_allow_ingress_from_internet_to_sql_server_ports", + "aws_ec2_allow_ingress_from_internet_to_telnet_ports", + "aws_ec2_default_restrict_traffic", + "aws_dms_public_ip_address", + "aws_ec2_subnet_auto_assign_public_ip_disabled", + "aws_elb_waf_enabled", + "aws_apigateway_waf_acl_attached" + ] + }, + { + "title": "SEC05-BP04 Implement inspection and protection", + "description": "Inspect and filter your traffic at each layer. You can inspect your VPC configurations for potential unintended access using VPC Network Access Analyzer. You can specify your network access requirements and identify potential network paths that do not meet them. For components transacting over HTTP-based protocols, a web application firewall can help protect from common attacks. 
AWS WAF is a web application firewall that lets you monitor and block HTTP(s) requests that match your configurable rules that are forwarded to an Amazon API Gateway API, Amazon CloudFront, or an Application Load Balancer. To get started with AWS WAF, you can use AWS Managed Rules in combination with your own, or use existing partner integrations.", + "checks": [ + "aws_iam_guardduty_enabled", + "aws_ec2_vpc_flow_logs_enabled", + "aws_apigateway_authorizers_enabled" + ] + } + ] + }, + { + "title": "SEC06 How do you protect your compute resources?", + "description": "Compute resources in your workload require multiple layers of defense to help protect from external and internal threats. Compute resources include EC2 instances, containers, AWS Lambda functions, database services, IoT devices, and more.", + "children": [ + { + "title": "SEC06-BP01 Perform vulnerability management", + "description": "Frequently scan and patch for vulnerabilities in your code, dependencies, and in your infrastructure to help protect against new threats. Create and maintain a vulnerability management program. Regularly scan and patch resources such as Amazon EC2 instances, Amazon Elastic Container Service (Amazon ECS) containers, and Amazon Elastic Kubernetes Service (Amazon EKS) workloads. Configure maintenance windows for AWS managed resources, such as Amazon Relational Database Service (Amazon RDS) databases. Use static code scanning to inspect application source code for common issues. 
Consider web application penetration testing if your organization has the requisite skills or can hire outside assistance.", + "checks": [ + "aws_cloudtrail_log_file_validation_enabled", + "aws_cloudtrail_logging_in_all_regions_enabled", + "aws_ec2_instance_uses_imdsv2", + "aws_ec2_internet_facing_with_instance_profile", + "aws_opensearch_update_available", + "aws_rds_db_instance_automatic_minor_version_upgrade_enabled", + "aws_redshift_version_upgrade_enabled", + "aws_ssm_managed_instance_compliance_patch_compliant", + "aws_ssm_resource_non_compliant" + ] + }, + { + "title": "SEC06-BP02 Reduce attack surface", + "description": "Reduce your exposure to unintended access by hardening operating systems and minimizing the components, libraries, and externally consumable services in use. Start by reducing unused components, whether they are operating system packages or applications, for Amazon Elastic Compute Cloud (Amazon EC2)-based workloads, or external software modules in your code, for all workloads. You can find many hardening and security configuration guides for common operating systems and server software. For example, you can start with the Center for Internet Security and iterate.", + "checks": [ + "aws_ecr_image_scan_on_push", + "aws_ecs_ecs_cluster_container_insights_enabled", + "aws_ecs_ecs_service_fargate_using_latest_platform_version", + "aws_lambda_function_in_vpc", + "aws_lambda_publicly_accessible_permissions" + ] + }, + { + "title": "SEC06-BP03 Implement managed services", + "description": "Implement services that manage resources, such as Amazon Relational Database Service (Amazon RDS), AWS Lambda, and Amazon Elastic Container Service (Amazon ECS), to reduce your security maintenance tasks as part of the shared responsibility model. For example, Amazon RDS helps you set up, operate, and scale a relational database, automates administration tasks such as hardware provisioning, database setup, patching, and backups. 
This means you have more free time to focus on securing your application in other ways described in the AWS Well-Architected Framework. Lambda lets you run code without provisioning or managing servers, so you only need to focus on the connectivity, invocation, and security at the code level–not the infrastructure or operating system.", + "checks": [] + }, + { + "title": "SEC06-BP04 Automate compute protection", + "description": "Automate your protective compute mechanisms including vulnerability management, reduction in attack surface, and management of resources. The automation will help you invest time in securing other aspects of your workload, and reduce the risk of human error.", + "checks": [ + "aws_ec2_instance_profile_attached", + "aws_ec2_instance_stopped_since_30d", + "aws_ec2_internet_facing_with_instance_profile", + "aws_ec2_instance_managed_by_ssm" + ] + }, + { + "title": "SEC06-BP05 Enable people to perform actions at a distance", + "description": "Removing the ability for interactive access reduces the risk of human error, and the potential for manual configuration or management. For example, use a change management workflow to deploy Amazon Elastic Compute Cloud (Amazon EC2) instances using infrastructure-as-code, then manage Amazon EC2 instances using tools such as AWS Systems Manager instead of allowing direct access or through a bastion host. AWS Systems Manager can automate a variety of maintenance and deployment tasks, using features including automation workflows, documents (playbooks), and the run command. 
AWS CloudFormation stacks build from pipelines and can automate your infrastructure deployment and management tasks without using the AWS Management Console or APIs directly.", + "checks": [ + "aws_ec2_instance_profile_attached", + "aws_ec2_instance_stopped_since_30d", + "aws_ec2_internet_facing_with_instance_profile", + "aws_ec2_instance_managed_by_ssm" + ] + }, + { + "title": "SEC06-BP06 Validate software integrity", + "description": "Implement mechanisms (for example, code signing) to validate that the software, code and libraries used in the workload are from trusted sources and have not been tampered with. For example, you should verify the code signing certificate of binaries and scripts to confirm the author, and ensure it has not been tampered with since created by the author. AWS Signer can help ensure the trust and integrity of your code by centrally managing the code- signing lifecycle, including signing certification and public and private keys. You can learn how to use advanced patterns and best practices for code signing with AWS Lambda. Additionally, a checksum of software that you download, compared to that of the checksum from the provider, can help ensure it has not been tampered with.", + "checks": [ + "aws_ec2_ebs_volume_unused", + "aws_ssm_managed_instance_compliance_patch_compliant", + "aws_ec2_instance_managed_by_ssm", + "aws_cloudtrail_log_file_validation_enabled" + ] + } + ] + }, + { + "title": "SEC08 How do you protect your data at rest?", + "description": "Protect your data at rest by implementing multiple controls, to reduce the risk of unauthorized access or mishandling.", + "children": [ + { + "title": "SEC08-BP01 Implement secure key management", + "description": "By defining an encryption approach that includes the storage, rotation, and access control of keys, you can help provide protection for your content against unauthorized users and against unnecessary exposure to authorized users. 
AWS Key Management Service (AWS KMS) helps you manage encryption keys and integrates with many AWS services. This service provides durable, secure, and redundant storage for your AWS KMS keys. You can define your key aliases as well as key-level policies. The policies help you define key administrators as well as key users. Additionally, AWS CloudHSM is a cloud-based hardware security module (HSM) that allows you to easily generate and use your own encryption keys in the AWS Cloud. It helps you meet corporate, contractual, and regulatory compliance requirements for data security by using FIPS 140-2 Level 3 validated HSMs.", + "checks": [ + "aws_kms_key_not_pending_deletion" + ] + }, + { + "title": "SEC08-BP02 Enforce encryption at rest", + "description": "You should enforce the use of encryption for data at rest. Encryption maintains the confidentiality of sensitive data in the event of unauthorized access or accidental disclosure. Private data should be encrypted by default when at rest. Encryption helps maintain confidentiality of the data and provides an additional layer of protection against intentional or inadvertent data disclosure or exfiltration. Data that is encrypted cannot be read or accessed without first unencrypting the data. 
Any data stored unencrypted should be inventoried and controlled.", + "checks": [ + "aws_cloudtrail_uses_encryption_at_rest", + "aws_cloudwatch_log_group_encryption_at_rest_enabled", + "aws_dynamodb_table_kms_encryption_enabled", + "aws_ec2_volume_not_encrypted", + "aws_efs_storage_encrypted", + "aws_eks_cluster_encryption_enabled", + "aws_opensearch_encryption_at_rest_enabled", + "aws_rds_snapshot_not_public", + "aws_rds_storage_encrypted", + "aws_sagemaker_notebook_encryption_enabled", + "aws_sagemaker_training_job_encryption_enabled", + "aws_sagemaker_training_job_volume_encryption_enabled", + "aws_sqs_server_side_encryption_enabled" + ] + }, + { + "title": "SEC08-BP03 Automate data at rest protection", + "description": "Use automated tools to validate and enforce data at rest controls continuously, for example, verify that there are only encrypted storage resources. You can automate validation that all EBS volumes are encrypted using AWS Config Rules. AWS Security Hub can also verify several different controls through automated checks against security standards. Additionally, your AWS Config Rules can automatically remediate noncompliant resources.", + "checks": [ + "aws_s3_bucket_default_encryption", + "aws_sagemaker_notebook_encryption_enabled" + ] + }, + { + "title": "SEC08-BP04 Enforce access control", + "description": "To help protect your data at rest, enforce access control using mechanisms, such as isolation and versioning, and apply the principle of least privilege. Prevent the granting of public access to your data. Verify that only authorized users can access data on a need-to-know basis. Protect your data with regular backups and versioning to prevent against intentional or inadvertent modification or deletion of data. 
Isolate critical data from other data to protect its confidentiality and data integrity.", + "checks": [ + "aws_s3_bucket_no_mfa_delete", + "aws_sns_encryption_at_rest_enabled", + "aws_organizations_account_part_of_organizations" + ] + } + ] + }, + { + "title": "SEC09 How do you protect your data in transit?", + "description": "Protect your data in transit by implementing multiple controls to reduce the risk of unauthorized access or loss.", + "children": [ + { + "title": "SEC09-BP01 Implement secure key and certificate management", + "description": "Store encryption keys and certificates securely and rotate them at appropriate time intervals with strict access control. The best way to accomplish this is to use a managed service, such as AWS Certificate Manager (ACM). It lets you easily provision, manage, and deploy public and private Transport Layer Security (TLS) certificates for use with AWS services and your internal connected resources. TLS certificates are used to secure network communications and establish the identity of websites over the internet as well as resources on private networks. ACM integrates with AWS resources, such as Elastic Load Balancers (ELBs), AWS distributions, and APIs on API Gateway, also handling automatic certificate renewals. If you use ACM to deploy a private root CA, both certificates and private keys can be provided by it for use in Amazon Elastic Compute Cloud (Amazon EC2) instances, containers, and so on.", + "checks": [ + "aws_elb_uses_ssl_certificate" + ] + }, + { + "title": "SEC09-BP02 Enforce encryption in transit", + "description": "Enforce your defined encryption requirements based on your organization's policies, regulatory obligations and standards to help meet organizational, legal, and compliance requirements. Only use protocols with encryption when transmitting sensitive data outside of your virtual private cloud (VPC). Encryption helps maintain data confidentiality even when the data transits untrusted networks. 
All data should be encrypted in transit using secure TLS protocols and cipher suites. Network traffic between your resources and the internet must be encrypted to mitigate unauthorized access to the data. Network traffic solely within your internal AWS environment should be encrypted using TLS wherever possible.", + "checks": [ + "aws_cloudtrail_uses_encryption_at_rest", + "aws_s3_bucket_default_encryption", + "aws_apigateway_client_certificate_enabled", + "aws_rds_storage_encrypted", + "aws_elb_uses_ssl_certificate", + "aws_s3_bucket_secure_transport_policy" + ] + }, + { + "title": "SEC09-BP03 Automate detection of unintended data access", + "description": "Use tools such as Amazon GuardDuty to automatically detect suspicious activity or attempts to move data outside of defined boundaries. For example, GuardDuty can detect Amazon Simple Storage Service (Amazon S3) read activity that is unusual with the Exfiltration:S3/AnomalousBehavior finding. In addition to GuardDuty, Amazon VPC Flow Logs, which capture network traffic information, can be used with Amazon EventBridge to detect connections, both successful and denied. Amazon S3 Access Analyzer can help assess what data is accessible to who in your Amazon S3 buckets.", + "checks": [ + "aws_iam_guardduty_enabled" + ] + }, + { + "title": "SEC09-BP04 Authenticate network communications", + "description": "Verify the identity of communications by using protocols that support authentication, such as Transport Layer Security (TLS) or IPsec. Using network protocols that support authentication, allows for trust to be established between the parties. This adds to the encryption used in the protocol to reduce the risk of communications being altered or intercepted. 
Common protocols that implement authentication include Transport Layer Security (TLS), which is used in many AWS services, and IPsec, which is used in AWS Virtual Private Network (AWS VPN).", + "checks": [ + "aws_elb_uses_ssl_certificate", + "aws_ec2_vpc_flow_logs_enabled" + ] + } + ] + }, + { + "title": "SEC10 How do you anticipate, respond to, and recover from incidents?", + "description": "Preparation is critical to timely and effective investigation, response to, and recovery from security incidents to help minimize disruption to your organization.", + "children": [ + { + "title": "SEC10-BP01 Identify key personnel and external resources", + "description": "Identify internal and external personnel, resources, and legal obligations that would help your organization respond to an incident. When you define your approach to incident response in the cloud, in unison with other teams (such as your legal counsel, leadership, business stakeholders, AWS Support Services, and others), you must identify key personnel, stakeholders, and relevant contacts. To reduce dependency and decrease response time, make sure that your team, specialist security teams, and responders are educated about the services that you use and have opportunities to practice hands-on.", + "checks": [ + "aws_iam_account_maintain_current_contact_details", + "aws_iam_account_security_contact_information_is_registered", + "aws_iam_account_security_questions_are_registered_in_the_aws_account", + "aws_iam_support_role_exists" + ] + }, + { + "title": "SEC10-BP02 Develop incident management plans", + "description": "Create plans to help you respond to, communicate during, and recover from an incident. For example, you can start an incident response plan with the most likely scenarios for your workload and organization. 
Include how you would communicate and escalate both internally and externally.", + "checks": [] + }, + { + "title": "SEC10-BP03 Prepare forensic capabilities", + "description": "It's important for your incident responders to understand when and how the forensic investigation fits into your response plan. Your organization should define what evidence is collected and what tools are used in the process. Identify and prepare forensic investigation capabilities that are suitable, including external specialists, tools, and automation. A key decision that you should make upfront is if you will collect data from a live system. Some data, such as the contents of volatile memory or active network connections, will be lost if the system is powered off or rebooted.Your response team can combine tools, such as AWS Systems Manager, Amazon EventBridge, and AWS Lambda, to automatically run forensic tools within an operating system and VPC traffic mirroring to obtain a network packet capture, to gather non-persistent evidence. Conduct other activities, such as log analysis or analyzing disk images, in a dedicated security account with customized forensic workstations and tools accessible to your responders.Routinely ship relevant logs to a data store that provides high durability and integrity. Responders should have access to those logs. AWS offers several tools that can make log investigation easier, such as Amazon Athena, Amazon OpenSearch Service (OpenSearch Service), and Amazon CloudWatch Logs Insights. Additionally, preserve evidence securely using Amazon Simple Storage Service (Amazon S3) Object Lock. This service follows the WORM (write-once- read-many) model and prevents objects from being deleted or overwritten for a defined period. 
As forensic investigation techniques require specialist training, you might need to engage external specialists.", + "checks": [] + }, + { + "title": "SEC10-BP04", + "description": "", + "checks": [] + }, + { + "title": "SEC10-BP05 Pre-provision access", + "description": "Verify that incident responders have the correct access pre-provisioned in AWS to reduce the time needed for investigation through to recovery.Common anti-patterns:Using the root account for incident response.Altering existing accounts.Manipulating IAM permissions directly when providing just-in-time privilege elevation.", + "checks": [] + }, + { + "title": "SEC10-BP06 Pre-deploy tools", + "description": "Ensure that security personnel have the right tools pre-deployed into AWS to reduce the time for investigation through to recovery.To automate security engineering and operations functions, you can use a comprehensive set of APIs and tools from AWS. You can fully automate identity management, network security, data protection, and monitoring capabilities and deliver them using popular software development methods that you already have in place. When you build security automation, your system can monitor, review, and initiate a response, rather than having people monitor your security position and manually react to events. An effective way to automatically provide searchable and relevant log data across AWS services to your incident responders is to enable Amazon Detective.If your incident response teams continue to respond to alerts in the same way, they risk alert fatigue. Over time, the team can become desensitized to alerts and can either make mistakes handling ordinary situations or miss unusual alerts. Automation helps avoid alert fatigue by using functions that process the repetitive and ordinary alerts, leaving humans to handle the sensitive and unique incidents. 
Integrating anomaly detection systems, such as Amazon GuardDuty, AWS CloudTrail Insights, and Amazon CloudWatch Anomaly Detection, can reduce the burden of common threshold-based alerts.You can improve manual processes by programmatically automating steps in the process. After you define the remediation pattern to an event, you can decompose that pattern into actionable logic, and write the code to perform that logic. Responders can then execute that code to remediate the issue. Over time, you can automate more and more steps, and ultimately automatically handle whole classes of common incidents.For tools that execute within the operating system of your Amazon Elastic Compute Cloud (Amazon EC2) instance, you should evaluate using the AWS Systems Manager Run Command, which enables you to remotely and securely administrate instances using an agent that you install on your Amazon EC2 instance operating system. It requires the Systems Manager Agent (SSM Agent), which is installed by default on many Amazon Machine Images (AMIs). Be aware, though, that once an instance has been compromised, no responses from tools or agents running on it should be considered trustworthy.", + "checks": [] + }, + { + "title": "SEC10-BP07 Run game days", + "description": "Game days, also known as simulations or exercises, are internal events that provide a structured opportunity to practice your incident management plans and procedures during a realistic scenario. These events should exercise responders using the same tools and techniques that would be used in a real-world scenario - even mimicking real-world environments. Game days are fundamentally about being prepared and iteratively improving your response capabilities. 
Some of the reasons you might find value in performing game day activities include:Validating readinessDeveloping confidence – learning from simulations and training staffFollowing compliance or contractual obligationsGenerating artifacts for accreditationBeing agile – incremental improvementBecoming faster and improving toolsRefining communication and escalationDeveloping comfort with the rare and the unexpectedFor these reasons, the value derived from participating in a simulation activity increases an organization's effectiveness during stressful events. Developing a simulation activity that is both realistic and beneficial can be a difficult exercise. Although testing your procedures or automation that handles well-understood events has certain advantages, it is just as valuable to participate in creative Security Incident Response Simulations (SIRS) activities to test yourself against the unexpected and continuously improve.Create custom simulations tailored to your environment, team, and tools. Find an issue and design your simulation around it. This could be something like a leaked credential, a server communicating with unwanted systems, or a misconfiguration that results in unauthorized exposure. Identify engineers who are familiar with your organization to create the scenario and another group to participate. The scenario should be realistic and challenging enough to be valuable. It should include the opportunity to get hands on with logging, notifications, escalations, and executing runbooks or automation. During the simulation, your responders should exercise their technical and organizational skills, and leaders should be involved to build their incident management skills. 
At the end of the simulation, celebrate the efforts of the team and look for ways to iterate, repeat, and expand into further simulations.AWS has created Incident Response Runbook templates that you can use not only to prepare your response efforts, but also as a basis for a simulation. When planning, a simulation can be broken into five phases.Evidence gathering: In this phase, a team will get alerts through various means, such as an internal ticketing system, alerts from monitoring tooling, anonymous tips, or even public news. Teams then start to review infrastructure and application logs to determine the source of the compromise. This step should also involve internal escalations and incident leadership. Once identified, teams move on to containing the incident.Contain the incident: Teams will have determined there has been an incident and established the source of the compromise. Teams now should take action to contain it, for example, by disabling compromised credentials, isolating a compute resource, or revoking a role's permission.Eradicate the incident: Now that they've contained the incident, teams will work towards mitigating any vulnerabilities in applications or infrastructure configurations that were susceptible to the compromise. 
This could include rotating all credentials used for a workload, modifying Access Control Lists (ACLs) or changing network configurations.", + "checks": [] + } + ] + }, + { + "title": "SEC11 How do you incorporate and validate the security properties of applications throughout the design, development, and deployment lifecycle?", + "description": "Training people, testing using automation, understanding dependencies, and validating the security properties of tools and applications help to reduce the likelihood of security issues in production workloads.", + "children": [ + { + "title": "SEC11-BP02 Automate testing throughout the development and release lifecycle", + "description": "Automate the testing for security properties throughout the development and release lifecycle, for example by scanning container images for known vulnerabilities when they are pushed.", + "checks": [ + "aws_ecr_image_scan_on_push" + ] + } + ] + } + ] + } + ] +} diff --git a/resotocore/resotocore/static/report/benchmark/aws/waf_notes.md b/resotocore/resotocore/static/report/benchmark/aws/waf_notes.md new file mode 100644 index 0000000000..429fe299f1 --- /dev/null +++ b/resotocore/resotocore/static/report/benchmark/aws/waf_notes.md @@ -0,0 +1,5 @@ + +- SEC02-BP06: check all custom permissions if they allow privilege escalation + e.g. 
iam:*, (iam:PassRole and ec2:RunInstances), (iam:PassRole and lambda:CreateFunction and lambda:InvokeFunction) +- SEC03-BP07: SQS queue and SNS topic not publicly accessible check is missing (policy) +- SEC04-BP04: Cloudwatch: ensure alerts for log metric filters are configured diff --git a/resotocore/resotocore/static/report/check_template.json b/resotocore/resotocore/static/report/check_template.json index d919316896..916a5b64ac 100644 --- a/resotocore/resotocore/static/report/check_template.json +++ b/resotocore/resotocore/static/report/check_template.json @@ -9,7 +9,7 @@ "cost" ], "risk": "", - "severity": "high|medium|low", + "severity": "critical|high|medium|low|info", "url": "", "detect": { "resoto": "is(resource)", @@ -18,10 +18,10 @@ }, "remediation": { "action": { - "awscli": null, - "resoto": null, - "xxx": null - }, + "aws_cli": null, + "resoto": null, + "xxx": null + }, "text": "", "url": "" }, diff --git a/resotocore/resotocore/static/report/checks/aws/aws_acm.json b/resotocore/resotocore/static/report/checks/aws/aws_acm.json new file mode 100644 index 0000000000..57c69636c0 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_acm.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "acm", + "checks": [ + { + "name": "certificate_transparency_logging_enabled", + "title": "Ensure ACM certificate transparency is enabled to enhance website security and detect unauthorized SSL/TLS certificates.", + "result_kinds": ["aws_acm_certificate"], + "categories": ["security", "compliance"], + "risk": "Without ACM certificate transparency, the risk of unauthorized SSL/TLS certificates going undetected increases, posing a threat to website and infrastructure security.", + "severity": "medium", + "detect": { + "resoto": "is(aws_acm_certificate) and type!=IMPORTED and certificate_transparency_logging!=Enabled" + }, + "remediation": { + "text": "To fix this issue, select the certificate you want to check in ACM. 
In the certificate details, look for the 'Certificate Transparency Logging' attribute and ensure it is enabled.", + "url": "https://docs.aws.amazon.com/acm/latest/userguide/acm-bestpractices.html#best-practices-transparency" + }, + "url": "https://docs.aws.amazon.com/acm/latest/userguide/acm-bestpractices.html#best-practices-transparency", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die ACM-Zertifikatstransparenz aktiviert ist, um die Sicherheit der Website zu verbessern und nicht autorisierte SSL/TLS-Zertifikate zu erkennen.", + "risk": "Ohne die ACM-Zertifikatstransparenz besteht ein erhöhtes Risiko, dass nicht autorisierte SSL/TLS-Zertifikate unbemerkt bleiben und eine Bedrohung für die Sicherheit der Website und der Infrastruktur darstellen.", + "remediation": "Um dieses Problem zu beheben, wählen Sie das Zertifikat aus, das Sie überprüfen möchten, in ACM aus. In den Zertifikatdetails suchen Sie nach dem Attribut 'Zertifikatstransparenzprotokollierung' und stellen Sie sicher, dass es aktiviert ist." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_apigateway.json b/resotocore/resotocore/static/report/checks/aws/aws_apigateway.json index 5d833921f5..4c4baaca2b 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_apigateway.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_apigateway.json @@ -4,66 +4,94 @@ "checks": [ { "name": "authorizers_enabled", - "title": "Check if API Gateway has configured authorizers.", - "result_kinds": ["aws_api_gateway_rest_api"], + "title": "Ensure API Gateway is Configured with Authorizers", + "result_kinds": ["aws_apigateway_rest_api"], "categories": ["security", "compliance"], - "risk": "If no authorizer is defined, anyone can use the service.", + "risk": "Without a defined authorizer, your service could be exposed to unsanctioned use. 
This lack of control poses a security risk and can lead to unauthorized access and misuse.", "severity": "medium", "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html", "detect": { - "resoto": "is(aws_api_gateway_rest_api) with(empty, --> is(aws_api_gateway_authorizer))" + "resoto": "is(aws_apigateway_rest_api) with(empty, --> is(aws_apigateway_authorizer))" }, "remediation": { - "text": "Add a Cognito pool or attach a Lambda function to control access to your API.", + "text": "Implement an authorizer by adding an Amazon Cognito user pool or attaching an AWS Lambda function. This will provide controlled access for users interacting with your API.", "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html" }, - "related": [], - "internal_notes": "" + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass das API Gateway mit Authorizern konfiguriert ist", + "risk": "Ohne definierten Authorizer könnte Ihr Service unautorisierten Zugriffen ausgesetzt sein. Diese mangelnde Kontrolle stellt ein Sicherheitsrisiko dar und kann zu unbefugtem Zugriff und Missbrauch führen.", + "remediation": "Implementieren Sie einen Authorizer, indem Sie einen Amazon Cognito Benutzerpool hinzufügen oder eine AWS Lambda Funktion anhängen. Dadurch wird kontrollierter Zugriff für Benutzer ermöglicht, die mit Ihrer API interagieren." 
+ } + } }, { "name": "client_certificate_enabled", - "title": "Check if API Gateway has client certificate enabled to access your backend endpoint.", - "result_kinds": ["aws_api_gateway_stage"], + "title": "Ensure that Client Certificate is Enabled on API Gateway for Backend Endpoint Access", + "result_kinds": ["aws_apigateway_stage"], "categories": ["security", "compliance"], - "risk": "Man in the middle attacks are possible and other similar risks.", + "risk": "Without client certificate enforcement, potential 'man-in-the-middle' attacks can occur, jeopardizing data integrity and confidentiality. Unsecured API calls may also be intercepted, leading to unauthorized data access.", "severity": "medium", "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-mutual-tls.html", "detect": { - "resoto": "is(aws_api_gateway_stage) and stage_client_certificate_id==null <-[2]- is(aws_api_gateway_rest_api)" + "resoto": "is(aws_apigateway_stage) and stage_client_certificate_id==null <-[2]- is(aws_apigateway_rest_api)" }, "remediation": { - "text": "Enable client certificate. Mutual TLS is recommended and commonly used for business-to-business (B2B) applications. It iss used in standards such as Open Banking. API Gateway now provides integrated mutual TLS authentication at no additional cost.", + "text": "Enable the client certificate and implement mutual TLS for secure data transit. Mutual TLS is highly recommended for B2B applications, adhering to norms such as Open Banking. 
Amazon API Gateway provides integral mutual TLS authentication at no additional cost.", "url": "https://aws.amazon.com/blogs/compute/introducing-mutual-tls-authentication-for-amazon-api-gateway/" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass das Clientzertifikat für Backend-Endpunktzugriff in der API Gateway aktiviert ist", + "risk": "Ohne Durchsetzung des Clientzertifikats können potenzielle 'Man-in-the-Middle'-Angriffe auftreten, bei denen die Integrität und Vertraulichkeit von Daten gefährdet werden. Nicht gesicherte API-Aufrufe können ebenfalls abgefangen werden, was zu unbefugtem Datenzugriff führt.", + "remediation": "Aktivieren Sie das Clientzertifikat und implementieren Sie Mutual TLS für sicheren Datentransit. Mutual TLS wird für B2B-Anwendungen dringend empfohlen und entspricht Normen wie Open Banking. Amazon API Gateway bietet eine integrierte Mutual TLS-Authentifizierung ohne zusätzliche Kosten." + } } }, { "name": "logging_enabled", - "title": "Check if API Gateway has logging enabled.", - "result_kinds": ["aws_api_gateway_stage"], + "title": "Ensure API Gateway Stages Have Logging Enabled", + "result_kinds": ["aws_apigateway_stage"], "categories": ["compliance"], - "risk": "If not enabled, monitoring of service use is not possible. 
Real-time monitoring of API calls can be achieved by directing CloudTrail Logs to CloudWatch Logs and establishing corresponding metric filters and alarms.", + "risk": "Without enabling logging, you compromise on the visibility and traceability of your service usage, potentially missing crucial operational insights, security vulnerabilities, and debugging information.", "severity": "medium", "detect": { - "resoto": "is(aws_api_gateway_stage) and stage_method_settings!={}" + "resoto": "is(aws_apigateway_stage) and stage_method_settings!={}" }, "remediation": { - "text": "Monitoring is an important part of maintaining the reliability, availability and performance of API Gateway and your AWS solutions. You should collect monitoring data from all of the parts of your AWS solution. CloudTrail provides a record of actions taken by a user, role, or an AWS service in API Gateway. Using the information collected by CloudTrail, you can determine the request that was made to API Gateway, the IP address from which the request was made, who made the request, etc.", + "text": "To remediate, ensure you enable monitoring in all parts of your AWS solution. Particularly, ensure CloudTrail is active for logging API Gateway actions, which can offer insights like the nature of requests, originating IP address, the executor of the request, and more.", "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/security-monitoring.html" + }, + "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/logging-and-monitoring.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass API Gateway-Stufen das Logging aktiviert haben", + "risk": "Wenn das Logging nicht aktiviert ist, beeinträchtigen Sie die Sichtbarkeit und Nachverfolgbarkeit der Nutzung Ihres Dienstes. 
Dadurch können wesentliche betriebliche Erkenntnisse, Sicherheitsschwachstellen und Debugging-Informationen übersehen werden.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass Sie die Überwachung in allen Teilen Ihrer AWS-Lösung aktivieren. Stellen Sie insbesondere sicher, dass CloudTrail für das Protokollieren von API Gateway-Aktionen aktiviert ist. Dadurch erhalten Sie Einblicke wie die Art der Anfragen, die IP-Adresse des Ursprungs, den Ausführer der Anfrage und mehr." + } } }, { "name": "waf_acl_attached", - "title": "Check if API Gateway has a WAF ACL attached.", - "result_kinds": ["aws_api_gateway_stage"], + "title": "Ensure API Gateway Has a WAF ACL Attached.", + "result_kinds": ["aws_apigateway_stage"], "categories": ["security"], - "risk": "Potential attacks and / or abuse of service for internet reachable services.", + "risk": "Without a WAF ACL, API Gateway could be exposed to common web threats such as SQL injection and XSS attacks. These could compromise security, affect availability and performance, and consume excessive resources.", "severity": "medium", "detect": { - "resoto": "is(aws_api_gateway_stage) and stage_web_acl_arn==null" + "resoto": "is(aws_apigateway_stage) and stage_web_acl_arn==null" }, "remediation": { - "text": "Use AWS WAF to protect your API Gateway API from common web exploits, such as SQL injection and cross-site scripting (XSS) attacks. These could affect API availability and performance, compromise security or consume excessive resources.", - "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/security-monitoring.html" + "text": "Mitigate this risk by using AWS WAF to protect your API Gateway from threats. 
AWS WAF can block these common web attacks, safeguarding both service performance and security.", + "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-control-access-aws-waf.html" + }, + "url": "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-control-access-aws-waf.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass API Gateway einen WAF ACL angehängt hat.", + "risk": "Ohne einen WAF ACL kann API Gateway gängigen Web-Bedrohungen wie SQL-Injektionen und XSS-Angriffen ausgesetzt sein. Diese könnten die Sicherheit beeinträchtigen, die Verfügbarkeit und Leistung beeinträchtigen und übermäßige Ressourcen verbrauchen.", + "remediation": "Verringern Sie dieses Risiko, indem Sie AWS WAF verwenden, um Ihr API Gateway vor Bedrohungen zu schützen. AWS WAF kann diese gängigen Web-Angriffe blockieren und sowohl die Leistung als auch die Sicherheit des Dienstes schützen." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_autoscaling.json b/resotocore/resotocore/static/report/checks/aws/aws_autoscaling.json new file mode 100644 index 0000000000..d3f3885edd --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_autoscaling.json @@ -0,0 +1,28 @@ +{ + "provider": "aws", + "service": "autoscaling", + "checks": [ + { + "name": "launch_template_public_ip_disabled", + "title": "Ensure Auto Scaling Launch Templates Do Not Auto-Assign Public IP Addresses", + "result_kinds": ["aws_autoscaling_group"], + "categories": ["security", "compliance"], + "risk": "Auto-assigning public IP addresses in Auto Scaling launch templates can lead to unintended public exposure of EC2 instances. 
This increases the risk of malicious activities such as brute force attacks, data breaches, or unauthorized access, compromising instance security and data integrity.", + "severity": "medium", + "detect": { + "resoto": "is(aws_autoscaling_group) with (any, --> is(aws_ec2_launch_template) and launch_template_data.network_interfaces[*].associate_public_ip_address==true)" + }, + "remediation": { + "text": "To fix the issue, modify the Auto Scaling launch template. Set 'AssociatePublicIpAddress' to false for all network interfaces in the launch template. Additionally, ensure instances are launched in private subnets where public access is not required.", + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#launch-template-network" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Auto Scaling Launch-Vorlagen keine öffentlichen IP-Adressen automatisch zuweisen", + "risk": "Die automatische Zuweisung öffentlicher IP-Adressen in Auto Scaling Launch-Vorlagen kann zu unbeabsichtigter öffentlicher Zugänglichkeit von EC2-Instanzen führen. Dadurch steigt das Risiko von bösartigen Aktivitäten wie Brute-Force-Angriffen, Datenschutzverletzungen oder unberechtigtem Zugriff, was die Sicherheit und Datenintegrität der Instanz gefährdet.", + "remediation": "Um das Problem zu beheben, ändern Sie die Auto Scaling Launch-Vorlage. Setzen Sie 'AssociatePublicIpAddress' für alle Netzwerkschnittstellen in der Launch-Vorlage auf false. Stellen Sie außerdem sicher, dass Instanzen in privaten Subnetzen gestartet werden, wo kein öffentlicher Zugriff erforderlich ist." 
+ } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_cloudformation.json b/resotocore/resotocore/static/report/checks/aws/aws_cloudformation.json new file mode 100644 index 0000000000..6240b374e2 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_cloudformation.json @@ -0,0 +1,28 @@ +{ + "provider": "aws", + "service": "cloudformation", + "checks": [ + { + "name": "no_secrets_in_output", + "title": "Ensure there are no secrets in CloudFormation outputs", + "result_kinds": ["aws_cloudformation_stack"], + "categories": ["security", "compliance"], + "risk": "Using secrets hardcoded into CloudFormation outputs can enable malware and bad actors to gain unauthorized access to other services, leading to potential data breaches and compromised infrastructure.", + "severity": "critical", + "detect": { + "resoto_cmd": "search is(aws_cloudformation_stack) | detect-secrets --path stack_outputs --with-secrets" + }, + "remediation": { + "text": "To fix this issue, avoid including secrets in CloudFormation outputs. Instead, use the AWS Secrets Manager service to securely store and retrieve passwords and other sensitive information.", + "url": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass in den CloudFormation-Ausgaben keine Geheimnisse enthalten sind", + "risk": "Die Verwendung von in CloudFormation-Ausgaben fest codierten Geheimnissen kann Malware und bösartigen Akteuren ermöglichen, unbefugten Zugriff auf andere Dienste zu erlangen, was zu potenziellen Datenschutzverletzungen und beeinträchtigter Infrastruktur führen kann.", + "remediation": "Um dieses Problem zu beheben, vermeiden Sie die Verwendung von Geheimnissen in den CloudFormation-Ausgaben. 
Verwenden Sie stattdessen den AWS Secrets Manager-Dienst, um Passwörter und andere sensitive Informationen sicher zu speichern und abzurufen." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_cloudfront.json b/resotocore/resotocore/static/report/checks/aws/aws_cloudfront.json new file mode 100644 index 0000000000..0751ef5de2 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_cloudfront.json @@ -0,0 +1,53 @@ +{ + "provider": "aws", + "service": "cloudfront", + "checks": [ + { + "name": "distribution_logging_enabled", + "title": "Ensure that logging is enabled for every CloudFront distribution", + "result_kinds": ["aws_cloudfront_distribution"], + "categories": ["security", "compliance"], + "risk": "Failure to enable logging in CloudFront distributions may result in a lack of visibility into access patterns, hindering the detection and investigation of potential security threats. This can also lead to non-compliance with security best practices and regulatory requirements.", + "severity": "medium", + "detect": { + "resoto": "is(aws_cloudfront_distribution) and distribution_config.logging==null" + }, + "remediation": { + "text": "To fix this issue, you can enable logging for your CloudFront distribution either through the AWS CLI or the management console.", + "url": "https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/LoggingIngesting.html", + "action": { + "aws_cli": "aws cloudfront update-distribution --id YOUR_DISTRIBUTION_ID --logging-config '{\"enabled\":true,\"bucket\":\"YOUR_S3_BUCKET\",\"prefix\":\"LOG_PREFIX\"}'" + } + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Protokollierung für jede CloudFront-Distribution aktiviert ist", + "risk": "Das Nichtaktivieren der Protokollierung in CloudFront-Distributionen kann zu fehlender Transparenz bei Zugriffsmustern führen, was die Erkennung und Untersuchung potenzieller Sicherheitsbedrohungen behindert. 
Dies kann auch zu Nichteinhaltung von Sicherheitsstandards und gesetzlichen Anforderungen führen.", + "remediation": "Um dieses Problem zu beheben, können Sie die Protokollierung für Ihre CloudFront-Distribution entweder über die AWS CLI oder die Managementkonsole aktivieren." + } + } + }, + { + "name": "distribution_waf_enabled", + "title": "Ensure that a Web Application Firewall (WAF) is associated with CloudFront Distribution", + "result_kinds": ["aws_cloudfront_distribution"], + "categories": ["security", "compliance"], + "risk": "Not associating a Web Application Firewall (WAF) with CloudFront Distribution can leave your distributions vulnerable to internet attacks such as DDoS.", + "severity": "medium", + "detect": { + "resoto": "is(aws_cloudfront_distribution) and distribution_config.web_acl_id in [null, \"\"]" + }, + "remediation": { + "text": "To fix this issue, associate your CloudFront Distributions with a Web ACL. This will provide an additional layer of security.", + "url": "https://docs.aws.amazon.com/waf/latest/developerguide/cloudfront-features.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Web Application Firewall (WAF) mit CloudFront Distribution verknüpft ist", + "risk": "Wenn Sie keine Web Application Firewall (WAF) mit CloudFront Distribution verknüpfen, können Ihre Verteilungen anfällig für Internetangriffe wie DDoS sein.", + "remediation": "Um dieses Problem zu beheben, verknüpfen Sie Ihre CloudFront-Distributionen mit einer Web ACL. Dadurch wird eine zusätzliche Sicherheitsebene bereitgestellt." 
+ } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_cloudtrail.json b/resotocore/resotocore/static/report/checks/aws/aws_cloudtrail.json index 6c50cc376d..96c6f12af5 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_cloudtrail.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_cloudtrail.json @@ -6,83 +6,96 @@ "name": "logging_in_all_regions_enabled", "title": "Ensure CloudTrail is enabled and set up for logging in all regions", "result_kinds": ["aws_region"], - "categories": [ - "security", - "compliance" - ], - "risk": "AWS CloudTrail is a web service that records AWS API calls for your account and delivers log files to you. The recorded information includes the identity of the API caller; the time of the API call; the source IP address of the API caller; the request parameters; and the response elements returned by the AWS service.", + "categories": ["security", "compliance"], + "risk": "If CloudTrail is not enabled and set up for logging in all regions, critical information about API calls will not be recorded and delivered. This includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service.", "severity": "high", "detect": { "resoto": "is(aws_region) with(empty, -[0:1]-> is(aws_cloud_trail) and trail_status.is_logging==true)" }, "remediation": { - "text": "Ensure there is one trail in every region with logging enabled. Consider using multi account / multi region trails for your organization.", + "text": "To fix this issue, ensure there is one trail in every region with logging enabled. 
Additionally, consider using multi-account/multi-region trails for your organization to enhance logging capabilities.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrailconcepts.html#cloudtrail-concepts-management-events" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass CloudTrail aktiviert ist und in allen Regionen für die Protokollierung eingerichtet ist", + "risk": "Wenn CloudTrail nicht aktiviert und für die Protokollierung in allen Regionen eingerichtet ist, werden wichtige Informationen über API-Aufrufe nicht aufgezeichnet und übermittelt. Dies umfasst die Identität des API-Aufrufers, die Zeit des API-Aufrufs, die Quell-IP-Adresse des API-Aufrufers, die Anforderungsparameter und die von dem AWS-Dienst zurückgegebenen Antwortelemente.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass in jeder Region ein Trail mit aktivierter Protokollierung vorhanden ist. Erwägen Sie außerdem die Verwendung von Multi-Konto/Multi-Region-Trails für Ihre Organisation, um die Protokollierungsfunktionen zu verbessern." + } } }, { "name": "log_file_validation_enabled", "title": "Ensure CloudTrail log file validation is enabled", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Enabling log file validation will provide additional integrity checking of CloudTrail logs.", + "categories": ["security", "compliance"], + "risk": "Enabling log file validation provides additional integrity checking of CloudTrail logs. 
Failure to enable log file validation increases the risk of undetected tampering or unauthorized access to sensitive logs.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_status.is_logging==true and trail_log_file_validation_enabled==false" }, "remediation": { - "text": "Ensure LogFileValidationEnabled is set to true for each trail.", - "url": "http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-filevalidation-enabling.html" + "text": "To fix the issue, ensure that the LogFileValidationEnabled parameter is set to true for each CloudTrail trail. This can be done through the AWS Management Console or by using the AWS CLI.", + "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-filevalidation-enabling.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Validierung der CloudTrail-Protokolldatei aktiviert ist", + "risk": "Das Aktivieren der Protokollvalidierung bietet zusätzliche Integritätsprüfungen für CloudTrail-Protokolle. Wenn die Protokollvalidierung nicht aktiviert ist, erhöht sich das Risiko einer unbemerkten Manipulation oder unbefugten Zugriffs auf sensible Protokolle.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass der Parameter LogFileValidationEnabled für jeden CloudTrail-Trail auf true gesetzt ist. Dies kann über die AWS Management Console oder mithilfe der AWS CLI erfolgen." 
+ } } }, { "name": "logs_s3_bucket_is_not_publicly_accessible", - "title": "Ensure the S3 bucket CloudTrail logs is not publicly accessible", + "title": "Ensure that the S3 bucket for CloudTrail logs is not publicly accessible", "result_kinds": ["aws_s3_bucket"], - "categories": [ - "security", - "compliance" - ], - "risk": "Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected accounts use or configuration.", + "categories": ["security", "compliance"], + "risk": "Allowing public access to CloudTrail log content may expose sensitive information and provide potential attackers with valuable insights into the account's vulnerabilities and configuration weaknesses.", "severity": "critical", "detect": { "resoto": "is(aws_cloud_trail) and trail_status.is_logging==true --> is(aws_s3_bucket) and bucket_public_access_block_configuration.{block_public_acls!=true or ignore_public_acls!=true or block_public_policy!=true or restrict_public_buckets!=true} or bucket_acl.grants[*].{permission in [READ, READ_ACP] and grantee.uri==\"http://acs.amazonaws.com/groups/global/AllUsers\"}" }, "remediation": { - "text": "Analyze Bucket policy to validate appropriate permissions. Ensure the AllUsers principal is not granted privileges. Ensure the AuthenticatedUsers principal is not granted privileges.", + "text": "To fix the issue, carefully analyze the Bucket policy and ensure that appropriate permissions are set. 
Specifically, make sure that the AllUsers principal and the AuthenticatedUsers principal are not granted any privileges.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der S3-Bucket für CloudTrail-Logs nicht öffentlich zugänglich ist", + "risk": "Die Zulassung öffentlichen Zugriffs auf den Inhalt von CloudTrail-Logs kann sensible Informationen offenlegen und potenziellen Angreifern wertvolle Einblicke in die Schwachstellen und Konfigurationsprobleme des Kontos ermöglichen.", + "remediation": "Um das Problem zu beheben, analysieren Sie die Bucket-Richtlinie sorgfältig und stellen Sie sicher, dass die entsprechenden Berechtigungen festgelegt sind. Stellen Sie insbesondere sicher, dass dem AllUsers-Principal und dem AuthenticatedUsers-Principal keine Privilegien gewährt werden." + } } }, { "name": "no_logging_enabled", "title": "Ensure CloudTrail has logging enabled", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user; API; resource; and IP address; and provides opportunity to establish alarms and notifications for anomalous or sensitivity account activity.", + "categories": ["security", "compliance"], + "risk": "Not enabling logging for CloudTrail prevents real-time and historic activity logging based on user, API, resource, and IP address. 
This also eliminates the opportunity to establish alarms and notifications for anomalous or sensitive account activity.", "severity": "low", "detect": { "resoto": "is(aws_cloud_trail) and trail_status.is_logging==false" }, "remediation": { - "text": "Validate that the trails in CloudTrail has an arn set in the CloudWatchLogsLogGroupArn property.", + "text": "Ensure the trails in CloudTrail have the 'CloudWatchLogsLogGroupArn' property set to a valid ARN.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass CloudTrail die Protokollierung aktiviert hat", + "risk": "Die Nichtaktivierung der Protokollierung für CloudTrail verhindert das Protokollieren von Echtzeit- und historischen Aktivitäten basierend auf Benutzer, API, Ressourcen und IP-Adresse. Dadurch entfällt auch die Möglichkeit, Alarme und Benachrichtigungen für anomale oder sensible Kontobewegungen einzurichten.", + "remediation": "Stellen Sie sicher, dass die Trails in CloudTrail die Eigenschaft 'CloudWatchLogsLogGroupArn' auf einen gültigen ARN festgelegt haben." 
+ } } }, { "name": "no_recent_log_event", - "title": "Ensure CloudTrail has log events in the configured duration", + "title": "Ensure CloudTrail has recent log events", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user; API; resource; and IP address; and provides opportunity to establish alarms and notifications for anomalous or sensitivity account activity.", + "categories": ["security", "compliance"], + "risk": "If the issue is not solved, there will be no log events in CloudTrail for the configured duration, which hinders real-time and historic activity logging and prevents the establishment of alarms and notifications for anomalous or sensitive account activity.", "severity": "low", "detect": { "resoto": "is(aws_cloud_trail) and trail_status.is_logging==true and trail_status.latest_delivery_attempt_succeeded<{{last_log_event_threshold.ago}}" @@ -91,386 +104,499 @@ "last_log_event_threshold": "1d" }, "remediation": { - "text": "Validate that the trails in CloudTrail has an arn set in the CloudWatchLogsLogGroupArn property.", + "text": "To fix the issue, ensure that the trails in CloudTrail have a valid ARN set in the CloudWatchLogsLogGroupArn property.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass CloudTrail aktuelle Protokollereignisse hat", + "risk": "Wenn das Problem nicht gelöst wird, wird es keine Protokollereignisse in CloudTrail für den konfigurierten Zeitraum geben, was die Echtzeit- und historische Aktivitätsprotokollierung beeinträchtigt und die Einrichtung von Alarmen und Benachrichtigungen für anormale oder sensible Kontenaktivitäten verhindert.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass die Trails in CloudTrail eine 
gültige ARN im CloudWatchLogsLogGroupArn-Attribut haben." + } } }, { "name": "s3_bucket_logging_enabled", "title": "Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Server access logs can assist you in security and access audits; help you learn about your customer base; and understand your Amazon S3 bill.", + "categories": ["security", "compliance"], + "risk": "Failure to enable S3 bucket access logging on the CloudTrail S3 bucket can result in security breaches, hindered access audits, and limited understanding of Amazon S3 bill.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) --> is(aws_s3_bucket) and bucket_logging.target_bucket==null" }, "remediation": { - "text": "Ensure that S3 buckets have Logging enabled. CloudTrail data events can be used in place of S3 bucket logging. If that is the case; this finding can be considered a false positive.", + "text": "To fix this issue, ensure that S3 buckets have Logging enabled. If you are using CloudTrail data events, you can consider this finding a false positive.", "url": "https://docs.aws.amazon.com/AmazonS3/latest/dev/security-best-practices.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass das Protokollieren des Zugriffs auf S3-Buckets auf dem CloudTrail S3-Bucket aktiviert ist", + "risk": "Das Versäumnis, das Protokollieren des Zugriffs auf S3-Buckets auf dem CloudTrail S3-Bucket zu aktivieren, kann zu Sicherheitsverletzungen, eingeschränkten Zugriffsprüfungen und begrenztem Verständnis der Amazon S3-Rechnung führen.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass S3-Buckets die Protokollierung aktiviert haben. Wenn Sie CloudTrail-Datenvorgänge verwenden, können Sie diesen Befund als Fehlalarm betrachten." 
+ } } }, { "name": "uses_encryption_at_rest", - "title": "Ensure CloudTrail logs are encrypted at rest using KMS CMKs", + "title": "Ensure CloudTrail logs are encrypted at rest using AWS KMS keys", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "By default; the log files delivered by CloudTrail to your bucket are encrypted by Amazon server-side encryption with Amazon S3-managed encryption keys (SSE-S3). To provide a security layer that is directly manageable; you can instead use server-side encryption with AWS KMS–managed keys (SSE-KMS) for your CloudTrail log files.", + "categories": ["security", "compliance"], + "risk": "By default, CloudTrail log files are encrypted using Amazon server-side encryption with Amazon S3-managed encryption keys (SSE-S3). However, using AWS KMS keys (SSE-KMS) provides a more secure and directly manageable encryption method, ensuring the confidentiality of the log files.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_kms_key_id==null" }, "remediation": { - "text": "This approach has the following advantages: You can create and manage the CMK encryption keys yourself. You can use a single CMK to encrypt and decrypt log files for multiple accounts across all regions. You have control over who can use your key for encrypting and decrypting CloudTrail log files. You can assign permissions for the key to the users. You have enhanced security.", + "text": "To fix this issue, you should enable server-side encryption with AWS KMS (SSE-KMS) for your CloudTrail log files. This allows you to create and manage your own encryption keys, assign permissions, and provides enhanced security. 
Follow the instructions in the AWS documentation to enable SSE-KMS for your CloudTrail logs.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass CloudTrail-Protokolle im Ruhezustand mit AWS KMS-Schlüsseln verschlüsselt werden", + "risk": "Standardmäßig werden CloudTrail-Protokolldateien mit Amazon-Server-seitiger Verschlüsselung und S3-verwalteten Verschlüsselungsschlüsseln (SSE-S3) verschlüsselt. Die Verwendung von AWS KMS-Schlüsseln (SSE-KMS) bietet jedoch eine sicherere und direkt verwaltbare Verschlüsselungsmethode, um die Vertraulichkeit der Protokolldateien zu gewährleisten.", + "remediation": "Um dieses Problem zu beheben, sollten Sie die serverseitige Verschlüsselung mit AWS KMS (SSE-KMS) für Ihre CloudTrail-Protokolldateien aktivieren. Dadurch können Sie Ihre eigenen Verschlüsselungsschlüssel erstellen und verwalten, Berechtigungen zuweisen und eine erhöhte Sicherheit gewährleisten. Befolgen Sie die Anweisungen in der AWS-Dokumentation, um SSE-KMS für Ihre CloudTrail-Protokolle zu aktivieren." 
+ } } }, { "name": "s3_data_events_write_enabled", - "title": "Check all regions and make sure S3 buckets have Object-level logging for write events is enabled in CloudTrail.", + "title": "Ensure S3 buckets have Object-level logging for write events enabled in CloudTrail", "result_kinds": ["aws_region"], - "categories": [ - "security", - "compliance" - ], - "risk": "If logs are not enabled, monitoring of service use and threat analysis is not possible.", + "categories": ["security", "compliance"], + "risk": "If Object-level logging is not enabled, monitoring of service use and threat analysis is not possible.", "severity": "low", "detect": { - "resoto": "is(aws_region) with(empty, --> is(aws_cloud_trail) and trail_has_custom_event_selectors=true and trail_event_selectors[*].field_selectors.eventCategory.equals[*]=Data and trail_event_selectors[*].field_selectors.`resources.type`.equals[*]=AWS::S3::Object and trail_event_selectors[*].field_selectors.readOnly.equals[*]!=\"false\")" + "resoto": "is(aws_region) with(empty, --> is(aws_cloud_trail) and trail_has_custom_event_selectors=true and (trail_event_selectors.event_selectors[*].{read_write_type in [All, WriteOnly] and data_resources[*].type=\"AWS::S3::Object\"} or trail_event_selectors.advanced_event_selectors[*].field_selectors[*].{selector_field==\"resources.type\" and equals[*]=\"AWS::S3::Object\"}))" }, "remediation": { - "text": "Enable logs. Create an S3 lifecycle policy. Define use cases, metrics and automated responses where applicable.", + "text": "Enable Object-level logging for write events in CloudTrail. Create an S3 lifecycle policy. 
Define use cases, metrics, and automated responses where applicable.", "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass S3-Buckets in CloudTrail die Protokollierung auf Objektebene für Schreibereignisse aktiviert haben", + "risk": "Wenn die Protokollierung auf Objektebene nicht aktiviert ist, ist eine Überwachung der Nutzung des Dienstes und eine Bedrohungsanalyse nicht möglich.", + "remediation": "Aktivieren Sie die Protokollierung auf Objektebene für Schreibereignisse in CloudTrail. Erstellen Sie eine S3-Lifecycle-Policy. Definieren Sie Anwendungsfälle, Metriken und automatisierte Reaktionen, wo anwendbar." + } } }, { "name": "s3_data_events_read_enabled", - "title": "Check all regions and make sure S3 buckets have Object-level logging for read events is enabled in CloudTrail.", + "title": "Ensure S3 buckets have Object-level logging for read events enabled in CloudTrail", + "result_kinds": ["aws_region"], + "categories": ["security", "compliance"], + "risk": "If Object-level logging for read events is not enabled in CloudTrail, monitoring of service use and threat analysis is not possible.", + "severity": "low", + "detect": { + "resoto": "is(aws_region) with(empty, --> is(aws_cloud_trail) and trail_has_custom_event_selectors=true and (trail_event_selectors.event_selectors[*].{read_write_type in [All, ReadOnly] and data_resources[*].type=\"AWS::S3::Object\"} or trail_event_selectors.advanced_event_selectors[*].field_selectors[*].{selector_field==\"resources.type\" and equals[*]=\"AWS::S3::Object\"}))" + }, + "remediation": { + "text": "Enable Object-level logging for read events. Create an S3 lifecycle policy. 
Define use cases, metrics, and automated responses where applicable.", + "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass S3-Buckets in CloudTrail eine objektbezogene Protokollierung für Leseereignisse aktiviert haben", + "risk": "Wenn in CloudTrail keine objektbezogene Protokollierung für Leseereignisse aktiviert ist, ist die Überwachung der Service-Nutzung und Bedrohungsanalyse nicht möglich.", + "remediation": "Aktivieren Sie eine objektbezogene Protokollierung für Leseereignisse. Erstellen Sie eine S3-Lifecycle-Richtlinie. Definieren Sie Anwendungsfälle, Metriken und automatisierte Reaktionen, wo dies zutreffend ist." + } + } + }, + { + "name": "lambda_logging_enabled", + "title": "Ensure Lambda function invocations are logged via CloudTrail", "result_kinds": ["aws_region"], - "categories": [ - "security", - "compliance" - ], - "risk": "If logs are not enabled, monitoring of service use and threat analysis is not possible.", + "categories": ["security", "compliance"], + "risk": "If logs are not enabled, it will not be possible to monitor service use and perform threat analysis.", "severity": "low", "detect": { - "resoto": "is(aws_region) with(empty, --> is(aws_cloud_trail) and trail_has_custom_event_selectors=true and trail_event_selectors[*].field_selectors.eventCategory.equals[*]=Data and trail_event_selectors[*].field_selectors.`resources.type`.equals[*]=AWS::S3::Object and trail_event_selectors[*].field_selectors.readOnly.equals[*]!=\"true\")" + "resoto": "is(aws_region) with(empty, --> is(aws_cloud_trail) and trail_has_custom_event_selectors=true and (trail_event_selectors.event_selectors[*].{data_resources[*].type=\"AWS::Lambda::Function\"} or trail_event_selectors.advanced_event_selectors[*].field_selectors[*].{selector_field==\"resources.type\" and equals[*]=\"AWS::Lambda::Function\"}))" }, "remediation": { - "text": "Enable logs. 
Create an S3 lifecycle policy. Define use cases, metrics and automated responses where applicable.", + "text": "To fix this issue, enable logging and create an S3 lifecycle policy. Make sure to define use cases, metrics, and automated responses as applicable.", "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-cloudtrail-logging-for-s3.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Lambda-Funktionsaufrufe über CloudTrail protokolliert werden", + "risk": "Wenn das Protokollieren von Logs nicht aktiviert ist, können Nutzung des Dienstes und Bedrohungsanalysen nicht überwacht werden.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie das Protokollieren und erstellen Sie eine S3-Lifecycle-Richtlinie. Stellen Sie sicher, dass Anwendungsfälle, Metriken und automatisierte Reaktionen gegebenenfalls definiert sind." + } } }, { "name": "log_metric_filters_unauthorized_api_calls", - "title": "Ensure a log metric filter and alarm exist for unauthorized API calls.", + "title": "Ensure that a log metric filter and alarm are in place for unauthorized API calls", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "If the issue is not solved, there is a risk of application errors going undetected and malicious activity not being detected in a timely manner.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and 
filter_pattern~\"\\$\\.errorCode\\s*=\\s*\\\"\\*UnauthorizedOperation\\\".+\\$\\.errorCode\\s*=\\s*\\\"AccessDenied\\*\\\".+\\$\\.sourceIPAddress\\s*!=\\s*\\\"delivery.logs.amazonaws.com\\\".+\\$\\.eventName\\s*!=\\s*\\\"HeadBucket\\\"\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for Unauthorized API Calls.", + "text": "To fix the issue, it is recommended to establish a metric filter and alarm for unauthorized API Calls.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Log-Metriksfilter und ein Alarm für nicht autorisierte API-Aufrufe vorhanden sind", + "risk": "Wenn das Problem nicht gelöst wird, besteht das Risiko, dass Anwendungsfehler unentdeckt bleiben und bösartige Aktivitäten nicht rechtzeitig erkannt werden.", + "remediation": "Um das Problem zu beheben, wird empfohlen, einen Metriksfilter und Alarm für nicht autorisierte API-Aufrufe einzurichten." + } } }, { "name": "log_metric_filters_sign_in_without_mfa", "title": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA.", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity. 
It is essential to detect Management Console sign-in without MFA to prevent unauthorized access.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\(\\s*\\$\\.eventName\\s*=\\s*\\\"ConsoleLogin\\\"\\)\\s+&&\\s+\\(\\s*\\$.additionalEventData\\.MFAUsed\\s*!=\\s*\\\"Yes\\\"\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for Console sign-ins without MFA.", + "text": "It is recommended that a metric filter and alarm be established for Console sign-ins without MFA. Follow the instructions in the AWS documentation to create a metric filter and alarm.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokoll-Metriksfilter und ein Alarm für die Anmeldung in der Management Console ohne MFA vorhanden sind.", + "risk": "Das Überwachen nicht autorisierter API-Aufrufe hilft dabei, Anwendungsfehler aufzudecken und die Zeit zur Erkennung bösartiger Aktivitäten zu verkürzen. Es ist unerlässlich, Anmeldungen in der Management Console ohne MFA zu erkennen, um unbefugten Zugriff zu verhindern.", + "remediation": "Es wird empfohlen, einen Metriksfilter und Alarm für Anmeldungen in der Konsole ohne MFA einzurichten. Befolgen Sie die Anweisungen in der AWS-Dokumentation, um einen Metriksfilter und Alarm zu erstellen." 
+ } } }, { "name": "log_metric_filters_console_login_no_mfa", "title": "Ensure a log metric filter and alarm exist for Management Console sign-in without MFA.", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Unauthorized API calls can go unnoticed without monitoring, increasing the risk of application errors and delayed detection of malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\(\\s*\\$\\.eventName\\s*=\\s*\\\"ConsoleLogin\\\"\\)\\s+&&\\s+\\(\\s*\\$.additionalEventData\\.MFAUsed\\s*!=\\s*\\\"Yes\\\"\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for Management Console sign-ins without MFA.", + "text": "To ensure the security of Management Console sign-ins, establish a metric filter and alarm to detect sign-ins without MFA.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokoll-Metrikfilter und Alarm für Management-Konsolen-Anmeldungen ohne MFA vorhanden sind.", + "risk": "Unberechtigte API-Aufrufe können unbemerkt bleiben, ohne Überwachung, was das Risiko von Anwendungsfehlern und verzögerter Erkennung von bösartigen Aktivitäten erhöht.", + "remediation": "Um die Sicherheit von Management-Konsolen-Anmeldungen zu gewährleisten, richten Sie einen Metrikfilter und einen Alarm ein, um Anmeldungen ohne MFA zu erkennen." 
+ } } }, { "name": "log_metric_filters_root_account_usage", - "title": "Ensure a log metric filter and alarm exist for usage of root account.", + "title": "Ensure a log metric filter and alarm exist for usage of the root account", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Failure to monitor unauthorized API calls may result in delayed detection of application errors and malicious activity.", "severity": "medium", "detect": { - "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.userIdentity\\.type\\s*=\\s*\\\"Root\\\".+\\$\\.userIdentity\\.invokedBy NOT EXISTS.+\\$\\.eventType\\s*!=\\s*\\\"AwsServiceEvent\\\"\"))" + "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with (empty, --> is(aws_cloudwatch_log_group) with (any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.userIdentity\\.type\\s*=\\s*\\\"Root\\\".+\\$\\.userIdentity\\.invokedBy NOT EXISTS.+\\$\\.eventType\\s*!=\\s*\\\"AwsServiceEvent\\\"\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for usage of root account.", + "text": "To ensure proper monitoring of unauthorized API calls, it is highly recommended to establish a metric filter and alarm for usage of the root account.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrikfilter und ein Alarm für die Nutzung des Root-Kontos vorhanden sind", + "risk": "Die fehlende Überwachung nicht 
autorisierter API-Aufrufe kann zu verzögerter Erkennung von Anwendungsfehlern und bösartigen Aktivitäten führen.", + "remediation": "Um eine ordnungsgemäße Überwachung nicht autorisierter API-Aufrufe sicherzustellen, wird dringend empfohlen, einen Metrikfilter und einen Alarm für die Nutzung des Root-Kontos einzurichten." + } } }, { "name": "log_metric_filters_iam_policy_changes", - "title": "Ensure a log metric filter and alarm exist for IAM policy changes.", + "title": "Ensure a log metric filter and alarm exist for IAM policy changes", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Failure to monitor unauthorized API calls may result in undetected application errors and delays in identifying malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*DeleteGroupPolicy.+\\$\\.eventName\\s*=\\s*DeleteRolePolicy.+\\$\\.eventName\\s*=\\s*DeleteUserPolicy.+\\$\\.eventName\\s*=\\s*PutGroupPolicy.+\\$\\.eventName\\s*=\\s*PutRolePolicy.+\\$\\.eventName\\s*=\\s*PutUserPolicy.+\\$\\.eventName\\s*=\\s*CreatePolicy.+\\$\\.eventName\\s*=\\s*DeletePolicy.+\\$\\.eventName\\s*=\\s*CreatePolicyVersion.+\\$\\.eventName\\s*=\\s*DeletePolicyVersion.+\\$\\.eventName\\s*=\\s*AttachRolePolicy.+\\$\\.eventName\\s*=\\s*DetachRolePolicy.+\\$\\.eventName\\s*=\\s*AttachUserPolicy.+\\$\\.eventName\\s*=\\s*DetachUserPolicy.+\\$\\.eventName\\s*=\\s*AttachGroupPolicy.+\\$\\.eventName\\s*=\\s*DetachGroupPolicy\\\"))\\s*\\$\\.userIdentity\\.type\\s*=\\s*\\\"Root\\\".+\\$\\.userIdentity\\.invokedBy NOT 
EXISTS.+\\$\\.eventType\\s*!=\\s*\\\"AwsServiceEvent\\\"\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for IAM policy changes.", + "text": "To address this issue, establish a metric filter and alarm for IAM policy changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrikfilter und Alarm für IAM-Richtlinienänderungen vorhanden sind", + "risk": "Das Versäumnis, unbefugte API-Aufrufe zu überwachen, kann zu unentdeckten Anwendungsfehlern und Verzögerungen bei der Identifizierung von bösartigen Aktivitäten führen.", + "remediation": "Um dieses Problem zu beheben, richten Sie einen Metrikfilter und Alarm für IAM-Richtlinienänderungen ein." + } } }, { "name": "log_metric_filters_cloud_trail_configuration_changes", - "title": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes.", + "title": "Ensure a log metric filter and alarm exist for CloudTrail configuration changes", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], + "categories": ["security", "compliance"], "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*CreateTrail.+\\$\\.eventName\\s*=\\s*UpdateTrail.+\\$\\.eventName\\s*=\\s*DeleteTrail.+\\$\\.eventName\\s*=\\s*StartLogging.+\\$\\.eventName\\s*=\\s*StopLogging\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for cloudtrail configuration changes.", + "text": "It is recommended to establish a 
metric filter and alarm for CloudTrail configuration changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokoll-Metrikfilter und ein Alarm für Änderungen an der CloudTrail-Konfiguration vorhanden sind", + "risk": "Die Überwachung nicht autorisierter API-Aufrufe hilft dabei, Anwendungsfehler aufzudecken und kann die Zeit zur Erkennung böswilliger Aktivitäten verkürzen.", + "remediation": "Es wird empfohlen, einen Metrikfilter und einen Alarm für Änderungen an der CloudTrail-Konfiguration einzurichten." + } } }, { "name": "log_metric_filters_authentication_failures", - "title": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures.", + "title": "Ensure a log metric filter and alarm exist for AWS Management Console authentication failures", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls helps reveal application errors and reduces time to detect malicious activity", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*ConsoleLogin.+\\$\\.errorMessage\\s*=\\s*\\\"Failed authentication\\\"))\\s*\\$\\.eventName\\s*=\\s*CreateTrail.+\\$\\.eventName\\s*=\\s*UpdateTrail.+\\$\\.eventName\\s*=\\s*DeleteTrail.+\\$\\.eventName\\s*=\\s*StartLogging.+\\$\\.eventName\\s*=\\s*StopLogging\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for AWS Management Console 
authentication failures.", + "text": "Establish a metric filter and alarm for AWS Management Console authentication failures.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokoll-Metrikenfilter und ein Alarm für AWS Management Console Authentifizierungsfehler vorhanden sind", + "risk": "Die Überwachung nicht autorisierter API-Aufrufe hilft, Anwendungsfehler aufzudecken und die Erkennungszeit für bösartige Aktivitäten zu reduzieren.", + "remediation": "Richten Sie einen Metrikenfilter und einen Alarm für AWS Management Console Authentifizierungsfehler ein." + } } }, { "name": "log_metric_filters_kms_key_deletion", - "title": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer created KMS CMKs.", + "title": "Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer-created KMS CMKs.", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls will help reveal application errors and reduce time to detect malicious activity. 
This ensures that any unauthorized actions related to customer-created KMS CMKs can be detected promptly.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventSource\\s*=\\s*kms.amazonaws.com.+\\$\\.eventName\\s*=\\s*DisableKey.+\\$\\.eventName\\s*=\\s*ScheduleKeyDeletion\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for disabling or scheduled deletion of customer created KMS CMKs.", + "text": "To ensure security, it is recommended to establish a metric filter and alarm for disabling or scheduled deletion of customer-created KMS CMKs.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrikfilter und ein Alarm existieren, um das Deaktivieren oder geplante Löschen von vom Kunden erstellten KMS CMKs zu überwachen.", + "risk": "Durch die Überwachung nicht autorisierter API-Aufrufe werden Anwendungsfehler aufgedeckt und die Zeit zur Erkennung bösartiger Aktivitäten reduziert. Dadurch können unbefugte Aktionen im Zusammenhang mit vom Kunden erstellten KMS CMKs rechtzeitig erkannt werden.", + "remediation": "Um die Sicherheit zu gewährleisten, wird empfohlen, einen Metrikfilter und einen Alarm für das Deaktivieren oder geplante Löschen von vom Kunden erstellten KMS CMKs einzurichten." 
+ } } }, { "name": "log_metric_filters_s3_bucket_policy_changes", - "title": "Ensure a log metric filter and alarm exist for S3 bucket policy changes.", + "title": "Ensure a log metric filter and alarm exist for S3 bucket policy changes", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls will help reveal application errors and reduce time to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventSource\\s*=\\s*s3.amazonaws.com.+\\$\\.eventName\\s*=\\s*PutBucketAcl.+\\$\\.eventName\\s*=\\s*PutBucketPolicy.+\\$\\.eventName\\s*=\\s*PutBucketCors.+\\$\\.eventName\\s*=\\s*PutBucketLifecycle.+\\$\\.eventName\\s*=\\s*PutBucketReplication.+\\$\\.eventName\\s*=\\s*DeleteBucketPolicy.+\\$\\.eventName\\s*=\\s*DeleteBucketCors.+\\$\\.eventName\\s*=\\s*DeleteBucketLifecycle.+\\$\\.eventName\\s*=\\s*DeleteBucketReplication\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for S3 bucket policy changes.", + "text": "It is recommended to establish a metric filter and alarm for S3 bucket policy changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokoll-Metrikenfilter und ein Alarm für Änderungen an S3-Bucket-Richtlinien vorhanden sind", + "risk": "Die Überwachung unberechtigter API-Aufrufe hilft dabei, Anwendungsfehler aufzudecken und die Erkennungszeit für bösartige Aktivitäten zu verkürzen.", + 
"remediation": "Es wird empfohlen, einen Metrikenfilter und Alarm für Änderungen an S3-Bucket-Richtlinien einzurichten." + } } }, { "name": "log_metric_filters_config_configuration_changes", "title": "Ensure a log metric filter and alarm exist for AWS Config configuration changes.", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized Configuration API calls helps reveal application errors and may reduce the time taken to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventSource\\s*=\\s*config.amazonaws.com.+\\$\\.eventName\\s*=\\s*StopConfigurationRecorder.+\\$\\.eventName\\s*=\\s*DeleteDeliveryChannel.+\\$\\.eventName\\s*=\\s*PutDeliveryChannel.+\\$\\.eventName\\s*=\\s*PutConfigurationRecorder\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for AWS Config configuration changes.", + "text": "To mitigate this issue, it is recommended to establish a metric filter and alarm for AWS Config configuration changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrikfilter und ein Alarm für AWS Config Konfigurationsänderungen vorhanden sind.", + "risk": "Die Überwachung nicht autorisierter Konfigurations-API-Aufrufe hilft, Anwendungsfehler aufzudecken und die Zeit zur Erkennung bösartiger Aktivitäten zu verkürzen.", + "remediation": "Um dieses Problem zu beheben, wird empfohlen, einen 
Metrikfilter und einen Alarm für AWS Config Konfigurationsänderungen einzurichten." + } } }, { "name": "log_metric_filters_security_group_changes", "title": "Ensure a log metric filter and alarm exist for security group changes.", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls will help reveal application errors and reduce time to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*AuthorizeSecurityGroupIngress.+\\$\\.eventName\\s*=\\s*AuthorizeSecurityGroupEgress.+\\$\\.eventName\\s*=\\s*RevokeSecurityGroupIngress.+\\$\\.eventName\\s*=\\s*RevokeSecurityGroupEgress.+\\$\\.eventName\\s*=\\s*CreateSecurityGroup.+\\$\\.eventName\\s*=\\s*DeleteSecurityGroup\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for security group changes.", + "text": "It is recommended to establish a metric filter and alarm for security group changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass eine Protokoll-Metrikenfilterung und ein Alarm für Sicherheitsgruppenänderungen existieren.", + "risk": "Die Überwachung nicht autorisierter API-Aufrufe hilft, Anwendungsfehler aufzudecken und die Zeit zur Erkennung von bösartigen Aktivitäten zu verkürzen.", + "remediation": "Es wird empfohlen, eine Metrikenfilterung und einen Alarm für Sicherheitsgruppenänderungen einzurichten." 
+ } } }, { "name": "log_metric_filters_network_acl_changes", - "title": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL).", + "title": "Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL)", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls will help reveal application errors and reduce the time to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*CreateNetworkAcl.+\\$\\.eventName\\s*=\\s*CreateNetworkAclEntry.+\\$\\.eventName\\s*=\\s*DeleteNetworkAcl.+\\$\\.eventName\\s*=\\s*DeleteNetworkAclEntry.+\\$\\.eventName\\s*=\\s*ReplaceNetworkAclEntry.+\\$\\.eventName\\s*=\\s*ReplaceNetworkAclAssociation\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for network ACL changes.", + "text": "To ensure proper monitoring of Network ACL changes, it is recommended to establish a metric filter and alarm.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokoll-Metrikenfilter und eine Alarmierung für Änderungen an Network Access Control Lists (NACL) vorhanden ist", + "risk": "Die Überwachung unberechtigter API-Aufrufe hilft bei der Aufdeckung von Anwendungsfehlern und verringert die Zeit, die zum Erkennen bösartiger Aktivitäten benötigt wird.", + "remediation": "Um eine ordnungsgemäße Überwachung von 
Netzwerk-ACL-Änderungen sicherzustellen, wird empfohlen, einen Metrikenfilter und eine Alarmierung einzurichten." + } } }, { "name": "log_metric_filters_network_gateway_changes", - "title": "Ensure a log metric filter and alarm exist for changes to network gateways.", + "title": "Ensure a log metric filter and alarm exist for changes to network gateways", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls helps reveal application errors and reduces time to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*CreateCustomerGateway.+\\$\\.eventName\\s*=\\s*DeleteCustomerGateway.+\\$\\.eventName\\s*=\\s*AttachInternetGateway.+\\$\\.eventName\\s*=\\s*CreateInternetGateway.+\\$\\.eventName\\s*=\\s*DeleteInternetGateway.+\\$\\.eventName\\s*=\\s*DetachInternetGateway\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for network gateway changes.", + "text": "Establish a metric filter and alarm for network gateway changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrikenfilter und eine Alarmierung für Änderungen an Netzwerk-Gateways vorhanden sind", + "risk": "Die Überwachung unbefugter API-Aufrufe hilft bei der Aufdeckung von Anwendungsfehlern und verringert die Zeit zur Erkennung bösartiger Aktivitäten.", + "remediation": "Richten Sie einen Metrikenfilter und eine 
Alarmierung für Änderungen an Netzwerk-Gateways ein." + } } }, { "name": "log_metric_filters_route_table_changes", "title": "Ensure a log metric filter and alarm exist for route table changes", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls can help identify application errors and facilitate the detection of malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*CreateRoute.+\\$\\.eventName\\s*=\\s*CreateRouteTable.+\\$\\.eventName\\s*=\\s*ReplaceRoute.+\\$\\.eventName\\s*=\\s*ReplaceRouteTableAssociation.+\\$\\.eventName\\s*=\\s*DeleteRouteTable.+\\$\\.eventName\\s*=\\s*DeleteRoute.+\\$\\.eventName\\s*=\\s*DisassociateRouteTable\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for route table changes.", + "text": "To address this issue, it is recommended to establish a metric filter and alarm for route table changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrikfilter und eine Alarmierung für Änderungen an der Routentabelle vorhanden sind.", + "risk": "Die Überwachung nicht autorisierter API-Aufrufe kann dazu beitragen, Anwendungsfehler zu identifizieren und die Erkennung von bösartigen Aktivitäten zu ermöglichen.", + "remediation": "Um dieses Problem zu beheben, wird empfohlen, einen Metrikfilter und eine Alarmierung für Änderungen an der Routentabelle 
einzurichten." + } } }, { "name": "log_metric_filters_vpc_changes", - "title": "Ensure a log metric filter and alarm exist for VPC changes.", + "title": "Ensure a log metric filter and alarm exist for VPC changes", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", + "categories": ["security", "compliance"], + "risk": "Monitoring unauthorized API calls will help reveal application errors and reduce the time to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventName\\s*=\\s*CreateVpc.+\\$\\.eventName\\s*=\\s*DeleteVpc.+\\$\\.eventName\\s*=\\s*ModifyVpcAttribute.+\\$\\.eventName\\s*=\\s*AcceptVpcPeeringConnection.+\\$\\.eventName\\s*=\\s*CreateVpcPeeringConnection.+\\$\\.eventName\\s*=\\s*DeleteVpcPeeringConnection.+\\$\\.eventName\\s*=\\s*RejectVpcPeeringConnection.+\\$\\.eventName\\s*=\\s*AttachClassicLinkVpc.+\\$\\.eventName\\s*=\\s*DetachClassicLinkVpc.+\\$\\.eventName\\s*=\\s*DisableVpcClassicLink.+\\$\\.eventName\\s*=\\s*EnableVpcClassicLink\"))" }, "remediation": { - "text": "It is recommended that a metric filter and alarm be established for VPC changes.", + "text": "To address this issue, it is recommended to establish a metric filter and alarm for VPC changes.", "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrik-Filter und ein Alarm für VPC-Änderungen vorhanden sind", + "risk": "Die Überwachung nicht autorisierter API-Aufrufe hilft bei der Aufdeckung von Anwendungsfehlern und reduziert die Zeit zur Erkennung 
von bösartigen Aktivitäten.", + "remediation": "Um dieses Problem zu beheben, wird empfohlen, einen Metrikfilter und Alarm für VPC-Änderungen einzurichten." + } } }, { "name": "log_metric_filters_aws_org_changes", - "title": "Ensure a log metric filter and alarm exist for AWS Organizations changes.", + "title": "Ensure a log metric filter and alarm exist for AWS Organizations changes", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], + "categories": ["security", "compliance"], "risk": "Monitoring unauthorized API calls will help reveal application errors and may reduce time to detect malicious activity.", "severity": "medium", "detect": { "resoto": "is(aws_cloud_trail) and trail_is_multi_region_trail=true and trail_status.is_logging=true with(empty, --> is(aws_cloudwatch_log_group) with(any, --> is(aws_cloudwatch_metric_filter) and filter_pattern~\"\\s*\\$\\.eventSource\\s*=\\s*organizations.amazonaws.com.+\\$\\.eventName\\s*=\\s*\\\"AcceptHandshake\\\".+\\$\\.eventName\\s*=\\s*\\\"AttachPolicy\\\".+\\$\\.eventName\\s*=\\s*\\\"CreateAccount\\\".+\\$\\.eventName\\s*=\\s*\\\"CreateOrganizationalUnit\\\".+\\$\\.eventName\\s*=\\s*\\\"CreatePolicy\\\".+\\$\\.eventName\\s*=\\s*\\\"DeclineHandshake\\\".+\\$\\.eventName\\s*=\\s*\\\"DeleteOrganization\\\".+\\$\\.eventName\\s*=\\s*\\\"DeleteOrganizationalUnit\\\".+\\$\\.eventName\\s*=\\s*\\\"DeletePolicy\\\".+\\$\\.eventName\\s*=\\s*\\\"DetachPolicy\\\".+\\$\\.eventName\\s*=\\s*\\\"DisablePolicyType\\\".+\\$\\.eventName\\s*=\\s*\\\"EnablePolicyType\\\".+\\$\\.eventName\\s*=\\s*\\\"InviteAccountToOrganization\\\".+\\$\\.eventName\\s*=\\s*\\\"LeaveOrganization\\\".+\\$\\.eventName\\s*=\\s*\\\"MoveAccount\\\".+\\$\\.eventName\\s*=\\s*\\\"RemoveAccountFromOrganization\\\".+\\$\\.eventName\\s*=\\s*\\\"UpdatePolicy\\\".+\\$\\.eventName\\s*=\\s*\\\"UpdateOrganizationalUnit\\\"\"))" }, "remediation": { - "text": "Create a metric filter and alarm for AWS organization changes.", - "url": 
"https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html" + "text": "Create a metric filter and alarm for AWS organization changes", + "url": "https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging_data_events_with_cloudtrail.html#logging_data_events_with_cloudtrail_create_trail" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ein Protokollmetrikfilter und ein Alarm für AWS-Organisationsänderungen vorhanden sind", + "risk": "Das Überwachen nicht autorisierter API-Aufrufe hilft dabei, Anwendungsfehler aufzudecken und die Zeit zur Erkennung von bösartigen Aktivitäten zu verkürzen.", + "remediation": "Erstellen Sie einen Metrikfilter und eine Alarmierung für AWS-Organisationsänderungen" + } } }, { "name": "security_hub_enabled", - "title": "Check if Security Hub is enabled and its standard subscriptions.", + "title": "Ensure Security Hub is enabled and its standard subscriptions", "result_kinds": ["aws_cloud_trail"], - "categories": [ - "security", - "compliance" - ], - "risk": "AWS Security Hub gives you a comprehensive view of your security alerts and security posture across your AWS accounts.", + "categories": ["security", "compliance"], + "risk": "If Security Hub is not enabled, security teams will lack a comprehensive view of security alerts and posture across AWS accounts.", "severity": "medium", "detect": { - "manual": "Check if Security Hub is enabled in all regions you operate via: aws securityhub get-enabled-standards" + "manual": "Check if Security Hub is enabled in all regions you operate by running the command: aws securityhub get-enabled-standards" }, "remediation": { - "text": "Security Hub is Regional. 
When you enable or disable a security standard, it is enabled or disabled only in the current Region or in the Region that you specify.", + "text": "Remember that Security Hub is regional, so enabling or disabling a security standard only affects the current region or the specified region. Follow the steps in the documentation to enable or disable standards in Security Hub.", "url": "https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards-enable-disable.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Security Hub aktiviert ist und seine Standard-Abonnements", + "risk": "Wenn Security Hub nicht aktiviert ist, werden Sicherheitsteams keine umfassende Übersicht über Sicherheitswarnungen und den Sicherheitsstatus in AWS-Konten haben.", + "remediation": "Beachten Sie, dass Security Hub regional ist, daher wirkt sich das Aktivieren oder Deaktivieren eines Sicherheitsstandards nur auf die aktuelle Region oder die angegebene Region aus. Befolgen Sie die Schritte in der Dokumentation, um Standards in Security Hub zu aktivieren oder zu deaktivieren." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_cloudwatch.json b/resotocore/resotocore/static/report/checks/aws/aws_cloudwatch.json new file mode 100644 index 0000000000..f90ca4bf67 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_cloudwatch.json @@ -0,0 +1,75 @@ +{ + "provider": "aws", + "service": "cloudwatch", + "checks": [ + { + "name": "cross_account_sharing_enabled", + "title": "Ensure Cross-Account Sharing is Configured for CloudWatch Logs", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "If cross-account sharing for CloudWatch logs is not enabled, it complicates security monitoring and compliance auditing across diverse AWS accounts. 
This may lead to increased risk of undetected security incidents and failure to meet compliance standards.", + "severity": "medium", + "detect": { + "resoto": "is(aws_account) with(empty, -[1:2]-> is(aws_iam_role) and name=\"CloudWatch-CrossAccountSharingRole\")" + }, + "remediation": { + "text": "Navigate to Settings in the CloudWatch Console. Then, under 'Configure', select 'share data' and provide the ID of the monitoring account. This sets up cross-account sharing for CloudWatch logs.", + "url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Cross-Account-Cross-Region.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Cross-Account-Freigabe für CloudWatch-Logs konfiguriert ist", + "risk": "Wenn die Cross-Account-Freigabe für CloudWatch-Logs nicht aktiviert ist, erschwert dies die Überwachung der Sicherheit und die Prüfung der Einhaltung von Vorschriften in verschiedenen AWS-Konten. Dies kann zu erhöhtem Risiko nicht erkannter Sicherheitsvorfälle und Nichteinhaltung von Standards führen.", + "remediation": "Gehen Sie zur CloudWatch Console auf die Einstellungen. Wählen Sie dann unter 'Konfigurieren' 'Daten freigeben' und geben Sie die ID des Überwachungskontos ein. Dadurch wird die Cross-Account-Freigabe für CloudWatch-Logs eingerichtet." + } + } + }, + { + "name": "log_group_encryption_at_rest_enabled", + "title": "Ensure AWS KMS is Protecting CloudWatch Log Groups", + "result_kinds": ["aws_cloudwatch_log_group"], + "categories": ["retention"], + "risk": "Not using customer-managed KMS for encryption can compromise the confidentiality of the CloudWatch log group data. Neglecting encryption controls can result in unauthorized access and potential data loss.", + "severity": "medium", + "detect": { + "resoto": "is(aws_cloudwatch_log_group) with(empty, --> is(aws_kms_key))" + }, + "remediation": { + "text": "To address this, associate your CloudWatch Log Group with a KMS Key. 
Ensure that the specified KMS Key is used for encrypting all ingested data for the log group. Implementing this association is essential for CloudWatch Logs to decrypt the data when requested.", + "url": "https://docs.aws.amazon.com/cli/latest/reference/logs/associate-kms-key.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass AWS KMS CloudWatch Log-Gruppen schützt", + "risk": "Die Verwendung von nicht kundenverwalteter KMS-Verschlüsselung kann die Vertraulichkeit der Daten in der CloudWatch Log-Gruppe gefährden. Die Vernachlässigung von Verschlüsselungskontrollen kann zu unbefugtem Zugriff und potenziellem Datenverlust führen.", + "remediation": "Um dies zu beheben, verknüpfen Sie Ihre CloudWatch Log-Gruppe mit einem KMS-Schlüssel. Stellen Sie sicher, dass der angegebene KMS-Schlüssel zur Verschlüsselung aller eingespeisten Daten für die Log-Gruppe verwendet wird. Die Implementierung dieser Verknüpfung ist für CloudWatch-Logs unerlässlich, um die Daten auf Anforderung zu entschlüsseln." + } + } + }, + { + "name": "log_group_retention_days_at_least_365", + "title": "Ensure that CloudWatch Log Groups Retain Data for at Least 365 Days", + "result_kinds": ["aws_cloudwatch_log_group"], + "categories": ["retention"], + "risk": "Failure to retain CloudWatch Logs for at least 365 days can result in non-compliance with regulatory requirements for long-term log storage. Additionally, it could hamper forensic analysis and retrospective detection of long-term security breach patterns.", + "severity": "medium", + "detect": { + "resoto": "is(aws_cloudwatch_log_group) and group_retention_in_days<{{log_group_retention_days}}" + }, + "default_values": { + "log_group_retention_days": "365d" + }, + "remediation": { + "text": "Apply a log retention policy of at least 365 days to CloudWatch Log Groups. 
This ensures the continuity of logs and traces to aid in security analysis and regulatory obligations.", + "url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass CloudWatch Log-Gruppen Daten für mindestens 365 Tage aufbewahren", + "risk": "Das Nichtvorhandensein einer Datenbewahrung von CloudWatch-Logs für mindestens 365 Tage kann zu Nichteinhaltung von Vorschriften für die langfristige Speicherung von Logs führen. Außerdem kann es die forensische Analyse und rückwirkende Erkennung von Sicherheitsverletzungen mit Langzeitmuster beeinträchtigen.", + "remediation": "Legen Sie eine Protokollbewahrungspolitik von mindestens 365 Tagen für CloudWatch-Log-Gruppen fest. Dadurch wird die Fortführung von Protokollen und Traces sichergestellt, um bei Sicherheitsanalysen und regulatorischen Verpflichtungen zu helfen." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_config.json b/resotocore/resotocore/static/report/checks/aws/aws_config.json index db6bd0ceed..2041eb70fa 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_config.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_config.json @@ -7,14 +7,43 @@ "title": "Ensure AWS Config is enabled in all regions.", "result_kinds": ["aws_region"], "categories": ["security", "compliance"], - "risk": "The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking and compliance auditing.", + "risk": "Failure to enable AWS Config in all regions may result in the inability to capture the AWS configuration item history, leading to a lack of security analysis, resource change tracking, and compliance auditing.", "severity": "medium", "detect": { "resoto": "is(aws_region) with(empty, --> is(aws_config_recorder) and recorder_status.recording=true and recorder_group.all_supported=true and 
recorder_status.last_status=SUCCESS)" }, "remediation": { - "text": "It is recommended to enable AWS Config be enabled in all regions.", + "text": "To address this issue, it is recommended to enable AWS Config in all regions to ensure proper security analysis, resource change tracking, and compliance auditing.", "url": "https://aws.amazon.com/blogs/mt/aws-config-best-practices/" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass AWS Config in allen Regionen aktiviert ist.", + "risk": "Die Unterlassung, AWS Config in allen Regionen zu aktivieren, kann dazu führen, dass die AWS-Konfigurationsverlaufseinträge nicht erfasst werden können. Dies führt zu einem Mangel an Sicherheitsanalyse, Ressourcenänderungsnachverfolgung und Compliance-Prüfungen.", + "remediation": "Um dieses Problem zu beheben, wird empfohlen, AWS Config in allen Regionen zu aktivieren, um eine ordnungsgemäße Sicherheitsanalyse, Ressourcenänderungsnachverfolgung und Compliance-Prüfung sicherzustellen." + } + } + }, + { + "name": "remediation_enabled", + "title": "Ensure AWS Config Remediation is Enabled", + "result_kinds": ["aws_region"], + "categories": ["security", "compliance"], + "risk": "Failure to implement AWS Config remediation (manual or automatic through System Manager) configurations can lead to noncompliant resources not being remediated, leaving potential vulnerabilities in the AWS cloud infrastructure.", + "severity": "medium", + "detect": { + "manual": "AWS Config allows for the remediation of noncompliant resources evaluated by AWS Config Rules. Remediation is applied through AWS Systems Manager Automation documents, defining the actions to be performed on noncompliant AWS resources." 
+ }, + "remediation": { + "text": "To remediate the issue, manual configuration can be done with user-defined reviews, or automatic remediation can be set up using AWS Config.", + "url": "https://docs.aws.amazon.com/config/latest/developerguide/remediation.html#setup-manualremediation" + }, + "localizations": { + "de": { + "title": "Sicherstellen, dass die AWS Config Behebung aktiviert ist", + "risk": "Das Versäumen, AWS Config-Behebungskonfigurationen (manuell oder automatisch über System Manager) zu implementieren, kann dazu führen, dass nicht konforme Ressourcen nicht behoben werden und potenzielle Sicherheitslücken in der AWS Cloud-Infrastruktur hinterlassen.", + "remediation": "Um das Problem zu beheben, kann manuelle Konfiguration mit benutzerdefinierten Überprüfungen durchgeführt werden oder automatische Behebung mithilfe von AWS Config eingerichtet werden." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_dms.json b/resotocore/resotocore/static/report/checks/aws/aws_dms.json new file mode 100644 index 0000000000..aaee24b34b --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_dms.json @@ -0,0 +1,28 @@ +{ + "provider": "aws", + "service": "dms", + "checks": [ + { + "name": "public_ip_address", + "title": "Ensure EC2 Instances for Database Migration Service (DMS) are not accessible via Public IP", + "result_kinds": ["aws_ec2_instance"], + "categories": [], + "risk": "Exposing an EC2 instance directly to the internet increases the attack surface and poses a high risk of compromise. In the context of DMS Replication instances, this can have significant compliance and security implications.", + "severity": "medium", + "detect": { + "manual": "Go to AWS DMS and select replication instances." 
+ }, + "remediation": { + "text": "To fix the issue, ensure that replication instances are created in private subnets and do not have a public IP.", + "url": "https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.PublicPrivate.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass EC2-Instanzen für den Database Migration Service (DMS) nicht über eine öffentliche IP-Adresse erreichbar sind", + "risk": "Das direkte Bereitstellen einer EC2-Instanz im Internet erhöht die Angriffsfläche und birgt ein hohes Risiko für Kompromittierung. Im Zusammenhang mit DMS-Replikationsinstanzen kann dies erhebliche Auswirkungen auf die Compliance und Sicherheit haben.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass Replication Instances in privaten Subnetzen erstellt werden und keine öffentliche IP-Adresse haben." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_dynamodb.json b/resotocore/resotocore/static/report/checks/aws/aws_dynamodb.json new file mode 100644 index 0000000000..2b8c845ea7 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_dynamodb.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "dynamodb", + "checks": [ + { + "name": "table_kms_encryption_enabled", + "title": "Ensure DynamoDB Table is Configured with KMS CMK Encryption", + "result_kinds": ["aws_dynamodb_table"], + "categories": ["security", "compliance"], + "risk": "Without encryption at rest using KMS CMK, your sensitive user data stored in DynamoDB is at risk. 
Failure to protect this data can result in breaches or loss of confidential data.
+ } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_ec2.json b/resotocore/resotocore/static/report/checks/aws/aws_ec2.json index ec666207b8..bcfa4da42f 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_ec2.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_ec2.json @@ -4,55 +4,102 @@ "checks": [ { "name": "snapshot_encrypted", - "title": "Ensure EBS Snapshots are encrypted and not public.", + "title": "Ensure that EBS Snapshots are both encrypted and not publicly accessible", "result_kinds": ["aws_ec2_snapshot"], "categories": ["security", "compliance"], - "risk": "When you share a snapshot, you are giving others access to all of the data on the snapshot. Share snapshots only with people with whom you want to share all of your snapshot data.", + "risk": "Sharing snapshots publicly can expose all the data they contain. Worst, if these snapshots are not encrypted, unauthorized access and potential data breaches could occur. To maintain data privacy and comply with security standards, limit snippet access and use encryption.", "severity": "medium", "detect": { "resoto": "is(aws_ec2_snapshot) and encrypted=false" }, "remediation": { - "text": "Ensure the snapshot are encrypted.", + "text": "You can enforce snapshot encryption by using AWS Key Management Service (AWS KMS) keys. Modify the permissions of each snapshot to restrict public access, making them exclusively accessible to specific AWS accounts.", "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-snapshot.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass EBS-Snapshots sowohl verschlüsselt als auch nicht öffentlich zugänglich sind", + "risk": "Das öffentliche Teilen von Snapshots kann alle enthaltenen Daten offenlegen. 
Im schlimmsten Fall könnten durch unbefugten Zugriff und potenzielle Datenverletzungen erhebliche Sicherheitsprobleme auftreten. Um die Datensicherheit zu gewährleisten und Sicherheitsstandards einzuhalten, beschränken Sie den Zugriff auf die Snippets und verwenden Sie Verschlüsselung.", + "remediation": "Sie können die Snapshot-Verschlüsselung durch die Verwendung von AWS Key Management Service (AWS KMS)-Schlüsseln erzwingen. Ändern Sie die Berechtigungen jedes Snapshots, um den öffentlichen Zugriff einzuschränken und sie ausschließlich für bestimmte AWS-Konten zugänglich zu machen." + } } }, { "name": "unused_elastic_ip", - "title": "Check if there is any unassigned Elastic IP.", + "title": "Ensure There are no Unassigned Elastic IPs in Your AWS Environment", "result_kinds": ["aws_ec2_elastic_ip"], "categories": ["cost"], - "risk": "Unassigned Elastic IPs may result in extra cost.", + "risk": "Unused Elastic IPs can lead to unnecessary expenses. Non-utilized resources can artificially inflate costs and contribute to inefficient resource management.", "severity": "medium", "detect": { "resoto": "is(aws_ec2_elastic_ip) with(empty, <-- is(aws_ec2_instance, aws_ec2_network_interface))" }, "remediation": { - "text": "Ensure Elastic IPs are not unassigned.", + "text": "Review all Elastic IPs within your AWS environment. Confirm they are appropriately associated with running AWS instances or network interfaces. Release any unused Elastic IPs.", "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass es in Ihrer AWS-Umgebung keine nicht zugewiesenen elastischen IP-Adressen gibt", + "risk": "Nicht verwendete elastische IP-Adressen können zu unnötigen Kosten führen. 
Nicht genutzte Ressourcen können Kosten künstlich erhöhen und zu einer ineffizienten Ressourcenverwaltung beitragen.", + "remediation": "Überprüfen Sie alle elastischen IP-Adressen in Ihrer AWS-Umgebung. Stellen Sie sicher, dass sie ordnungsgemäß mit laufenden AWS-Instanzen oder Netzwerkschnittstellen verbunden sind. Geben Sie nicht verwendete elastische IP-Adressen frei." + } + } + }, + { + "name": "instance_in_vpc", + "title": "Ensure All EC2 Instances Operate Within a VPC Instead of EC2-Classic", + "result_kinds": ["aws_ec2_instance"], + "categories": ["security", "compliance"], + "risk": "Using deprecated EC2-Classic could impose significant security and compliance risks as it no longer receives updates or support from AWS. Operational stability could also be compromised.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ec2_instance) and instance_subnet_id==null" + }, + "remediation": { + "text": "To mitigate the risk, replace EC2 instances deployed in EC2-Classic with ones operating within a VPC. This shift will enhance your security posture and align you with AWS's current practices.", + "url": "https://aws.amazon.com/blogs/aws/ec2-classic-is-retiring-heres-how-to-prepare/" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Introduction.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass alle EC2-Instanzen innerhalb eines VPCs anstelle von EC2-Classic betrieben werden", + "risk": "Die Verwendung des veralteten EC2-Classic kann erhebliche Sicherheits- und Compliance-Risiken mit sich bringen, da es keine Updates oder Unterstützung von AWS mehr erhält. Auch die operative Stabilität könnte beeinträchtigt werden.", + "remediation": "Um das Risiko zu minimieren, ersetzen Sie EC2-Instanzen, die in EC2-Classic bereitgestellt sind, durch solche, die innerhalb eines VPCs betrieben werden. Diese Umstellung verbessert Ihre Sicherheitsposition und entspricht den aktuellen Praktiken von AWS." 
+ } } }, { "name": "internet_facing_with_instance_profile", - "title": "Check for internet facing EC2 instances with Instance Profiles attached.", + "title": "Ensure No Internet Facing EC2 Instances with Instance Profiles Attached Exist", "result_kinds": ["aws_ec2_instance"], "categories": ["security", "compliance"], - "risk": "Exposing an EC2 directly to internet increases the attack surface and therefore the risk of compromise.", + "risk": "Having a public EC2 instance increases the attack surface and exposes the server to potential threats. If an Instance Profile is attached to these instances, it could potentially provide access keys to unauthorized users.", "severity": "medium", "detect": { "resoto": "is(aws_ec2_instance) and instance_public_ip_address!=null and instance_iam_instance_profile!=null" }, "remediation": { - "text": "Ensure Elastic IPs are not unassigned.", - "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html" + "text": "Ensure Elastic IPs are not unassigned and that EC2 instances are not unnecessarily exposed to the Internet. Compartmentalize AWS services to limit exposure and mitigate risk.", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html" + }, + "url": "https://aws.amazon.com/premiumsupport/knowledge-center/associate-elastic-ip-ec2/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine öffentlich zugänglichen EC2-Instanzen mit angehängten Instanzprofilen vorhanden sind", + "risk": "Eine öffentliche EC2-Instanz erhöht die Angriffsfläche und setzt den Server potenziellen Bedrohungen aus. Wenn einem Solchen Instanzprofil angehängt ist, könnten unbefugte Benutzer potenziell Zugriffsschlüssel erhalten.", + "remediation": "Stellen Sie sicher, dass Elastic IPs nicht nicht zugewiesen sind und dass EC2-Instanzen nicht unnötig dem Internet ausgesetzt sind. Segmentieren Sie AWS-Services, um die Exposition zu begrenzen und Risiken zu minimieren." 
+ } } }, { "name": "old_instances", - "title": "Check EC2 Instances older than specific days.", + "title": "Ensure EC2 Instances Are Not Older Than Specific Days.", "result_kinds": ["aws_ec2_instance"], "categories": ["security", "compliance"], - "risk": "Having old instances within your AWS account could increase the risk of having vulnerable software.", + "risk": "Retaining old instances within your AWS account may heighten the risk of unsecured, outdated software. This could subsequently cause vulnerabilities, leading to potential data breaches or attacks.", "severity": "low", "detect": { "resoto": "is(aws_ec2_instance) and instance_status=running and age>{{old_instance_age}}" @@ -61,541 +108,991 @@ "old_instance_age": "180d" }, "remediation": { - "text": "Check if software running in the instance is up to date and patched accordingly.", + "text": "Ensure all software running on the instance is up-to-date and patched. Evaluate the necessity of each instance and decommission those that are obsolete or unused.", "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/viewing-patch-compliance-results.html" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Filtering.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass EC2-Instanzen nicht älter als eine bestimmte Anzahl von Tagen sind.", + "risk": "Das Beibehalten alter Instanzen in Ihrem AWS-Konto kann das Risiko von ungesicherter, veralteter Software erhöhen. Dies kann anschließend zu Schwachstellen führen, die zu potenziellen Datenverletzungen oder Angriffen führen können.", + "remediation": "Stellen Sie sicher, dass die gesamte auf der Instanz ausgeführte Software auf dem neuesten Stand und gepatcht ist. Bewerten Sie die Notwendigkeit jeder Instanz und nehmen Sie diejenigen außer Betrieb, die veraltet sind oder nicht verwendet werden."
+ } } }, { "name": "instance_profile_attached", - "title": "Ensure IAM instance roles are used for AWS resource access from instances", + "title": "Ensure AWS resource access from instances is carried out using IAM instance roles", "result_kinds": ["aws_ec2_instance"], "categories": ["security", "compliance"], - "risk": "AWS access from within AWS instances can be done by either encoding AWS keys into AWS API calls or by assigning the instance to a role which has an appropriate permissions policy for the required access. AWS IAM roles reduce the risks associated with sharing and rotating credentials that can be used outside of AWS itself. If credentials are compromised, they can be used from outside of the AWS account.", + "risk": "Utilising hard-coded AWS keys for resource access exposes your infrastructure to potential security risks. If these cloud-embedded credentials are compromised, unauthorized access could be gained from outside the AWS environment resulting in data breaches or infrastructure tampering.", "severity": "medium", "detect": { "resoto": "is(aws_ec2_instance) and instance_iam_instance_profile=null" }, "remediation": { - "text": "Create an IAM instance role if necessary and attach it to the corresponding EC2 instance..", + "text": "Ensure your AWS instances access resources using IAM roles. Create and attach these roles as necessary to each corresponding EC2 instance, thereby displacing any hard-coded credentials.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der Zugriff auf AWS-Ressourcen von Instanzen mit IAM-Instanzrollen durchgeführt wird", + "risk": "Die Verwendung von fest codierten AWS-Schlüsseln für den Ressourcenzugriff setzt Ihre Infrastruktur potenziellen Sicherheitsrisiken aus. 
Wenn diese in der Cloud eingebetteten Anmeldeinformationen kompromittiert werden, kann von außerhalb der AWS-Umgebung unbefugter Zugriff erfolgen, was zu Datenverstößen oder Eingriffen in die Infrastruktur führen kann.", + "remediation": "Stellen Sie sicher, dass Ihre AWS-Instanzen auf Ressourcen mit IAM-Rollen zugreifen. Erstellen und fügen Sie bei Bedarf diese Rollen zu jeder entsprechenden EC2-Instanz hinzu, um fest codierte Anmeldeinformationen zu ersetzen." + } } }, { "name": "public_ip_address", - "title": "Check for EC2 Instances with Public IP.", + "title": "Ensure EC2 Instances are not Exposed to Public IPs", "result_kinds": ["aws_ec2_instance"], "categories": ["security", "compliance"], - "risk": "Exposing an EC2 directly to internet increases the attack surface and therefore the risk of compromise.", + "risk": "Having EC2 instances openly exposed to the internet significantly increases the risk of cyber attacks and potential security breaches, thereby compromising data integrity.", "severity": "medium", "detect": { - "resoto": "is(aws_ec2_instance) and instance_public_ip_address!=null" + "resoto": "is(aws_ec2_instance) and instance_status==running and instance_public_ip_address!=null" }, "remediation": { - "text": "Use an ALB and apply WAF ACL.", + "text": "To mitigate this issue, employ an Application Load Balancer (ALB) as an intermediary and apply a Web Application Firewall (WAF) Access Control List (ACL) to effectively filter access.", "url": "https://aws.amazon.com/blogs/aws/aws-web-application-firewall-waf-for-application-load-balancers/" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass EC2-Instanzen nicht öffentlichen IP-Adressen ausgesetzt sind", + "risk": "Wenn EC2-Instanzen offen im Internet exponiert sind, erhöht sich das Risiko von Cyberangriffen und potenziellen Sicherheitsverletzungen erheblich, was die Integrität der Daten 
beeinträchtigen kann.", + "remediation": "Um dieses Problem zu beheben, verwenden Sie einen Application Load Balancer (ALB) als Zwischeninstanz und wenden Sie eine Web Application Firewall (WAF) Access Control List (ACL) an, um den Zugriff effektiv zu filtern." + } } }, { "name": "allow_ingress_any_port_ipv4", - "title": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to any port.", + "title": "Ensure No Network ACLs Allow Ingress from 0.0.0.0/0 to Any Port.", "result_kinds": ["aws_ec2_network_acl"], - "categories": [ "security", "compliance" ], - "risk": "Even having a perimeter firewall, having network acls open allows any user or malware with vpc access to scan for well known and sensitive ports and gain access to instance.", + "categories": ["security", "compliance"], + "risk": "Our Network ACLs currently allow unsolicited traffic from any IP (0.0.0.0/0) to any port. This presents a security risk including network vulnerability to exploits and unauthorized access to sensitive data.", "severity": "high", "detect": { "resoto_cmd": "search is(aws_ec2_network_acl) and acl_entries[*].{(egress=false and cidr_block=\"0.0.0.0/0\" and rule_action=allow and protocol=-1) } | jq --no-rewrite 'if (( [.reported.acl_entries[]? | contains({egress:false, cidr_block:\"0.0.0.0/0\", protocol:\"-1\", rule_action:\"deny\"}) ] | any | not ) or ((.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | select(.protocol==\"-1\") |select(.cidr_block==\"0.0.0.0/0\") | select(.rule_action==\"allow\") | .rule_number) < (.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | select(.protocol==\"-1\") | select(.cidr_block==\"0.0.0.0/0\") | select(.rule_action==\"deny\") | .rule_number ))) then [.] else [] end' | flatten" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. 
Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Implement a strict security policy. Scan and revise overly permissive network ACLs regularly. Make sure to limit ingress ports by narrowing their definitions to only the minimum required.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" }, - "internal_notes": "The acl entries have to be interpreted in order. idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert" + "internal_notes": "The acl entries have to be interpreted in order. idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Netzwerk-ACLs keinen Zugriff von 0.0.0.0/0 auf beliebige Ports zulassen.", + "risk": "Unsere Netzwerk-ACLs erlauben derzeit unerwünschten Datenverkehr von jeder IP (0.0.0.0/0) auf beliebige Ports. Dies stellt ein Sicherheitsrisiko dar, einschließlich Netzwerk-Schwachstellen für Angriffe und unbefugten Zugriff auf sensible Daten.", + "remediation": "Implementieren Sie eine strikte Sicherheitsrichtlinie. Überprüfen Sie regelmäßig übermäßig großzügige Netzwerk-ACLs und passen Sie sie an. Stellen Sie sicher, dass Ingress-Ports eingeschränkt sind und nur das Minimum an erforderlichen Ports zulassen." 
+ } + } }, { "name": "allow_ingress_any_port_ipv6", - "title": "Ensure no Network ACLs allow ingress from ::/0 to any port.", + "title": "Ensure that Network ACLs do not permit ingress from ::/0 to any port.", "result_kinds": ["aws_ec2_network_acl"], - "categories": [ "security", "compliance" ], - "risk": "Even having a perimeter firewall, having network acls open allows any user or malware with vpc access to scan for well known and sensitive ports and gain access to instance.", + "categories": ["security", "compliance"], + "risk": "Leaving network ACLs open can expose sensitive ports to vulnerabilities, as any user or malware can scan for these ports, bypass the perimeter firewall, and access your VPC. This poses an imminent threat to security and data confidentiality.", "severity": "high", "detect": { "resoto_cmd": "search is(aws_ec2_network_acl) and acl_entries[*].{(egress=false and ipv6_cidr_block=\"::/0\" and rule_action=allow and protocol=-1) } | jq --no-rewrite 'if (( [.reported.acl_entries[]? | contains({egress:false, ipv6_cidr_block:\"::/0\", protocol:\"-1\", rule_action:\"deny\"}) ] | any | not ) or ((.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | select(.protocol==\"-1\") |select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"allow\") | .rule_number) < (.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | select(.protocol==\"-1\") | select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"deny\") | .rule_number ))) then [.] else [] end' | flatten" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "To resolve this issue, apply the Zero Trust approach. Continuously scan and modify network ACLs that are unrestricted or overly permissive. 
Ensure that you limit the definition to the minimum ports required, and regularly update your protocols as part of best practices.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" }, - "internal_notes": "The acl entries have to be interpreted in order. idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert" + "internal_notes": "The acl entries must be interpreted in sequence. idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html#nacl", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Netzwerk-ACLs kein Ingress von ::/0 zu einem beliebigen Port erlauben.", + "risk": "Das Offenlassen von Netzwerk-ACLs kann empfindliche Ports für Schwachstellen freilegen, da jeder Benutzer oder Malware nach diesen Ports scannen, die Perimeter-Firewall umgehen und auf Ihre VPC zugreifen kann. Dies stellt eine unmittelbare Bedrohung für die Sicherheit und Vertraulichkeit von Daten dar.", + "remediation": "Um dieses Problem zu beheben, wenden Sie den Zero-Trust-Ansatz an. Scannen und ändern Sie kontinuierlich Netzwerk-ACLs, die uneingeschränkt oder übermäßig freizügig sind. Stellen Sie sicher, dass Sie die Definition auf die erforderlichen Mindest-Ports beschränken und aktualisieren Sie regelmäßig Ihre Protokolle im Rahmen bewährter Verfahren." 
+ } + } }, { "name": "allow_ingress_ssh_port_22_ipv4", - "title": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to SSH port 22", + "title": "Ensure Network ACLs Do Not Allow Ingress from 0.0.0.0/0 to SSH Port 22", "result_kinds": ["aws_ec2_network_acl"], - "categories": [ "security", "compliance" ], - "risk": "Even having a perimeter firewall, having network acls open allows any user or malware with vpc access to scan for well known and sensitive ports and gain access to instance.", + "categories": ["security", "compliance"], + "risk": "Leaving Network ACLs open can expose sensitive ports to external threats. Anyone with VPC access, including potential cyber-attackers and malware, can exploit this to gain unauthorized access to instances, risking data breach and system compromise.", "severity": "high", "detect": { "resoto_cmd": "search is(aws_ec2_network_acl) and acl_entries[*].{(egress=false and cidr_block=\"0.0.0.0/0\" and rule_action=allow and (protocol=-1 or (port_range.from_range<22 and port_range.to_range>22)))} | jq --no-rewrite 'if (( [.reported.acl_entries[]? | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<22) | select(.port_range.to_range>=22)) end | contains({egress:false, cidr_block:\"0.0.0.0/0\", rule_action:\"deny\"}) ] | any | not ) or ((.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=22) | select(.port_range.to_range>=22) ) end | select(.cidr_block==\"0.0.0.0/0\") | select(.rule_action==\"allow\") | .rule_number) < (.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=22) | select(.port_range.to_range>=22) ) end | select(.cidr_block==\"0.0.0.0/0\") | select(.rule_action==\"deny\") | .rule_number))) then [.] else [] end' | flatten" }, "remediation": { - "text": "Apply Zero Trust approach. 
Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adopt a Zero Trust approach. Regularly scan your Network ACLs for unrestricted or overly permissive access and remediate immediately. Be sure to limit your ACLs to only the minimum necessary ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" }, - "internal_notes": "The acl entries have to be interpreted in order. idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert" + "internal_notes": "The ACL entries need to be processed in order. An alert should be triggered if there's no deny entry or if the deny entry number is greater than the allow entry number.", + "url": "https://aws.amazon.com/premiumsupport/knowledge-center/vpc-restrict-ingress-traffic/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Netzwerk-ACLs keinen Ingress vom IP-Bereich 0.0.0.0/0 zum SSH-Port 22 erlauben", + "risk": "Offene Netzwerk-ACLs können sensible Ports externen Bedrohungen aussetzen. Jeder mit VPC-Zugriff, einschließlich potenzieller Cyber-Angreifer und Malware, kann dies ausnutzen, um unbefugten Zugriff auf Instanzen zu erlangen und ein Datenleck und eine Kompromittierung des Systems zu riskieren.", + "remediation": "Verfolgen Sie einen Zero-Trust-Ansatz. Überprüfen Sie regelmäßig Ihre Netzwerk-ACLs auf uneingeschränkten oder übermäßig berechtigten Zugriff und beheben Sie dies sofort. Stellen Sie sicher, dass Sie Ihre ACLs auf nur die erforderlichen Ports beschränken." 
+ } + } }, { "name": "allow_ingress_ssh_port_22_ipv6", - "title": "Ensure no Network ACLs allow ingress from ::/0 to SSH port 22", + "title": "Ensure No Network ACLs Allow Ingress from ::/0 to SSH Port 22", "result_kinds": ["aws_ec2_network_acl"], - "categories": [ "security", "compliance" ], - "risk": "Even having a perimeter firewall, having network acls open allows any user or malware with vpc access to scan for well known and sensitive ports and gain access to instance.", + "categories": ["security", "compliance"], + "risk": "Leaving network ACLs open can inadvertently give any user or malware with VPC access the ability to scan sensitive ports and access instances, making your infrastructure more susceptible to malicious activities.", "severity": "high", "detect": { - "resoto_cmd": "search is(aws_ec2_network_acl) and acl_entries[*].{(egress=false and ipv6_cidr_block=\"::/0\" and rule_action=allow and (protocol=-1 or (port_range.from_range<22 and port_range.to_range>22)))} | jq --no-rewrite 'if (( [.reported.acl_entries[]? | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<22) | select(.port_range.to_range>=22)) end | contains({egress:false, ipv6_cidr_block:\"::/0\", rule_action:\"deny\"}) ] | any | not ) or ((.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=22) | select(.port_range.to_range>=22) ) end | select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"allow\") | .rule_number) < (.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=22) | select(.port_range.to_range>=22) ) end | select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"deny\") | .rule_number))) then [.] 
else [] end' | flatten" + "resoto_cmd": "search is(aws_ec2_network_acl) and acl_entries[*].{(egress=false and ipv6_cidr_block=\"::/0\" and rule_action=allow and (protocol=-1 or (port_range.from_range<22 and port_range.to_range>22)))} | jq --no-rewrite 'if (( [.reported.acl_entries[]? | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<22) | select(.port_range.to_range>=22)) end | contains({egress:false, ipv6_cidr_block:\"::/0\", rule_action:\"deny\"}) ] | any | not ) or ((.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=22) | select(.port_range.to_range>=22) ) end | select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"allow\") | .rule_number) < (.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=22) | select(.port_range.to_range>=22) ) end | select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"deny\") | .rule_number))) then [.] else [] end' | flatten" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Consider implementing the Zero Trust approach. Consistently scan and remediate unrestricted or overly permissive network ACLs. As a best practice, limit traffic to minimum necessary ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" }, - "internal_notes": "The acl entries have to be interpreted in order.
idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert" + "internal_notes": "Pay attention to ACL entries interpretation sequence: if there's no deny entry or if the deny_entry_number is greater than the allow_entry_number, an alert should be issued.", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_ACLs.html", + "localizations": { + "de": { + "title": "Sicherstellen, dass kein Netzwerk-ACL Eingänge von ::/0 zum SSH-Port 22 erlaubt", + "risk": "Offene Netzwerk-ACLs können unbeabsichtigt jedem Benutzer oder Schadsoftware mit VPC-Zugriff die Möglichkeit geben, sensible Ports zu scannen und auf Instanzen zuzugreifen, wodurch Ihre Infrastruktur anfälliger für bösartige Aktivitäten wird.", + "remediation": "Erwägen Sie die Implementierung des Zero Trust-Ansatzes. Scannen und beheben Sie konsequent unbeschränkte oder übermäßig erlaubende Netzwerk-ACLs. Als bewährte Methode begrenzen Sie den Datenverkehr auf die notwendigen Ports." + } + } }, { "name": "allow_ingress_rdp_port_3389_ipv4", - "title": "Ensure no Network ACLs allow ingress from 0.0.0.0/0 to Microsoft RDP port 3389", + "title": "Ensure that Network ACLs do not allow ingress from 0.0.0.0/0 to Microsoft RDP port 3389", "result_kinds": ["aws_ec2_network_acl"], - "categories": [ "security", "compliance" ], - "risk": "Even having a perimeter firewall, having network acls open allows any user or malware with vpc access to scan for well known and sensitive ports and gain access to instance.", + "categories": ["security", "compliance"], + "risk": "Allowing network ACLs to remain open exposes your system to threats such as external attack probes scanning for vulnerable, well-known ports. 
This could lead to unauthorized access to instances within your VPC, even with a perimeter firewall in place.", "severity": "high", "detect": { "resoto_cmd": "search is(aws_ec2_network_acl) and acl_entries[*].{(egress=false and cidr_block=\"0.0.0.0/0\" and rule_action=allow and (protocol=-1 or (port_range.from_range<3389 and port_range.to_range>3389)))} | jq --no-rewrite 'if (( [.reported.acl_entries[]? | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<3389) | select(.port_range.to_range>=3389)) end | contains({egress:false, cidr_block:\"0.0.0.0/0\", rule_action:\"deny\"}) ] | any | not ) or ((.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=3389) | select(.port_range.to_range>=3389) ) end | select(.cidr_block==\"0.0.0.0/0\") | select(.rule_action==\"allow\") | .rule_number) < (.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=3389) | select(.port_range.to_range>=3389) ) end | select(.cidr_block==\"0.0.0.0/0\") | select(.rule_action==\"deny\") | .rule_number))) then [.] else [] end' | flatten" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adopt a Zero Trust security model. Regularly scan and update ACLs to curtail unrestricted or overly permissive entries. Aim to narrow down ports to the bare minimum required for your operations.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" }, - "internal_notes": "The acl entries have to be interpreted in order. idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert" + "internal_notes": "The acl entries have to be interpreted in order. 
idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/controlling-access.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Netzwerk ACLs keine Eingänge von 0.0.0.0/0 auf den Microsoft RDP-Port 3389 zulassen", + "risk": "Durch das Offenlassen von Netzwerk ACLs wird Ihr System Bedrohungen ausgesetzt, wie zum Beispiel externe Angriffssonden, die nach verwundbaren, bekannten Ports scannen. Dies kann unberechtigten Zugriff auf Instanzen in Ihrem VPC ermöglichen, selbst wenn eine Perimeter-Firewall vorhanden ist.", + "remediation": "Übernehmen Sie ein Sicherheitsmodell mit Null Vertrauen. Scannen und aktualisieren Sie regelmäßig ACLs, um uneingeschränkte oder zu weitreichende Einträge einzuschränken. Streben Sie danach, die Anzahl der Ports auf das absolute Minimum zu reduzieren, das für Ihre Operationen erforderlich ist." + } + } }, { "name": "allow_ingress_rdp_port_3389_ipv6", - "title": "Ensure no Network ACLs allow ingress from ::/0 to Microsoft RDP port 3389", + "title": "Ensure that Network ACLs do not permit inbound traffic from ::/0 to Microsoft RDP port 3389", "result_kinds": ["aws_ec2_network_acl"], - "categories": [ "security", "compliance" ], - "risk": "Even having a perimeter firewall, having network acls open allows any user or malware with vpc access to scan for well known and sensitive ports and gain access to instance.", + "categories": ["security", "compliance"], + "risk": "Leaving network ACLs open can expose well-known and sensitive ports to unauthorized users or malicious software. 
This can lead to potential intrusions, even if a perimeter firewall already exists.", "severity": "high", "detect": { "resoto_cmd": "search is(aws_ec2_network_acl) and acl_entries[*].{(egress=false and ipv6_cidr_block=\"::/0\" and rule_action=allow and (protocol=-1 or (port_range.from_range<3389 and port_range.to_range>3389)))} | jq --no-rewrite 'if (( [.reported.acl_entries[]? | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<3389) | select(.port_range.to_range>=3389)) end | contains({egress:false, ipv6_cidr_block:\"::/0\", rule_action:\"deny\"}) ] | any | not ) or ((.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=3389) | select(.port_range.to_range>=3389) ) end | select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"allow\") | .rule_number) < (.reported.acl_entries | sort_by(.rule_number) | .[]? | select(.egress==false) | if .protocol==\"-1\" then . else (. | select(.port_range.from_range<=3389) | select(.port_range.to_range>=3389) ) end | select(.ipv6_cidr_block==\"::/0\") | select(.rule_action==\"deny\") | .rule_number))) then [.] else [] end' | flatten" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adopt the Zero Trust security model. Regularly scan for and amend overly permissive or unrestricted network ACLs. Limit access to the minimum ports needed to perform necessary functions.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" }, - "internal_notes": "The acl entries have to be interpreted in order. idea: if (no_deny_entry or (deny_entry_number > allow_entry_number)) then alert" + "internal_notes": "ACL entries must be interpreted in sequential order. 
The alert triggers if there is no deny entry or if the deny entry number is greater than the allow entry number.", + "url": "https://aws.amazon.com/premiumsupport/knowledge-center/network-acl-nacl-ec2/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Netzwerk-ACLs den eingehenden Datenverkehr von ::/0 zum Microsoft RDP-Anschluss 3389 nicht zulassen", + "risk": "Das Offenlassen von Netzwerk-ACLs kann bekannte und sensible Ports für unbefugte Benutzer oder bösartige Software freigeben. Dies kann zu potenziellen Eindringversuchen führen, selbst wenn eine Perimeter-Firewall bereits vorhanden ist.", + "remediation": "Übernehmen Sie das Zero-Trust-Sicherheitsmodell. Überprüfen Sie regelmäßig, ob zu großzügige oder uneingeschränkte Netzwerk-ACLs vorliegen, und beheben Sie diese. Begrenzen Sie den Zugriff auf die minimal benötigten Ports, um erforderliche Funktionen durchzuführen." + } + } }, { "name": "allow_ingress_from_internet_to_any_port", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to any port.", + "title": "Ensure that no security groups permit ingress from 0.0.0.0/0 or ::/0 to any port.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Inadequate configuration of Security Groups can significantly expand the attack surface, leaving vital resources vulnerable to unauthorized access and potential network intrusion.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{ip_protocol=\"-1\" and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls.
Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Embrace the Zero Trust model. Consistently audit and rectify unrestricted or too lenient network Access Control Lists (ACLs). Ensure to define the minimum required ports with access to minimize potential breaches.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf jeden Port erlauben.", + "risk": "Eine unzureichende Konfiguration der Sicherheitsgruppen kann die Angriffsfläche erheblich erweitern und wichtige Ressourcen anfällig für unbefugten Zugriff und potenzielle Netzwerkeindringungen machen.", + "remediation": "Befolgen Sie das Zero-Trust-Modell. Überprüfen Sie konsequent und korrigieren Sie uneingeschränkte oder zu nachlässige Netzwerk-Zugriffssteuerungslisten (ACLs). Stellen Sie sicher, dass nur die minimal erforderlichen Ports Zugriff haben, um potenzielle Sicherheitslücken zu minimieren." + } } }, { "name": "allow_ingress_from_internet_to_any_port_ipv4", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 to any port.", + "title": "Ensure No Security Groups Allow Ingress from 0.0.0.0/0 to Any Port.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "An improperly configured security group can lead to exposure of all ports to the internet, vastly expanding the attack surface. 
Such positions can result in significant system vulnerability and potential breaches.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{ip_protocol=\"-1\" and ip_ranges[*].cidr_ip=\"0.0.0.0/0\"}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", - "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + "text": "Adopt a Zero Trust network model. Implement regular scans and remediations for unrestricted or overly permissive network ACLs, and restrict ingress to minimum necessary ports. For outbound traffic, use whitelist approach to limit the exposed interfaces.", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 auf beliebige Ports erlauben.", + "risk": "Eine fehlerhaft konfigurierte Sicherheitsgruppe kann dazu führen, dass alle Ports für das Internet freigegeben werden, was die Angriffsfläche erheblich erweitert. Solche Positionen können zu erheblichen Systemrisiken und potenziellen Verstößen führen.", + "remediation": "Etablieren Sie ein Netzwerkmodell ohne Vertrauen (Zero-Trust). Führen Sie regelmäßige Scans und Maßnahmen zur Behebung von uneingeschränkten oder zu weitreichenden Netzwerk-ACLs durch und beschränken Sie den Zugriff auf die minimal erforderlichen Ports. Für ausgehenden Traffic verwenden Sie einen Whitelist-Ansatz, um die freigegebenen Schnittstellen zu begrenzen." 
+ } } }, { "name": "allow_ingress_from_internet_to_any_port_ipv6", - "title": "Ensure no security groups allow ingress from ::/0 to any port.", + "title": "Ensure that no security groups are allowing ingress from ::/0 to any port in your VPC.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "A misconfigured security group, specifically allowing traffic from all IPv6 addresses (::/0) to any port, significantly widens your attack surface. This can lead to increased vulnerabilities, unwarranted access or potential data breaches.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{ip_protocol=\"-1\" and ipv6_ranges[*].cidr_ipv6=\"::/0\"}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adapt a Zero Trust approach. Start by regularly auditing your security groups for overly permissive rules. Limit the traffic to the minimum required ports and restrict ingress from specific IP addresses or ranges.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen Ingress vom ::/0 zu einem beliebigen Port in Ihrem VPC zulassen.", + "risk": "Eine fehlerhaft konfigurierte Sicherheitsgruppe, die speziell den Datenverkehr von allen IPv6-Adressen (::/0) zu jedem Port erlaubt, erweitert erheblich Ihre Angriffsfläche. 
Dies kann zu erhöhten Sicherheitslücken, unberechtigtem Zugriff oder potenziellen Datenlecks führen.", + "remediation": "Passen Sie den Zero Trust-Ansatz an. Beginnen Sie damit, Ihre Sicherheitsgruppen regelmäßig auf zu weitreichende Regeln zu überprüfen. Begrenzen Sie den Datenverkehr auf die minimal erforderlichen Ports und beschränken Sie den Ingress auf spezifische IP-Adressen oder Bereiche." + } } }, { "name": "allow_ingress_from_internet_to_port_mongodb_27017_27018", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to MongoDB ports 27017 and 27018.", + "title": "Ensure no security groups permit ingress from 0.0.0.0/0 or ::/0 to MongoDB ports 27017 and 27018.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Improper configuration of Security groups can significantly increase the attack surface, potentially leading to unauthorized data access or system breaches.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=27017 and to_port<=27017 and ip_protocol=tcp) or (from_port>=27018 and to_port<=27018 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adopt a Zero Trust model. Regularly scan and rectify unrestricted or overly broad network acls. 
Best practice recommends defining the minimal necessary ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf die MongoDB-Ports 27017 und 27018 erlauben.", + "risk": "Die fehlerhafte Konfiguration von Sicherheitsgruppen kann die Angriffsfläche erheblich erhöhen und möglicherweise zu unberechtigtem Datenzugriff oder Systemverletzungen führen.", + "remediation": "Verwenden Sie das Zero-Trust-Modell. Scannen Sie regelmäßig und beheben Sie uneingeschränkte oder zu weitreichende Netzwerk-ACLs. Bewährte Praxis empfiehlt, die minimal benötigten Ports zu definieren." + } } }, { "name": "allow_ingress_from_internet_to_ssh_port_22", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to SSH port 22.", + "title": "Ensure no security groups permit ingress from 0.0.0.0/0 or ::/0 to SSH port 22.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Failure to properly configure Security groups expands the attack surface, exposing the infrastructure to higher risk of intrusion and cyber attacks.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=22 and to_port<=22 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. 
Recommended best practices is to narrow the definition for the minimum ports required.", - "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + "text": "Adhere to Zero Trust principles. Establish routine scanning and remediation of unrestricted or excessively permissive network acls. Ensure only the necessary minimum ports are open to fulfill operational requirements.", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Appendix_NACLs.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf den SSH-Port 22 erlauben.", + "risk": "Fehlerhafte Konfiguration von Sicherheitsgruppen erweitert die Angriffsfläche und erhöht das Risiko von Eindringlingen und Cyberangriffen.", + "remediation": "Befolgen Sie die Prinzipien des Zero Trust. Führen Sie regelmäßige Scans und die Behebung von uneingeschränkten oder übermäßig freizügigen Netzwerk-ACLs durch. Stellen Sie sicher, dass nur die notwendigen Mindestports geöffnet sind, um die betrieblichen Anforderungen zu erfüllen." + } } }, { "name": "allow_ingress_from_internet_to_ssh_port_22_ipv4", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 to SSH port 22.", + "title": "Ensure that no security groups permit unrestricted ingress from 0.0.0.0/0 to SSH port 22.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "The risk of system breaches is heightened if security groups are not correctly configured. 
Failures in these configurations inevitably expand the attack surface, increasing vulnerability to cyber threats.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=22 and to_port<=22 and ip_protocol=tcp)) and ip_ranges[*].cidr_ip=\"0.0.0.0/0\"}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adoption of the Zero Trust strategy is imperative. Initiate a systematic scan to detect and remediate unrestricted or excessively permissive network acls. As a best practice, limit the definition to the minimum ports required to significantly boost security.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen einen uneingeschränkten Zugriff von 0.0.0.0/0 auf den SSH-Port 22 erlauben.", + "risk": "Das Risiko von Systemeindringen erhöht sich, wenn Sicherheitsgruppen nicht korrekt konfiguriert sind. Fehler in diesen Konfigurationen erweitern zwangsläufig die Angriffsfläche und erhöhen die Anfälligkeit für Cyberbedrohungen.", + "remediation": "Die Einführung der Zero-Trust-Strategie ist unerlässlich. Führen Sie systematische Scans durch, um netzwerkweite Zugriffssteuerungslisten ohne Beschränkungen oder mit übermäßig hohen Berechtigungen zu erkennen und zu beheben. Als bewährte Praxis beschränken Sie die Definition auf die minimal erforderlichen Ports, um die Sicherheit erheblich zu verbessern." 
+ } } }, { "name": "allow_ingress_from_internet_to_ssh_port_22_ipv6", - "title": "Ensure no security groups allow ingress from ::/0 to SSH port 22.", + "title": "Ensure No Security Groups Allow Ingress From ::/0 to SSH Port 22 via IPV6.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Insecure configuration can potentially expose the system to unauthorized access and attacks from any location, risking infrastructure security and data compromise.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=22 and to_port<=22 and ip_protocol=tcp)) and ipv6_ranges[*].cidr_ipv6=\"::/0\"}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", - "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + "text": "Adhere to the principles of 'Zero Trust'. Limit allowed ingress by specifying trusted CIDRs, and revoking unnecessary privileges. 
Refine security groups to minimize exposure by only allowing required ports.", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen Ingress von ::/0 zum SSH-Port 22 über IPV6 erlauben.", + "risk": "Eine unsichere Konfiguration kann das System potenziell unautorisiertem Zugriff und Angriffen aus jedem Ort aussetzen, wodurch die Sicherheit der Infrastruktur und die Kompromittierung von Daten gefährdet werden.", + "remediation": "Befolgen Sie die Grundsätze des 'Zero Trust'. Begrenzen Sie den erlaubten Ingress, indem Sie vertrauenswürdige CIDRs festlegen und unnötige Berechtigungen widerrufen. Verfeinern Sie Sicherheitsgruppen, um die Exposition zu minimieren, indem Sie nur erforderliche Ports zulassen." + } } }, { "name": "allow_ingress_from_internet_to_ftp_port_20_21", "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to FTP ports 20 or 21.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Non-restrictive security group configurations can cause an increase in the attack surface for cyberthreats. This can potentially lead to unauthorized access, data breaches, and other security incidents.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=20 and to_port<=20 and ip_protocol=tcp) or (from_port>=21 and to_port<=21 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. 
Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adopt a Zero Trust security strategy. Conduct regular audits to identify and restrict any overly permissive network access control lists (ACLs). Strive to minimize the number of necessary ports and impose strict access rules for each.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#AddRemoveRule", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Internetzugriff von 0.0.0.0/0 oder ::/0 auf FTP-Ports 20 oder 21 ermöglichen.", + "risk": "Nicht restriktive Sicherheitsgruppeneinstellungen können die Angriffsfläche für Cyberrisiken erhöhen. Dies kann zu unbefugtem Zugriff, Datenverletzungen und anderen Sicherheitsvorfällen führen.", + "remediation": "Verfolgen Sie eine Sicherheitsstrategie des Zero Trust. Führen Sie regelmäßige Überprüfungen durch, um jeglichen übermäßig freizügigen Netzwerkzugriff einzuschränken. Streben Sie an, die Anzahl der erforderlichen Ports zu minimieren und strenge Zugriffsregeln für jeden Port festzulegen." 
+ } } }, { "name": "allow_ingress_from_internet_to_rdp_port_3389", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to port 3389.", + "title": "Ensure that security groups don't permit ingress from 0.0.0.0/0 or ::/0 to port 3389.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Improper configuration of security groups leading to permitting ingress from 0.0.0.0/0 and ::/0 to port 3389 can significantly increase the attack surface by potentially exposing resources to irrelevant or malicious traffic.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=3389 and to_port<=3389 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adopt a Zero Trust Approach and ensure that security groups are properly configured to restrict unnecessary ingress. Implement processes to frequently scan and remediate any unrestricted or overly permissive network ACLs. 
Best practice is to restrict ingress to the least number of ports necessary.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://aws.amazon.com/premiumsupport/knowledge-center/security-group-rules-conflict/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Sicherheitsgruppen keinen Ingress vom 0.0.0.0/0 oder ::/0 auf Port 3389 erlauben.", + "risk": "Eine fehlerhafte Konfiguration von Sicherheitsgruppen, die Ingress vom 0.0.0.0/0 und ::/0 auf Port 3389 ermöglicht, kann die Angriffsfläche erheblich erhöhen, indem potenziell irrelevanter oder bösartiger Traffic freigegeben wird.", + "remediation": "Verfolgen Sie einen Zero-Trust-Ansatz und stellen Sie sicher, dass Sicherheitsgruppen ordnungsgemäß konfiguriert sind, um unnötigen Ingress einzuschränken. Implementieren Sie Prozesse, um ungeeignete oder übermäßig großzügige Netzwerk-ACLs regelmäßig zu überprüfen und zu beheben. Es empfiehlt sich, Ingress auf die kleinste Anzahl von erforderlichen Ports zu beschränken." 
+ } } }, { "name": "allow_ingress_from_internet_to_rdp_port_3389_ipv4", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 to port 3389.", + "title": "Ensure Security Groups do not Allow Ingress from 0.0.0.0/0 to TCP Port 3389.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "An improperly configured Security Group, allowing open ingress to port 3389, increases the attack surface making your cloud infrastructure more vulnerable to potential security threats or breaches.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=3389 and to_port<=3389 and ip_protocol=tcp)) and ip_ranges[*].cidr_ip=\"0.0.0.0/0\"}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adopt the Zero Trust model. Regularly scan and make necessary amendments to eliminate unrestricted or overly permissive network ACLs. 
Minimise the attack surface by defining the exact ports required, rather than leaving ports unnecessarily open.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Security Groups keinen Zugriff von 0.0.0.0/0 auf den TCP-Port 3389 erlauben.", + "risk": "Eine fehlerhaft konfigurierte Security Group, die offenen Zugriff auf den Port 3389 erlaubt, erhöht die Angriffsfläche und macht Ihre Cloud-Infrastruktur anfälliger für potenzielle Sicherheitsbedrohungen oder Angriffe.", + "remediation": "Übernehmen Sie das Zero Trust-Modell. Scannen Sie regelmäßig und nehmen Sie erforderliche Änderungen vor, um uneingeschränkte oder übermäßig zugriffsberechtigte Netzwerk-ACLs zu beseitigen. Minimieren Sie die Angriffsfläche, indem Sie die genauen Ports definieren, die benötigt werden, anstatt Ports unnötig offen zu lassen." + } } }, { "name": "allow_ingress_from_internet_to_rdp_port_3389_ipv6", - "title": "Ensure no security groups allow ingress from ::/0 to port 3389.", + "title": "Ensure that no security groups permit ingress from ::/0 to port 3389.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Unsecured security group configurations could potentially enlarge the attack surface, making your cloud infrastructure more vulnerable to malicious attacks and unauthorised access attempts.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=3389 and to_port<=3389 and ip_protocol=tcp)) and ipv6_ranges[*].cidr_ipv6=\"::/0\"}" }, "remediation": { - "text": "Apply Zero Trust approach. 
Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Adhere to the principles of Zero Trust approach. Conduct regular scans and fix any unrestricted or overly permissive network ACLs. Narrow down the rules to include only the necessary ports, thereby minimising potential risks.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von ::/0 auf Port 3389 gestatten.", + "risk": "Unsichere Sicherheitsgruppeneinstellungen könnten die Angriffsfläche vergrößern und Ihre Cloud-Infrastruktur anfälliger für bösartige Angriffe und unbefugte Zugriffsversuche machen.", + "remediation": "Halten Sie sich an die Prinzipien des Zero-Trust-Ansatzes. Führen Sie regelmäßige Scans durch und beheben Sie alle nicht eingeschränkten oder zu weit geöffneten Netzwerk-ACLs. Beschränken Sie die Regeln auf die notwendigen Ports, um potenzielle Risiken zu minimieren." + } } }, { "name": "allow_ingress_from_internet_to_cassandra_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Cassandra ports 7199 or 9160 or 8888.", + "title": "Ensure Ingress to Cassandra Ports 7199, 9160 and 8888 is Not Allowed From 0.0.0.0/0 or ::/0", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Inappropriate configuration of security groups can significantly increase the attack surface. 
Publicly exposed Cassandra ports can open a vector for potential cyber-attacks that may compromise data integrity.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=9160 and to_port<=9160 and ip_protocol=tcp) or (from_port>=8888 and to_port<=8888 and ip_protocol=tcp) or (from_port>=7199 and to_port<=7199 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Implement Zero Trust networking model. Regularly scan and remediate unrestricted or overly permissive network access control lists (ACLs). It's a best practice to minimize the attack surface by restricting ports to the bare minimum required for functioning.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der Zugriff auf die Cassandra-Ports 7199, 9160 und 8888 von 0.0.0.0/0 oder ::/0 nicht erlaubt ist", + "risk": "Eine unangemessene Konfiguration der Sicherheitsgruppen kann die Angriffsfläche erheblich vergrößern. Öffentlich zugängliche Cassandra-Ports können einen Angriffsvektor darstellen, der die Integrität der Daten gefährden kann.", + "remediation": "Implementieren Sie das Zero-Trust-Netzwerkmodell. Scannen und beheben Sie regelmäßig uneingeschränkte oder übermäßig zugängliche Netzwerkzugriffssteuerungslisten (Access Control Lists, ACLs). Es ist bewährte Praxis, die Angriffsfläche zu minimieren, indem Sie die Ports auf das absolute Minimum beschränken, das für den reibungslosen Betrieb erforderlich ist." 
+ } } }, { "name": "allow_ingress_from_internet_to_kibana_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Elasticsearch/Kibana ports.", + "title": "Ensure Security Groups Disallow Ingress from 0.0.0.0/0 or ::/0 to Elasticsearch/Kibana Ports", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Unregulated ingress into Elasticsearch/Kibana ports increases attack surface, leading to potential unauthorized access and data breaches, which directly undermines system security and compliance.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=9200 and to_port<=9200 and ip_protocol=tcp) or (from_port>=9300 and to_port<=9300 and ip_protocol=tcp) or (from_port>=5601 and to_port<=5601 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Implement the Zero Trust model. Ensure frequent scanning and remediation of insecure or overly permissive network ACLs. 
It is recommended to limit the scope to the minimum necessary ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.RDSSecurityGroups.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Security Groups den Eingriff von 0.0.0.0/0 oder ::/0 in die Elasticsearch/Kibana-Ports untersagen", + "risk": "Ein ungeregelter Eingriff in die Elasticsearch/Kibana-Ports erhöht die Angriffsfläche und führt zu potenziell unbefugtem Zugriff und Datenverletzungen, was die Systemsecurity und Compliance unmittelbar untergräbt.", + "remediation": "Implementieren Sie das Zero Trust-Modell. Stellen Sie eine regelmäßige Überprüfung und Behebung unsicherer oder übermäßig freizügiger Netzwerk-ACLs sicher. Es wird empfohlen, den Umfang auf die minimal notwendigen Ports zu beschränken." + } } }, { "name": "allow_ingress_from_internet_to_kafka_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Kafka ports.", + "title": "Ensure that security groups restrict ingress from 0.0.0.0/0 or ::/0 to Kafka ports.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "A lack of properly configured security groups broadens your attack surface. This increases the risk of unauthorized access to your AWS infrastructure and data breaches.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=9092 and to_port<=9092 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. 
Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Leverage the Zero Trust approach. Initiate a process to regularly scan and patch unrestricted or overly permissive network access control lists (ACLs). Best practice dictates narrowing the definition to the minimum required ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf Kafka-Ports einschränken.", + "risk": "Ein Mangel an ordnungsgemäß konfigurierten Sicherheitsgruppen erhöht Ihre Angriffsfläche. Dadurch steigt das Risiko unbefugten Zugriffs auf Ihre AWS-Infrastruktur und Datenverletzungen.", + "remediation": "Nutzen Sie den Zero-Trust-Ansatz. Initiieren Sie regelmäßige Scans und Patching von uneingeschränkten oder übermäßig großzügigen Netzwerkzugriffskontrolllisten (ACLs). Die bewährte Praxis besteht darin, die Definition auf die erforderlichen Mindestports zu beschränken." + } } }, { "name": "allow_ingress_from_internet_to_memcached_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Memcached ports.", + "title": "Ensure security groups do not permit ingress from 0.0.0.0/0 or ::/0 to Memcached port 11211.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Allowing unrestricted access to Memcached ports increases the risk of unauthorized data access and potential DDoS attacks.
Ensuring proper configurations is fundamental for defending the AWS infrastructure.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=11211 and to_port<=11211 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Security groups should be refined to disregard traffic from 0.0.0.0/0 or ::/0 to Memcached ports. Embrace a Zero Trust approach and impose stricter network ACLs that only allow minimum required port access as part of a routine scanning and remediation process.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#VPCSecurityGroups", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Sicherheitsgruppen keinen Zugriff von 0.0.0.0/0 oder ::/0 auf Memcached Ports 11211 erlauben.", + "risk": "Die Erlaubnis eines unbeschränkten Zugriffs auf Memcached Ports erhöht das Risiko eines unberechtigten Datenzugriffs und potenzieller DDoS-Angriffe. Eine ordnungsgemäße Konfiguration ist entscheidend für die Verteidigung der AWS-Infrastruktur.", + "remediation": "Sicherheitsgruppen sollten so angepasst werden, dass der Datenverkehr von 0.0.0.0/0 oder ::/0 zu Memcached Ports blockiert wird. Verfolgen Sie einen Zero Trust Ansatz und setzen Sie strengere Netzwerk-ACLs ein, die nur den minimal erforderlichen Portzugriff erlauben. Dies sollte Teil eines regelmäßigen Scanning- und Remediation-Prozesses sein." 
+ } } }, { "name": "allow_ingress_from_internet_to_mysql_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to MySQL ports.", + "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to MySQL ports", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "If security groups are not properly configured, the attack surface is increased. This allows unauthorized access to MySQL ports, potentially compromising sensitive data and affecting the availability of the service.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=3306 and to_port<=3306 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", - "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + "text": "To fix this issue, implement a Zero Trust approach by applying the principle of least privilege. Create explicit security group rules that only allow necessary inbound traffic to MySQL ports. Regularly scan and remediate any unrestricted or overly permissive network ACLs. 
Refer to the AWS documentation for recommended best practices.", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html#nacls-recommendations" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf MySQL-Ports erlauben", + "risk": "Wenn Sicherheitsgruppen nicht ordnungsgemäß konfiguriert sind, wird die Angriffsfläche erhöht. Dadurch wird unbefugter Zugriff auf MySQL-Ports ermöglicht, was potenziell sensible Daten gefährdet und die Verfügbarkeit des Dienstes beeinträchtigt.", + "remediation": "Um dieses Problem zu beheben, implementieren Sie einen Ansatz des 'Zero Trust', indem Sie das Prinzip des geringsten Privilegs anwenden. Erstellen Sie explizite Sicherheitsgruppenregeln, die nur den erforderlichen eingehenden Datenverkehr zu MySQL-Ports zulassen. Scannen und beheben Sie regelmäßig alle uneingeschränkten oder übermäßig freizügigen Netzwerk-ACLs. Lesen Sie die AWS-Dokumentation für empfohlene bewährte Verfahren." 
+ } } }, { "name": "allow_ingress_from_internet_to_oracledb", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to OracleDB ports.", + "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to OracleDB ports", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Improperly configured security groups increase the attack surface, making the infrastructure vulnerable.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=1521 and to_port<=1521 and ip_protocol=tcp) or (from_port>=2483 and to_port<=2483 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "To fix this issue, it is recommended to apply a Zero Trust approach, implement a process for scanning and remediating unrestricted or overly permissive network ACLs. 
It is also advised to narrow the definition for the minimum required ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den eingehenden Verkehr von 0.0.0.0/0 oder ::/0 zu den OracleDB-Ports erlauben", + "risk": "Fehlkonfigurierte Sicherheitsgruppen erhöhen die Angriffsfläche und machen die Infrastruktur anfällig.", + "remediation": "Um dieses Problem zu beheben, wird empfohlen, einen Zero-Trust-Ansatz anzuwenden, einen Prozess zur Überprüfung und Behebung von uneingeschränkten oder übermäßig freizügigen Netzwerk-ACLs zu implementieren. Es wird außerdem empfohlen, die Definition für die minimal erforderlichen Ports einzuschränken." + } } }, { "name": "allow_ingress_from_internet_to_postgresql_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to PostgresQL ports.", + "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to PostgreSQL ports.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Allowing ingress from 0.0.0.0/0 or ::/0 to PostgreSQL ports increases the attack surface and can expose sensitive data.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=5432 and to_port<=5432 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls.
Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Implement a Zero Trust approach by scanning and remediating unrestricted or overly permissive network ACLs. Apply recommended best practices by narrowing the definition for the minimum required ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf PostgreSQL-Ports erlauben.", + "risk": "Das Zulassen von Zugriffen von 0.0.0.0/0 oder ::/0 auf PostgreSQL-Ports erhöht die Angriffsfläche und kann sensible Daten preisgeben.", + "remediation": "Implementieren Sie einen Zero-Trust-Ansatz durch Scannen und Beheben von uneingeschränkten oder übermäßig freizügigen Netzwerk-ACLs. Wenden Sie empfohlene bewährte Methoden an, indem Sie die Definition für die erforderlichen Mindestports einschränken." + } } }, { "name": "allow_ingress_from_internet_to_redis_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Redis ports.", + "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Redis ports", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "If security groups are not properly configured, the attack surface is increased.
Unauthorized access from the internet can lead to data breaches and security incidents.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=6379 and to_port<=6379 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Apply a Zero Trust approach to security. Implement a process to scan and remediate unrestricted or overly permissive network ACLs. It is recommended to define specific rules and narrow down the allowed ingress IP ranges to minimize the attack surface.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf Redis-Ports ermöglichen", + "risk": "Wenn Sicherheitsgruppen nicht ordnungsgemäß konfiguriert sind, erhöht sich die Angriffsfläche. Nicht autorisierter Zugriff aus dem Internet kann zu Datenverstößen und Sicherheitsvorfällen führen.", + "remediation": "Wenden Sie einen Zero-Trust-Ansatz für die Sicherheit an. Implementieren Sie einen Prozess zur Überprüfung und Behebung von uneingeschränkten oder übermäßig freizügigen Netzwerk-ACLs. Es wird empfohlen, spezifische Regeln festzulegen und die erlaubten Eingangs-IP-Bereiche einzugrenzen, um die Angriffsfläche zu minimieren." 
+ } } }, { "name": "allow_ingress_from_internet_to_sql_server_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to SQLServer ports.", + "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to SQL Server ports.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "If security groups are not properly configured, the attack surface is significantly increased.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=1433 and to_port<=1433 and ip_protocol=tcp) or (from_port>=1434 and to_port<=1434 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "Implement a Zero Trust approach. Scan and remediate unrestricted or overly permissive network ACLs. It is recommended to narrow the definition for the minimum required ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf SQL Server-Ports erlauben.", + "risk": "Wenn Sicherheitsgruppen nicht ordnungsgemäß konfiguriert sind, wird die Angriffsfläche erheblich erhöht.", + "remediation": "Implementieren Sie einen Zero Trust-Ansatz. Scannen und beheben Sie unbeschränkte oder übermäßig weitreichende Netzwerk-ACLs. 
Es wird empfohlen, die Definition für die erforderlichen Mindestports zu reduzieren." + } } }, { "name": "allow_ingress_from_internet_to_telnet_ports", - "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Telnet ports.", + "title": "Ensure no security groups allow ingress from 0.0.0.0/0 or ::/0 to Telnet ports", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Failing to properly configure security groups increases the attack surface.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].{(ip_protocol=-1 or (from_port>=23 and to_port<=23 and ip_protocol=tcp)) and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "To fix this issue, apply a Zero Trust approach by implementing a process to scan and remediate unrestricted or overly permissive network ACLs. It is recommended to narrow the definition for the minimum required ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Sicherheitsgruppen den Zugriff von 0.0.0.0/0 oder ::/0 auf Telnet-Ports erlauben", + "risk": "Eine falsche Konfiguration von Sicherheitsgruppen erhöht die Angriffsfläche.", + "remediation": "Um das Problem zu beheben, verwenden Sie einen Zero-Trust-Ansatz, indem Sie einen Prozess implementieren, um nicht eingeschränkte oder zu großzügige Netzwerk-ACLs zu überprüfen und zu beheben. 
Es wird empfohlen, die Definition der erforderlichen Mindest-Ports zu verengen." + } } }, { "name": "wide_open_ipv4_security_group", - "title": "Security group is configured wide open.", + "title": "Ensure security group is properly configured to reduce attack surface.", "result_kinds": ["aws_ec2_security_group"], - "categories": [ "security", "compliance" ], - "risk": "If Security groups are not properly configured the attack surface is increased.", + "categories": ["security", "compliance"], + "risk": "Improperly configured security groups increase the attack surface, making the AWS cloud infrastructure vulnerable.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and group_ip_permissions[*].ip_ranges[*].{cidr_ip!=\"0.0.0.0/0\" and cidr_ip!~\"^192\" and cidr_ip!~\"^10\" and cidr_ip~\"\\/(([1-9])|(1[0-9])|(2[0-3]))$\"}" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "To fix the issue, implement a Zero Trust approach and establish a process to regularly scan and remediate unrestricted or overly permissive network access control lists. 
It is recommended to narrow the definition of the minimum required ports.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html", + "localizations": { + "de": { + "title": "Sicherheitsgruppe korrekt konfigurieren, um die Angriffsfläche zu reduzieren.", + "risk": "Falsch konfigurierte Sicherheitsgruppen erhöhen die Angriffsfläche und machen die AWS Cloud-Infrastruktur anfällig.", + "remediation": "Um das Problem zu beheben, implementieren Sie einen Zero-Trust-Ansatz und etablieren Sie einen Prozess, um uneingeschränkte oder übermäßig freizügige Netzwerkzugriffskontrolllisten regelmäßig zu überprüfen und zu beheben. Es wird empfohlen, die Definition der erforderlichen Ports zu verengen." + } } }, { "name": "default_restrict_traffic", - "title": "Ensure the default security group of every VPC restricts all traffic.", + "title": "Ensure that the default security group of every VPC restricts all traffic", "result_kinds": ["aws_vpc"], - "categories": [ "security", "compliance" ], - "risk": "Even having a perimeter firewall, having security groups open allows any user or malware with vpc access to scan for well known and sensitive ports and gain access to instance.", + "categories": ["security", "compliance"], + "risk": "Leaving security groups open allows any user or malware with VPC access to scan for well-known and sensitive ports, potentially gaining unrestricted access to instances.", "severity": "high", "detect": { "resoto": "is(aws_ec2_security_group) and name=\"default\" and group_ip_permissions[*].{ip_protocol=\"-1\" and (ip_ranges[*].cidr_ip=\"0.0.0.0/0\" or ipv6_ranges[*].cidr_ipv6=\"::/0\")} <-- is(aws_vpc)" }, "remediation": { - "text": "Apply Zero Trust approach. Implement a process to scan and remediate unrestricted or overly permissive network acls. 
Recommended best practices is to narrow the definition for the minimum ports required.", + "text": "To address this issue, apply a Zero Trust approach and implement a process to regularly scan and remediate any unrestricted or overly permissive network ACLs. It is recommended to narrow the definition for the minimum ports required.", "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html" + }, + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Standard-Sicherheitsgruppe jeder VPC den gesamten Datenverkehr einschränkt", + "risk": "Das Offenlassen von Sicherheitsgruppen ermöglicht es jedem Benutzer oder Schadprogramm mit VPC-Zugriff, bekannte und sensible Ports zu scannen und möglicherweise uneingeschränkten Zugriff auf Instanzen zu erhalten.", + "remediation": "Um dieses Problem zu beheben, wenden Sie einen Zero-Trust-Ansatz an und implementieren Sie einen Prozess, um regelmäßig nicht eingeschränkte oder zu weitreichende Netzwerk-ACLs zu scannen und zu beheben. Es wird empfohlen, die Definition für die erforderlichen Mindestports einzugrenzen." 
+ } } }, { "name": "routing_tables_with_least_privilege", - "title": "Ensure routing tables for VPC peering are least access.", + "title": "Ensure VPC peering routing tables have least access.", "result_kinds": ["aws_vpc"], - "categories": [ "security", "compliance" ], - "risk": "Being highly selective in peering routing tables is a very effective way of minimizing the impact of breach as resources outside of these routes are inaccessible to the peered VPC.", + "categories": ["security", "compliance"], + "risk": "By being highly selective in peering routing tables, the impact of a breach can be minimized as resources outside of these routes will be inaccessible to the peered VPC.", "severity": "medium", "detect": { "resoto_cmd": "search is(aws_vpc_peering_connection) {/vpc: <-- is(aws_vpc), /route_tables[]: <-- is(aws_vpc) --> is(aws_ec2_route_table)} | jq --no-rewrite 'if [.route_tables[]?.reported.route_table_routes[]? | select(.origin!=\"CreateRouteTable\") | (.destination_cidr_block==\"0.0.0.0/0\") or (.destination_cidr_block==.reported.connection_accepter_vpc_info.cidr_block) or (.destination_cidr_block==.reported.connection_requester_vpc_info.cidr_block)] | any then [.vpc] else [] end' | flatten" }, "remediation": { - "text": "Review routing tables of peered VPCs for whether they route all subnets of each VPC and whether that is necessary to accomplish the intended purposes for peering the VPCs.", + "text": "To fix this issue, review the routing tables of the peered VPCs and determine whether they route all subnets of each VPC and whether such routing is necessary for the intended purposes of peering the VPCs.", "url": "https://docs.aws.amazon.com/vpc/latest/peering/peering-configurations-partial-access.html" }, - "internal_notes": "Load peering connections and merge vpc and route tables. Then check if any route table cidr is 0.0.0.0/0 or the same as requester cidr or accepter cidr." 
+ "internal_notes": "Load peering connections and merge VPC and route tables. Then check if any route table CIDR is set to 0.0.0.0/0 or the same as the requester or accepter CIDR.", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass VPC-Peering-Routing-Tabellen den geringsten Zugriff haben.", + "risk": "Durch eine hohe Selektivität bei den Peering-Routing-Tabellen kann der Schaden eines Sicherheitsverstoßes minimiert werden, da Ressourcen außerhalb dieser Routen für das gepeerte VPC nicht zugänglich sind.", + "remediation": "Um dieses Problem zu beheben, überprüfen Sie die Routing-Tabellen der gepeerten VPCs und prüfen Sie, ob sie alle Subnetze jedes VPC routen und ob eine solche Weiterleitung für den beabsichtigten Zweck des Peering der VPCs erforderlich ist." + } + } }, { "name": "volume_not_encrypted", - "title": "Ensure there are no EBS Volumes unencrypted.", + "title": "Ensure all EBS Volumes are encrypted.", "result_kinds": ["aws_ec2_volume"], - "categories": [ "security", "compliance" ], - "risk": "Data encryption at rest prevents data visibility in the event of its unauthorized access or theft.", + "categories": ["security", "compliance"], + "risk": "Failure to encrypt EBS volumes may lead to unauthorized access or theft of data.", "severity": "medium", "detect": { "resoto": "is(aws_ec2_volume) and volume_encrypted=false" }, "remediation": { - "text": "Encrypt all EBS volumes and Enable Encryption by default You can configure your AWS account to enforce the encryption of the new EBS volumes and snapshot copies that you create. For example; Amazon EBS encrypts the EBS volumes created when you launch an instance and the snapshots that you copy from an unencrypted snapshot.", + "text": "To fix the issue, encrypt all EBS volumes and enable encryption by default. You can enforce encryption of new EBS volumes and snapshot copies in your AWS account. 
Amazon EBS automatically encrypts volumes created when launching an instance and copying from an unencrypted snapshot.", "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass alle EBS-Volumes verschlüsselt sind.", + "risk": "Das Fehlen der Verschlüsselung von EBS-Volumes kann zu unbefugtem Zugriff oder Diebstahl von Daten führen.", + "remediation": "Um das Problem zu beheben, verschlüsseln Sie alle EBS-Volumes und aktivieren Sie die standardmäßige Verschlüsselung. Sie können die Verschlüsselung neuer EBS-Volumes und Snapshot-Kopien in Ihrem AWS-Konto erzwingen. Amazon EBS verschlüsselt automatisch Volumes, die beim Start einer Instanz erstellt werden, sowie Kopien von unverschlüsselten Snapshots." + } } }, { "name": "vpc_flow_logs_enabled", - "title": "Ensure VPC Flow Logging is Enabled in all VPCs.", + "title": "Ensure VPC Flow Logging is Enabled in all VPCs", "result_kinds": ["aws_vpc"], - "categories": [ "security", "compliance" ], - "risk": "VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows.", + "categories": ["security", "compliance"], + "risk": "Failure to enable VPC Flow Logs may result in a lack of visibility into network traffic, making it difficult to detect and respond to anomalous activity, compromising security measures.", "severity": "medium", "detect": { "resoto": "is(aws_vpc) with(empty, --> is(aws_ec2_flow_log))" }, "remediation": { - "text": "It is recommended that VPC Flow Logs be enabled for packet Rejects for VPCs.", - "url": "http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html" + "text": "To fix this issue, it is recommended to enable VPC Flow Logs for packet rejects in all VPCs.", + "url": 
"https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html" + }, + "url": "https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass VPC-Flussprotokollierung in allen VPCs aktiviert ist", + "risk": "Das Nichtaktivieren von VPC-Flussprotokollen kann zu einer fehlenden Sichtbarkeit des Netzwerkverkehrs führen, was die Erkennung und Reaktion auf abweichende Aktivitäten erschwert und die Sicherheitsmaßnahmen gefährdet.", + "remediation": "Um dieses Problem zu beheben, wird empfohlen, die VPC-Flussprotokollierung für verworfene Pakete in allen VPCs zu aktivieren." + } } }, { - "name": "instance_imdsv2_enabled", - "title": "Ensure that EC2 Metadata Service only allows IMDSv2", + "name": "instance_uses_imdsv2", + "title": "Ensure EC2 Metadata Service is configured to only allow IMDSv2", "result_kinds": ["aws_ec2_instance"], - "categories": [ "security", "compliance" ], - "risk": "Allowing Version 1 of the service may open EC2 instances to Server-Side Request Forgery (SSRF) attacks, so Amazon recommends utilizing Version 2 for better instance security.", + "categories": ["security", "compliance"], + "risk": "Allowing Version 1 of the service may open EC2 instances to Server-Side Request Forgery (SSRF) attacks, putting the instances at risk. It is recommended to utilize Version 2 for better instance security.", "severity": "medium", "detect": { "resoto": "is(aws_ec2_instance) and not instance_metadata_options.{(http_endpoint=enabled and http_tokens=required) or http_endpoint=disabled}" }, "remediation": { - "text": "Login to AWS Management Console and open the Amazon EC2 console using https://console.aws.amazon.com/ec2/ Under the Instances menu, select Instances. For each Instance, select the instance, then choose Actions > Modify instance metadata options. 
If the Instance metadata service is enabled, set IMDSv2 to Required.", - "awscli": "aws ec2 modify-instance-metadata-options --instance-id --http-tokens required", - "url": "https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service/:https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html" + "text": "To fix the issue, login to the AWS Management Console and open the Amazon EC2 console using the link: https://console.aws.amazon.com/ec2/. Then, navigate to the Instances menu and select Instances. For each instance, choose Actions > Modify instance metadata options. If the Instance metadata service is enabled, set IMDSv2 to Required.", + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-metadata-transition-to-version-2.html", + "action": { + "aws_cli": "aws ec2 modify-instance-metadata-options --instance-id --http-tokens required" + } + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-metadata-transition-to-version-2.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der EC2-Metadatendienst nur IMDSv2 zulässt.", + "risk": "Die Zulassung von Version 1 des Dienstes kann EC2-Instanzen anfällig für Server-seitige Request Forgery (SSRF)-Angriffe machen und die Instanzen gefährden. Es wird empfohlen, Version 2 für eine bessere Instanzsicherheit zu nutzen.", + "remediation": "Um das Problem zu beheben, melden Sie sich in der AWS Management Console an und öffnen Sie die Amazon EC2 Console über den Link: https://console.aws.amazon.com/ec2/. Navigieren Sie dann zum Menü 'Instances' und wählen Sie 'Instances' aus. Wählen Sie für jede Instanz 'Actions > Modify instance metadata options'. Wenn der Instanzdienst für Metadaten aktiviert ist, setzen Sie IMDSv2 auf 'Erforderlich'." 
+ } + } + }, + { + "name": "instance_stopped_since_30d", + "title": "Ensure that stopped EC2 instances are terminated within 30 days", + "result_kinds": ["aws_ec2_instance"], + "categories": ["security", "compliance"], + "risk": "Instances that are stopped have a tendency to be forgotten and can pose a security risk. It is important to terminate instances that are not in use to mitigate this risk.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ec2_instance) and instance_status==stopped and mtime>{{stopped_instance_age}}" + }, + "default_values": { + "stopped_instance_age": "30d" + }, + "remediation": { + "text": "To fix this issue, terminate any EC2 instance that has been shutdown for more than 30 days. Make sure to create backups of the instances that are still required for future use.", + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass gestoppte EC2-Instanzen innerhalb von 30 Tagen beendet werden", + "risk": "Instanzen, die gestoppt sind, haben die Tendenz, vergessen zu werden und stellen ein Sicherheitsrisiko dar. Es ist wichtig, Instanzen, die nicht in Gebrauch sind, zu beenden, um dieses Risiko zu mindern.", + "remediation": "Um dieses Problem zu beheben, beenden Sie alle EC2-Instanzen, die seit mehr als 30 Tagen heruntergefahren sind. Stellen Sie sicher, Backups von den Instanzen zu erstellen, die für zukünftigen Gebrauch noch benötigt werden." + } + } + }, + { + "name": "ebs_volume_unused", + "title": "Ensure there are no unused EBS volumes on the AWS Account", + "result_kinds": ["aws_ec2_volume"], + "categories": ["security", "compliance"], + "risk": "If not solved, unused EBS volumes pose a security risk by containing previously stored data, violating compliance requirements. 
It is essential to dispose of them properly.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ec2_volume) and last_access>7d and volume_attachments==[]" + }, + "remediation": { + "text": "To fix the issue, navigate to the EC2 Dashboard, select the unused volumes, go to the action tab, and click on \"delete volumes\".", + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass auf dem AWS-Konto keine ungenutzten EBS-Volumes vorhanden sind", + "risk": "Wenn nicht behoben, stellen ungenutzte EBS-Volumes ein Sicherheitsrisiko dar, da sie zuvor gespeicherte Daten enthalten und gegen Compliance-Anforderungen verstoßen. Es ist unerlässlich, sie ordnungsgemäß zu entsorgen.", + "remediation": "Um das Problem zu beheben, navigieren Sie zum EC2-Dashboard, wählen Sie die ungenutzten Volumes aus, gehen Sie zum Aktions-Tab und klicken Sie auf 'Volumes löschen'." + } + } + }, + { + "name": "instance_managed_by_ssm", + "title": "Ensure EC2 instances are managed by AWS Systems Manager", + "result_kinds": ["aws_ec2_instance"], + "categories": ["security", "compliance"], + "risk": "If EC2 instances are not managed by AWS Systems Manager, there is a risk of misconfigured or non-compliant instances, leading to potential security vulnerabilities.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ec2_instance) with(empty, <-- is(aws_ssm_instance))" + }, + "remediation": { + "text": "To fix this issue, verify if the EC2 instances are managed by AWS Systems Manager and apply Systems Manager Prerequisites.", + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/managed_instances.html" + }, + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/managed_instances.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass EC2-Instanzen von AWS Systems Manager verwaltet werden", + "risk": "Wenn EC2-Instanzen nicht von AWS Systems
Manager verwaltet werden, besteht ein Risiko von fehlerhaft konfigurierten oder nicht konformen Instanzen, die potenzielle Sicherheitsrisiken mit sich bringen.", + "remediation": "Um dieses Problem zu beheben, überprüfen Sie, ob die EC2-Instanzen von AWS Systems Manager verwaltet werden und wenden Sie die Systems Manager-Voraussetzungen an." + } + } + }, + { + "name": "image_public", + "title": "Ensure EC2 AMIs are not publicly accessible.", + "result_kinds": ["aws_ec2_image"], + "categories": ["security", "compliance"], + "risk": "Publicly accessible AMIs can be used by anyone with an AWS account to launch EC2 instances, potentially exposing sensitive data contained in the AMIs.", + "severity": "critical", + "detect": { + "resoto": "is(aws_ec2_image) and public==true" + }, + "remediation": { + "text": "To prevent unauthorized access, ensure that your EC2 AMIs are not set as public or available in the Community AMIs.", + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cancel-sharing-an-AMI.html" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/cancel-sharing-an-AMI.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass EC2-AMIs nicht öffentlich zugänglich sind.", + "risk": "Öffentlich zugängliche AMIs können von jedem mit einem AWS-Konto verwendet werden, um EC2-Instanzen zu starten und potenziell sensible Daten, die in den AMIs enthalten sind, offenzulegen.", + "remediation": "Um unbefugten Zugriff zu verhindern, stellen Sie sicher, dass Ihre EC2-AMIs nicht als öffentlich zugänglich oder in den Community-AMIs verfügbar sind." + } + } + }, + { + "name": "no_secrets_in_instance_user_data", + "title": "Ensure there are no secrets in EC2 User Data.", + "result_kinds": ["aws_ec2_instance"], + "categories": ["security", "compliance"], + "risk": "Adding secrets in userdata can compromise security as these scripts can be viewed by anyone.
Secrets hardcoded into instance user data can be used by malware and bad actors to gain lateral access to other services.", + "severity": "medium", + "detect": { + "resoto_cmd": "search is(aws_ec2_instance) and instance_user_data!=null | detect-secrets --path instance_user_data --with-secrets" + }, + "remediation": { + "text": "Always ensure that User data picks up secrets from a managed service like Parameter Store or Secrets Manager, rather than having it hardcoded in the actual script.", + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass sich keine Geheimnisse in den EC2-Benutzerdaten befinden.", + "risk": "Das Hinzufügen von Geheimnissen in Benutzerdaten kann die Sicherheit beeinträchtigen, da diese Skripts von jedem angesehen werden können. In Benutzerdaten fest codierte Geheimnisse können von Malware und böswilligen Akteuren verwendet werden, um seitlichen Zugriff auf andere Dienste zu erlangen.", + "remediation": "Stellen Sie immer sicher, dass die Benutzerdaten Geheimnisse aus einem verwalteten Dienst wie dem Parameter Store oder Secrets Manager abrufen, anstatt sie im tatsächlichen Skript fest zu codieren." + } + } + }, + { + "name": "no_secrets_in_launch_template_user_data", + "title": "Ensure there are no secrets in EC2 Launch Template User Data", + "result_kinds": ["aws_ec2_launch_template"], + "categories": ["security", "compliance"], + "risk": "If secrets are added to the EC2 Launch Template User Data, it can compromise security as these scripts can be viewed by anyone.
Hackers and malware can exploit these secrets to gain unauthorized access to other services.", + "severity": "medium", + "detect": { + "resoto_cmd": "search is(aws_ec2_launch_template) and launch_template_data.user_data!=null | detect-secrets --path launch_template_data.user_data --with-secrets" + }, + "remediation": { + "text": "To fix this issue, always ensure that User Data retrieves secrets from a managed service like AWS Systems Manager Parameter Store or AWS Secrets Manager, instead of hardcoding them in the user data script.", + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html#user-data-best-practices" + }, + "url": "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass in der EC2-Startvorlage keine Geheimnisse in den Benutzerdaten enthalten sind", + "risk": "Wenn Geheimnisse zu den Benutzerdaten der EC2-Startvorlage hinzugefügt werden, kann dies die Sicherheit beeinträchtigen, da diese Skripte von jedem eingesehen werden können. Hacker und Malware können diese Geheimnisse ausnutzen, um unbefugten Zugriff auf andere Dienste zu erlangen.", + "remediation": "Um dieses Problem zu beheben, stellen Sie immer sicher, dass die Benutzerdaten Geheimnisse aus einem verwalteten Dienst wie dem AWS Systems Manager-Parameter Store oder dem AWS Secrets Manager abrufen, anstatt sie im Benutzerdatenskript fest zu codieren." + } + } + }, + { + "name": "subnet_auto_assign_public_ip_disabled", + "title": "Ensure subnets have auto-assign public IP disabled to prevent accidental public access", + "result_kinds": ["aws_ec2_subnet"], + "categories": ["security", "compliance"], + "risk": "Disabling auto-assign public IP in subnets is crucial for security as it reduces the exposure of instances to the public internet, minimizing the risk of external attacks. 
This setting helps control the network accessibility of EC2 instances and maintain a secure environment within the VPC.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ec2_subnet) and subnet_map_public_ip_on_launch=true" + }, + "remediation": { + "text": "To fix this issue, select the subnet, click 'Actions', then 'Modify auto-assign IP settings', uncheck 'Auto-assign IPv4', and save.", + "url": "https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Subnetze Auto-Assign Public IP deaktiviert haben, um versehentlichen öffentlichen Zugriff zu verhindern", + "risk": "Die Deaktivierung von Auto-Assign Public IP in Subnetzen ist für die Sicherheit entscheidend, da sie die Exposition von Instanzen gegenüber dem öffentlichen Internet verringert und das Risiko externer Angriffe minimiert. Diese Einstellung hilft dabei, die Netzwerkzugänglichkeit von EC2-Instanzen zu kontrollieren und eine sichere Umgebung im VPC aufrechtzuerhalten.", + "remediation": "Um dieses Problem zu beheben, wählen Sie das Subnetz aus, klicken Sie auf 'Aktionen', dann auf 'Auto-Assign IP-Einstellungen ändern', deaktivieren Sie 'Auto-Assign IPv4' und speichern Sie." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_ecr.json b/resotocore/resotocore/static/report/checks/aws/aws_ecr.json index eae76260f8..5b457e8d33 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_ecr.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_ecr.json @@ -4,17 +4,71 @@ "checks": [ { "name": "image_scan_on_push", - "title": "Check if ECR image scan on push is enabled.", - "result_kind": "aws_ecr_repository", - "categories": [], - "risk": "Amazon ECR image scanning helps in identifying software vulnerabilities in your container images. Amazon ECR uses the Common Vulnerabilities and Exposures (CVEs) database from the open-source Clair project and provides a list of scan findings. 
", + "title": "Ensure ECR image scan on push is enabled", + "result_kinds": ["aws_ecr_repository"], + "categories": ["security", "compliance"], + "risk": "Without enabling ECR image scanning, software vulnerabilities in container images may go undetected, increasing the risk of potential security incidents. ECR image scanning provides a list of scan findings based on the Common Vulnerabilities and Exposures (CVEs) database from the open-source Clair project.", "severity": "medium", "detect": { "resoto": "is(aws_ecr_repository) and image_scan_on_push = false" }, "remediation": { - "text": "Enable ECR image scanning and review the scan findings for information about the security of the container images that are being deployed.", + "text": "To fix this issue, enable ECR image scanning and review the scan findings for information about the security of the container images being deployed.", "url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html" + }, + "url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die ECR-Bildscan beim Pushen aktiviert ist", + "risk": "Ohne die Aktivierung des ECR-Bildscans beim Pushen können Software-Schwachstellen in Container-Bildern unentdeckt bleiben. Dadurch steigt das Risiko potenzieller Sicherheitsvorfälle. Der ECR-Bildscan liefert eine Liste von Scan-Ergebnissen, die auf der Common Vulnerabilities and Exposures (CVEs) Datenbank des Open-Source-Projekts Clair basieren.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie den ECR-Bildscan und überprüfen Sie die Scan-Ergebnisse, um Informationen zur Sicherheit der bereitgestellten Container-Bilder zu erhalten." 
+ } + } + }, + { + "name": "repository_prohibit_public_access", + "title": "Ensure ECR repositories are not publicly accessible to mitigate security risks, prevent unauthorized access, and maintain control over container image distribution.", + "result_kinds": ["aws_ecr_repository"], + "categories": ["security", "compliance"], + "risk": "Failure to secure ECR repositories can lead to unauthorized access to code bases and application artifacts, posing serious security risks.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ecr_repository) and repository_visibility==public" + }, + "remediation": { + "text": "To fix the issue, delete any public repositories in ECR and redeploy them as private.", + "url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-create.html" + }, + "url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-create.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ECR-Repositories nicht öffentlich zugänglich sind, um Sicherheitsrisiken zu minimieren, unbefugten Zugriff zu verhindern und die Kontrolle über die Verteilung von Container-Images zu behalten.", + "risk": "Die Nichtsicherung von ECR-Repositories kann zu unbefugtem Zugriff auf Code-Basen und Anwendungsartefakte führen und ernsthafte Sicherheitsrisiken darstellen.", + "remediation": "Um das Problem zu beheben, löschen Sie alle öffentlichen Repositories in ECR und erstellen Sie sie erneut als private." 
 + } + } + }, + { + "name": "repository_lifecycle_policy_enabled", + "title": "Ensure ECR repositories have lifecycle policies enabled", + "result_kinds": ["aws_ecr_repository"], + "categories": ["security", "compliance"], + "risk": "If lifecycle policies are not enabled for Amazon ECR repositories, there is a risk of retaining a large number of images, which can result in unnecessary costs.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ecr_repository) and lifecycle_policy==null" + }, + "remediation": { + "text": "To fix this issue, open the Amazon ECR console and create a lifecycle policy for each repository.", + "url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html" + }, + "url": "https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon ECR-Repositories Lebenszyklusrichtlinien aktiviert haben", + "risk": "Wenn für Amazon ECR-Repositories keine Lebenszyklusrichtlinien aktiviert sind, besteht das Risiko, dass eine große Anzahl von Abbildern aufbewahrt wird, was zu unnötigen Kosten führen kann.", + "remediation": "Um dieses Problem zu beheben, öffnen Sie die Amazon ECR-Konsole und erstellen Sie für jedes Repository eine Lebenszyklusrichtlinie." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_ecs.json b/resotocore/resotocore/static/report/checks/aws/aws_ecs.json new file mode 100644 index 0000000000..17e4443901 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_ecs.json @@ -0,0 +1,141 @@ +{ + "provider": "aws", + "service": "ecs", + "checks": [ + { + "name": "no_secrets_in_task_definition_env", + "title": "Ensure there are no secrets in ECS task definition environment variables", + "result_kinds": ["aws_ecs_task_definition"], + "categories": ["security", "compliance"], + "risk": "Using hard-coded passwords increases the risk of password guessing. 
Malicious users may gain unauthorized access through the compromised account.", + "severity": "high", + "detect": { + "resoto_cmd": "search is(aws_ecs_task_definition) and container_definitions[*].{environment_files[*].value != null or environment[*].value!=None} | detect-secrets --path container_definitions[*].environment_files[*].value container_definitions[*].environment[*].value --with-secrets" + }, + "remediation": { + "text": "To securely provide credentials to containers, use AWS Secrets Manager or Parameter Store instead of hard-coding the secrets in the code or passing them through environment variables. Note that task definition revisions containing plaintext secrets cannot be deleted. AWS is planning to implement a feature to address this in 2023, so it is recommended to rotate plaintext secrets while moving them to Secrets Manager or Parameter Store.", + "url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass in der Umgebungsvariablen der ECS Task-Definition keine Secrets enthalten sind", + "risk": "Die Verwendung von fest codierten Passwörtern erhöht das Risiko von Passwortraten. Bösartige Benutzer können durch das kompromittierte Konto unbefugten Zugriff erlangen.", + "remediation": "Verwenden Sie zur sicheren Bereitstellung von Anmeldeinformationen an Container AWS Secrets Manager oder Parameter Store anstelle einer Festcodierung der Secrets im Code oder einer Übermittlung über Umgebungsvariablen. Beachten Sie, dass Task-Definitionen, die Klartext-Secrets enthalten, nicht gelöscht werden können. AWS plant, ab 2023 eine Funktion zur Behebung dieses Problems zu implementieren. Es wird empfohlen, Klartext-Secrets beim Transfer zu Secrets Manager oder Parameter Store zu rotieren." 
+ } + } + }, + { + "name": "host_mode_not_privileged_nor_root", + "title": "Ensure there are no task definitions with containers in host mode running as root or with privileged access", + "result_kinds": ["aws_ecs_task_definition"], + "categories": ["security", "compliance"], + "risk": "Running an ECS Task Definition with a container in host networking mode, either with privileged access or as the root user, grants extensive control over the host system. This configuration increases the risk of security breaches, as it could allow malicious actors, if they compromise the container, to access or compromise not only the host machine but also other containers running on it.", + "severity": "high", + "detect": { + "resoto": "is(aws_ecs_task_definition) and network_mode==host and container_definitions[*].{privileged==true or user==root}" + }, + "remediation": { + "text": "To fix the issue, revise the ECS Task Definition to disable privileged mode and avoid running containers as the root user, especially in host networking mode. Use least-privileged user accounts within containers and limit network access where possible. Ensure that containers have only the permissions they need to operate, and review IAM roles and policies for necessary restrictions. For existing task definitions that require significant permissions, assess and implement security best practices to minimize potential attack surfaces. 
AWS documentation provides guidelines for securing ECS tasks and containers.", + "url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine Task-Definitionen mit Containern im host-Modus als Root-Benutzer oder mit privilegiertem Zugriff ausgeführt werden", + "risk": "Die Ausführung einer ECS Task-Definition mit einem Container im Host-Netzwerkmodus, entweder mit privilegiertem Zugriff oder als Root-Benutzer, gewährt umfassende Kontrolle über das Host-System. Diese Konfiguration erhöht das Risiko von Sicherheitsverletzungen, da sie bösartigen Akteuren, wenn sie den Container kompromittieren, den Zugriff oder die Kompromittierung nicht nur der Host-Maschine, sondern auch anderer darauf laufender Container ermöglichen könnte.", + "remediation": "Um das Problem zu beheben, überprüfen Sie die ECS Task-Definition, um den privilegierten Modus zu deaktivieren und Container nicht als Root-Benutzer, insbesondere im Host-Netzwerkmodus, auszuführen. Verwenden Sie Benutzerkonten mit möglichst wenigen Rechten in den Containern und beschränken Sie die Netzwerkzugriffe, wo immer möglich. Stellen Sie sicher, dass Container nur die Berechtigungen haben, die zum Betrieb erforderlich sind, und überprüfen Sie IAM-Rollen und -Policies auf notwendige Einschränkungen. Für bestehende Task-Definitionen, die umfangreiche Berechtigungen erfordern, sollten Sicherheitsbest Practices bewertet und implementiert werden, um potenzielle Angriffsflächen zu minimieren. Die AWS-Dokumentation enthält Richtlinien zur Absicherung von ECS-Aufgaben und Containern." 
 + } + } + }, + { + "name": "writable_root_filesystem_in_ecs_tasks", + "title": "Ensure ECS Task Definitions Have Read-Only Root Filesystems", + "result_kinds": ["aws_ecs_task_definition"], + "categories": ["security", "compliance"], + "risk": "Allowing writable root filesystems in ECS task definitions can pose a significant security risk. If a container is compromised, attackers can manipulate system files, install malicious software, and perform unauthorized activities.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ecs_task_definition) and container_definitions[*].readonly_root_filesystem!=true" + }, + "remediation": { + "text": "To fix this issue, modify ECS task definitions and set 'readonly_root_filesystem' to 'true' for each container. This ensures that containers operate with a read-only root filesystem, enhancing security by preventing unauthorized changes to system files. If necessary, use attached volumes for directories that require write access. For more information, refer to the [AWS documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions) on configuring container definitions in ECS task definitions.", + "url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ECS-Task-Definitionen über schreibgeschützte Stammdateisysteme verfügen", + "risk": "Die Verwendung von beschreibbaren Stammdateisystemen in ECS-Task-Definitionen kann ein erhebliches Sicherheitsrisiko darstellen. Wenn ein Container kompromittiert wird, können Angreifer Systemdateien manipulieren, bösartige Software installieren und unbefugte Aktivitäten durchführen.", + "remediation": "Um dieses Problem zu beheben, passen Sie die ECS-Task-Definitionen an und setzen Sie für jeden Container 'readonly_root_filesystem' auf 'true'. 
Dadurch wird sichergestellt, dass Container mit einem schreibgeschützten Stammdateisystem arbeiten und die Sicherheit verbessert wird, indem unbefugte Änderungen an Systemdateien verhindert werden. Verwenden Sie bei Bedarf angehängte Volumes für Verzeichnisse, die Schreibzugriff erfordern. Weitere Informationen finden Sie in der [AWS-Dokumentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions) zur Konfiguration von Container-Definitionen in ECS-Task-Definitionen." + } + } + }, + { + "name": "ecs_cluster_container_insights_enabled", + "title": "Ensure Container Insights is enabled for improved visibility and monitoring of container-based applications in Amazon ECS.", + "result_kinds": ["aws_ecs_task_definition"], + "categories": ["security", "compliance"], + "risk": "Not enabling Container Insights could result in reduced visibility of anomalies, delayed threat responses, potential compliance violations, and unchecked resource usage in containerized applications.", + "severity": "medium", + "detect": { + "manual": "Go to the AWS ECS console, select Account settings." 
 + }, + "remediation": { + "text": "To enable Container Insights, check the box for default opt-in at the bottom of the AWS ECS console page.", + "url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html" + }, + "url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Container Insights aktiviert ist, um die Sichtbarkeit und Überwachung von containerbasierten Anwendungen in Amazon ECS zu verbessern.", + "risk": "Die Nichtaktivierung von Container Insights kann zu einer reduzierten Sichtbarkeit von Anomalien, verzögerten Bedrohungsreaktionen, potenziellen Compliance-Verstößen und einer unkontrollierten Ressourcennutzung in containerisierten Anwendungen führen.", + "remediation": "Um Container Insights zu aktivieren, aktivieren Sie das Kontrollkästchen für die standardmäßige Zustimmung am unteren Rand der AWS ECS-Konsole-Seite." + } + } + }, + { + "name": "ecs_task_definition_logging_enabled", + "title": "Ensure all ECS Task Definitions have logging enabled to Cloudwatch", + "result_kinds": ["aws_ecs_task_definition"], + "categories": ["security", "compliance"], + "risk": "Not enabling ECS task definition logging increases the risk of missing critical information about application behavior, making it harder to diagnose issues, track security incidents, or audit system activities for compliance purposes.", + "severity": "medium", + "detect": { + "resoto": "is(aws_ecs_task_definition) and container_definitions[*].log_configuration.log_driver==null" + }, + "remediation": { + "text": "Update the task definition to include logging with an appropriate CloudWatch log group. 
Also, ensure that the task has appropriate permissions to push logs to CloudWatch from IAM.", + "url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/firelens-example-taskdefs.html" + }, + "url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass alle ECS Task-Definitionen zur Cloudwatch geloggt werden", + "risk": "Das Nichtaktivieren der ECS Task-Definition-Protokollierung erhöht das Risiko, wichtige Informationen über das Verhalten der Anwendung zu verpassen. Dadurch wird es schwieriger, Probleme zu diagnostizieren, Sicherheitsvorfälle zu verfolgen oder Systemaktivitäten für Compliance-Zwecke zu überprüfen.", + "remediation": "Aktualisieren Sie die Task-Definition, um das Protokollieren mit einer geeigneten CloudWatch-Log-Gruppe einzuschließen. Stellen Sie außerdem sicher, dass die Aufgabe die entsprechenden Berechtigungen besitzt, um Protokolle von IAM aus in CloudWatch zu übertragen." + } + } + }, + { + "name": "ecs_service_fargate_using_latest_platform_version", + "title": "Ensure that ECS Fargate services are using the latest platform version to take advantage of the latest patch and vulnerability management", + "result_kinds": [], + "categories": ["security", "compliance"], + "risk": "Not using the latest platform version for an ECS service can lead to exposure to vulnerabilities, missing out on security patches, and lacking the latest security features. 
This can potentially compromise the security of your containerized applications and data.", + "severity": "medium", + "detect": { + "manual": "Go to AWS ECS -> Click ECS Cluster and then Tasks" + }, + "remediation": { + "text": "To fix this issue, update the task definition for Fargate to use the latest platform version.", + "url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html" + }, + "url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass ECS Fargate-Services die neueste Plattformversion verwenden, um von den neuesten Patches und Schwachstellenverwaltungen zu profitieren", + "risk": "Die Nichtverwendung der neuesten Plattformversion für einen ECS-Dienst kann zu Sicherheitslücken führen, da Sicherheitspatches und aktuelle Sicherheitsfunktionen fehlen können. Dadurch könnte die Sicherheit Ihrer containerisierten Anwendungen und Daten beeinträchtigt werden.", + "remediation": "Um dieses Problem zu beheben, aktualisieren Sie die Task-Definition für Fargate, um die neueste Plattformversion zu verwenden." 
+ } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_efs.json b/resotocore/resotocore/static/report/checks/aws/aws_efs.json index f7f667fa0f..8959fca502 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_efs.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_efs.json @@ -4,17 +4,49 @@ "checks": [ { "name": "storage_encrypted", - "title": "Check if EFS protects sensitive data with encryption at rest", + "title": "Ensure EFS File Systems Encrypt Sensitive Data at Rest", "result_kinds": ["aws_efs_file_system"], "categories": ["security", "compliance"], - "risk": "EFS should be encrypted at rest to prevent exposure of sensitive data to bad actors", + "risk": "If EFS file systems are not encrypted at rest, sensitive data may be exposed to bad actors, leading to potential data breaches and compliance violations.", "severity": "medium", "detect": { "resoto": "is(aws_efs_file_system) and volume_encrypted==false" }, "remediation": { - "text": "Ensure that encryption at rest is enabled for EFS file systems. Encryption at rest can only be enabled during the file system creation.", + "text": "To fix the issue, ensure encryption at rest is enabled for EFS file systems. Encryption at rest can only be enabled during the file system creation.", "url": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html" + }, + "url": "https://docs.aws.amazon.com/efs/latest/ug/encryption-at-rest.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass EFS-Dateisysteme sensitive Daten in Ruhe verschlüsseln", + "risk": "Wenn EFS-Dateisysteme nicht in Ruhe verschlüsselt sind, können sensible Daten für böswillige Akteure zugänglich sein, was zu potenziellen Datenverstößen und Verletzungen der Compliance führen kann.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass die Verschlüsselung in Ruhe für EFS-Dateisysteme aktiviert ist. 
Die Verschlüsselung in Ruhe kann nur während der Erstellung des Dateisystems aktiviert werden." + } + } + }, + { + "name": "not_publicly_accessible", + "title": "Ensure Restrict Public Access in EFS File System Policies", + "result_kinds": ["aws_efs_file_system"], + "categories": ["security", "compliance"], + "risk": "Allowing public access to Elastic File System (EFS) could lead to unauthorized data exposure or data breaches. Publicly accessible EFS file systems are at risk of exposing sensitive information, making them potential targets for malicious actors.", + "severity": "medium", + "detect": { + "resoto": "is(aws_efs_file_system) and file_system_policy==null" + }, + "remediation": { + "text": "Modify the EFS file system's resource policy to restrict public access. Ensure that the policy does not include a principal set to '*', which would grant access to any user. Instead, define specific principals or use AWS IAM to manage access securely. Review the AWS documentation for guidance on creating and managing EFS access policies to safeguard your file systems effectively.", + "url": "https://docs.aws.amazon.com/efs/latest/ug/access-control-block-public-access.html" + }, + "internal_note": "Evaluate the statement in the file_system_policy to determine if it is publicly accessible.", + "url": "https://docs.aws.amazon.com/efs/latest/ug/access-control-block-public-access.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der öffentliche Zugriff in EFS-Dateisystemrichtlinien eingeschränkt ist", + "risk": "Die Zulassung öffentlichen Zugriffs auf das Elastic File System (EFS) kann zu unbefugter Datenexposition oder Datenverletzungen führen. Öffentlich zugängliche EFS-Dateisysteme sind gefährdet und können sensible Informationen preisgeben, was sie zu potenziellen Zielen für bösartige Akteure macht.", + "remediation": "Ändern Sie die Ressourcenrichtlinie des EFS-Dateisystems, um den öffentlichen Zugriff einzuschränken. 
Stellen Sie sicher, dass die Richtlinie keinen Prinzipal enthält, der auf '*' gesetzt ist, was jedem Benutzer Zugriff gewähren würde. Definieren Sie stattdessen spezifische Prinzipale oder verwenden Sie AWS IAM, um den Zugriff sicher zu verwalten. Konsultieren Sie die AWS-Dokumentation für Anleitungen zur Erstellung und Verwaltung von EFS-Zugriffsrichtlinien, um Ihre Dateisysteme effektiv abzusichern." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_eks.json b/resotocore/resotocore/static/report/checks/aws/aws_eks.json new file mode 100644 index 0000000000..a13cc9945d --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_eks.json @@ -0,0 +1,74 @@ +{ + "provider": "aws", + "service": "eks", + "checks": [ + { + "name": "cluster_endpoint_restrict_public_access", + "title": "Ensure that Public Access to EKS Cluster Endpoint is Restricted", + "result_kinds": ["aws_eks_cluster"], + "categories": ["security", "compliance"], + "risk": "If EKS cluster endpoint is publicly accessible, it exposes the cluster to potential security risks as it is reachable from the public internet. This could lead to unauthorized access or data breaches.", + "severity": "medium", + "detect": { + "resoto": "is(aws_eks_cluster) and cluster_resources_vpc_config.endpoint_public_access==true" + }, + "remediation": { + "text": "Restrict the EKS cluster's public access by adjusting the security group settings. Implement network policies that enforce secure access controls and restrict inbound traffic. 
You can also choose to disable public access entirely or limit the IP addresses that can access your API server from the internet.", + "url": "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html" + }, + "url": "https://aws.amazon.com/premiumsupport/knowledge-center/eks-api-server-unauthorized-error/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der öffentliche Zugriff auf den EKS Cluster Endpoint eingeschränkt ist", + "risk": "Wenn der EKS Cluster Endpoint öffentlich erreichbar ist, birgt dies potenzielle Sicherheitsrisiken, da er über das öffentliche Internet erreichbar ist. Dies kann zu unbefugtem Zugriff oder Datenverletzungen führen.", + "remediation": "Schränken Sie den öffentlichen Zugriff auf den EKS Cluster ein, indem Sie die Sicherheitseinstellungen der Sicherheitsgruppe anpassen. Implementieren Sie Netzwerkrichtlinien, die sichere Zugriffskontrollen durchsetzen und eingehenden Datenverkehr beschränken. Sie können auch wählen, den öffentlichen Zugriff vollständig zu deaktivieren oder die IP-Adressen zu beschränken, die auf Ihren API-Server aus dem Internet zugreifen können." + } + } + }, + { + "name": "cluster_control_plane_audit_logging_enabled", + "title": "Ensure Control Planes for EKS Clusters are Configured with Audit Logging to Maintain Security Forensics and Audit Compliance", + "result_kinds": ["aws_eks_cluster"], + "categories": ["security", "compliance"], + "risk": "Without audit logging, potentially suspicious activities within the cluster could go unnoticed, leaving the system vulnerable to internal and external threats. It could also make it challenging to meet audit and compliance requirements.", + "severity": "medium", + "detect": { + "resoto": "is(aws_eks_cluster) and cluster_logging.cluster_logging[*].enabled = false" + }, + "remediation": { + "text": "Navigate to the 'Logging' tab of your EKS cluster in the AWS console to change settings. Select 'Edit' and enable or adjust control plane logging. 
Remember to save any change.", + "url": "https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html" + }, + "url": "https://docs.aws.amazon.com/eks/latest/userguide/cluster-logging.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Steuerungsebenen für EKS-Cluster mit Audit-Logging konfiguriert sind, um Sicherheitsforensik und Audit-Konformität zu gewährleisten", + "risk": "Ohne Audit-Logging könnten potenziell verdächtige Aktivitäten innerhalb des Clusters unbemerkt bleiben und das System anfällig für interne und externe Bedrohungen machen. Es könnte auch schwierig sein, Audit- und Compliance-Anforderungen zu erfüllen.", + "remediation": "Navigieren Sie zur 'Logging'-Registerkarte Ihres EKS-Clusters in der AWS-Konsole, um die Einstellungen zu ändern. Wählen Sie 'Bearbeiten' aus und aktivieren oder stellen Sie das Steuerungs-Logging ein. Vergessen Sie nicht, jede Änderung zu speichern." + } + } + }, + { + "name": "cluster_encryption_enabled", + "title": "Ensure Kubernetes Secrets are encrypted using AWS KMS Customer Master Keys (CMKs)", + "result_kinds": ["aws_eks_cluster"], + "categories": ["security", "compliance"], + "risk": "Without envelope encryption, sensitive data may be exposed within your applications, leading to potential security breaches. This measure is crucial to a comprehensive defence-in-depth security strategy.", + "severity": "medium", + "detect": { + "resoto": "is(aws_eks_cluster) and cluster_encryption_config in [null, []]" + }, + "remediation": { + "text": "Create a Customer Master Key (CMK) in AWS Key Management Service (KMS). 
Then, while creating a new Amazon EKS cluster, provide this CMK's Amazon Resource Name (ARN).", + "url": "https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Kubernetes Secrets mit AWS KMS Customer Master Keys (CMKs) verschlüsselt sind", + "risk": "Ohne Umschlagverschlüsselung können sensible Daten in Ihren Anwendungen offengelegt werden, was zu potenziellen Sicherheitsverletzungen führen kann. Diese Maßnahme ist entscheidend für eine umfassende Verteidigung-in-Depth-Sicherheitsstrategie.", + "remediation": "Erstellen Sie einen Customer Master Key (CMK) im AWS Key Management Service (KMS). Geben Sie dann beim Erstellen eines neuen Amazon EKS-Clusters den Amazon Resource Name (ARN) dieses CMKs an." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_elb.json b/resotocore/resotocore/static/report/checks/aws/aws_elb.json new file mode 100644 index 0000000000..9b90e893db --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_elb.json @@ -0,0 +1,118 @@ +{ + "provider": "aws", + "service": "elb", + "checks": [ + { + "name": "alb_has_no_listeners", + "title": "Ensure that Elastic Load Balancers V2 have listeners configured.", + "result_kinds": ["aws_alb"], + "categories": ["unused"], + "risk": "If no listeners are configured for an Application Load Balancer, it will not be able to receive traffic from clients and route requests to registered targets.", + "severity": "medium", + "detect": { + "resoto": "is(aws_alb) and alb_listener in [null, []]" + }, + "remediation": { + "text": "To fix this issue, add listeners to the Elastic Load Balancers V2.", + "url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html" + }, + "url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass 
Elastic Load Balancers V2 konfigurierte Listener haben.", + "risk": "Wenn keine Listener für einen Application Load Balancer konfiguriert sind, kann er keinen Datenverkehr von Clients empfangen und Anfragen an registrierte Ziele weiterleiten.", + "remediation": "Um dieses Problem zu beheben, fügen Sie Listener zu den Elastic Load Balancers V2 hinzu." + } + } + }, + { + "name": "elb_has_no_listeners", + "title": "Ensure Elastic Load Balancers have listeners.", + "result_kinds": ["aws_elb"], + "categories": ["unused"], + "risk": "Without listeners, Elastic Load Balancers cannot receive traffic from clients and cannot route requests to registered targets.", + "severity": "medium", + "detect": { + "resoto": "is(aws_elb) and listener in [null, []]" + }, + "remediation": { + "text": "Add listeners to Elastic Load Balancers to allow them to receive and route traffic.", + "url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Elastic Load Balancers Zuhörer haben.", + "risk": "Ohne Zuhörer können Elastic Load Balancers keinen Datenverkehr von Clients empfangen und Anfragen nicht an registrierte Ziele weiterleiten.", + "remediation": "Fügen Sie Elastic Load Balancers Zuhörer hinzu, um ihnen den Empfang und die Weiterleitung von Datenverkehr zu ermöglichen." 
+ } + } + }, + { + "name": "logging_enabled", + "title": "Ensure that logging is enabled for Application load balancers", + "result_kinds": ["aws_elb"], + "categories": ["security", "compliance"], + "risk": "Without enabling logging for Application load balancers, there is a risk of limited visibility into security risks, reduced threat detection, and inability to measure performance metrics.", + "severity": "medium", + "detect": { + "resoto": "is(aws_elb) and elb_attributes.access_log.enabled==false" + }, + "remediation": { + "text": "To enable logging for Application load balancers, go to the Attributes tab, find the Access logs section, click the Edit button, enable access logs, provide the necessary information such as the S3 bucket and prefix, and save the changes.", + "url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/enable-access-logging.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Protokollierung für Application Load Balancers aktiviert ist", + "risk": "Ohne die Aktivierung der Protokollierung für Application Load Balancers besteht die Gefahr einer begrenzten Sichtbarkeit von Sicherheitsrisiken, einer verringerten Bedrohungserkennung und der Unfähigkeit, Leistungsmetriken zu messen.", + "remediation": "Um die Protokollierung für Application Load Balancers zu aktivieren, gehen Sie zum Abschnitt 'Attribute', suchen Sie den Bereich 'Zugriffsprotokolle' und klicken Sie auf die Schaltfläche 'Bearbeiten'. Aktivieren Sie die Zugriffsprotokolle und geben Sie die erforderlichen Informationen wie den S3-Bucket und den Präfix ein und speichern Sie die Änderungen." 
+ } + } + }, + { + "name": "uses_ssl_certificate", + "title": "Ensure Load balancer uses SSL certificate for port 443", + "result_kinds": ["aws_elb"], + "categories": ["security", "compliance"], + "risk": "Not having a valid SSL certificate for an open port 443 can result in security risks such as exposure of sensitive information, vulnerability to multiple attacks, and loss of customer trust.", + "severity": "medium", + "detect": { + "resoto": "is(aws_elb) and elb_listener_descriptions[*].{listener.protocol in [HTTPS, SSL] and listener.ssl_certificate_id==null}" + }, + "remediation": { + "text": "To fix the issue, go to the load balancer settings, click on 'View/edit rules' for port 443, and associate a valid SSL certificate.", + "url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/listener-update-certificates.html" + }, + "url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/listener-update-certificates.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der Lastenausgleicher ein SSL-Zertifikat für den Port 443 verwendet", + "risk": "Das Fehlen eines gültigen SSL-Zertifikats für einen offenen Port 443 kann zu Sicherheitsrisiken führen, wie z. B. der Offenlegung sensibler Informationen, der Anfälligkeit für verschiedene Angriffe und dem Verlust des Vertrauens der Kunden.", + "remediation": "Um das Problem zu beheben, gehen Sie zu den Einstellungen des Lastenausgleichers, klicken Sie auf 'Regeln anzeigen/bearbeiten' für Port 443 und verknüpfen Sie ein gültiges SSL-Zertifikat." + } + } + }, + { + "name": "waf_enabled", + "title": "Ensure that Application Load Balancer has Web Application Firewall (WAF) enabled for enhanced security", + "result_kinds": ["aws_alb"], + "categories": ["security", "compliance"], + "risk": "Enabling WAF on an Application Load Balancer provides a layer of protection against common web exploits and attacks. 
It filters, monitors, and blocks harmful traffic before reaching applications, enhancing overall security.", + "severity": "medium", + "detect": { + "resoto": "is(aws_alb) with (empty, <-- is(aws_waf_web_acl))" + }, + "remediation": { + "text": "Open the AWS WAF & Shield console, and go to 'Web ACLs'. Create or select an existing Web ACL for the Load Balancer.", + "url": "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der Application Load Balancer die Web Application Firewall (WAF) für eine verbesserte Sicherheit aktiviert hat", + "risk": "Die Aktivierung von WAF auf einem Application Load Balancer bietet eine Schutzschicht gegen häufige Web-Exploits und Angriffe. Es filtert, überwacht und blockiert schädlichen Datenverkehr, bevor er die Anwendungen erreicht und somit die Gesamtsicherheit verbessert.", + "remediation": "Öffnen Sie die AWS WAF & Shield-Konsole und gehen Sie zu 'Web ACLs'. Erstellen Sie eine neue Web ACL oder wählen Sie eine vorhandene Web ACL für den Load Balancer aus." 
+ } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_glacier.json b/resotocore/resotocore/static/report/checks/aws/aws_glacier.json new file mode 100644 index 0000000000..d95778541c --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_glacier.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "glacier", + "checks": [ + { + "name": "vaults_policy_public_access", + "title": "Ensure S3 Glacier vaults have policies that restrict access to authorized users.", + "result_kinds": ["aws_glacier_vault"], + "categories": ["security"], + "risk": "If the issue is not resolved, S3 Glacier vaults accessible to everyone could lead to unauthorized access and exposure of sensitive data to potential malicious attackers.", + "severity": "critical", + "detect": { + "resoto": "is(aws_glacier_vault) and glacier_access_policy.Statement[*].{Effect==Allow and (Principal==\"*\" or Principal.AWS=\"*\" or Principal.CanonicalUser=\"*\")}" + }, + "remediation": { + "text": "To rectify the problem, ensure that the vault policy does not grant access to unauthorized users. Follow the guidelines provided in the AWS documentation to set up proper access controls for S3 Glacier vaults.", + "url": "https://docs.aws.amazon.com/amazonglacier/latest/dev/access-control-overview.html" + }, + "url": "https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-glacier.html", + "localizations": { + "de": { + "title": "Sorgen Sie dafür, dass S3 Glacier-Tresore Richtlinien haben, die den Zugriff auf autorisierte Benutzer beschränken.", + "risk": "Wenn das Problem nicht behoben wird, können S3 Glacier-Tresore, die für jeden zugänglich sind, zu unbefugtem Zugriff und Offenlegung sensibler Daten für potenziell bösartige Angreifer führen.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass die Tresor-Richtlinie keinen Zugriff für nicht autorisierte Benutzer gewährt. 
Befolgen Sie die Richtlinien in der AWS-Dokumentation, um angemessene Zugriffskontrollen für S3 Glacier-Tresore einzurichten." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_iam.json b/resotocore/resotocore/static/report/checks/aws/aws_iam.json index 8cb1d5b39e..56fc1f0890 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_iam.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_iam.json @@ -4,100 +4,170 @@ "checks": [ { "name": "account_maintain_current_contact_details", - "title": "Maintain current contact details.", + "title": "Ensure contact details for AWS accounts are current and mapped to multiple individuals in your organization", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. An AWS account supports a number of contact details; and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy. If an AWS account is observed to be behaving in a prohibited or suspicious manner; AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation; proactive measures may be taken; including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question.", + "categories": ["security", "compliance"], + "risk": "Failure to maintain current contact email and telephone details for AWS accounts can result in security breaches. 
If AWS observes prohibited or suspicious behavior from an account and is unable to contact the account owner, it may take proactive measures such as throttling traffic, causing impaired service to and from the account.", "severity": "medium", "detect": { - "manual": "Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Contact Information." + "manual": "To check and update contact information, login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Contact Information." }, "remediation": { - "text": "Using the Billing and Cost Management console complete contact details.", + "text": "To maintain current contact details, access the Billing and Cost Management console and complete the necessary information.", "url": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html" + }, + "url": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Kontaktinformationen für AWS-Konten aktuell sind und mehreren Personen in Ihrer Organisation zugeordnet sind", + "risk": "Das Versäumnis, aktuelle Kontakt-E-Mail- und Telefoninformationen für AWS-Konten aufrechtzuerhalten, kann zu Sicherheitsverletzungen führen. Wenn AWS unzulässiges oder verdächtiges Verhalten von einem Konto feststellt und keinen Kontakt zum Kontoinhaber herstellen kann, kann es proaktive Maßnahmen ergreifen, wie beispielsweise die Drosselung des Datenverkehrs, was zu einer beeinträchtigten Servicequalität für das Konto führt.", + "remediation": "Um die aktuellen Kontaktinformationen aufrechtzuerhalten, greifen Sie auf die Billing and Cost Management Console zu und vervollständigen Sie die erforderlichen Informationen." 
+ } } }, { "name": "account_security_contact_information_is_registered", "title": "Ensure security contact information is registered", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "AWS provides customers with the option of specifying the contact information for accounts security team. It is recommended that this information be provided. Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.", + "categories": ["security", "compliance"], + "risk": "Failure to provide security contact information in AWS account settings could result in missed security advisories. Registering security-specific contact information will ensure that security advisories sent by AWS reach the appropriate team in your organization, enhancing your ability to respond effectively.", "severity": "medium", "detect": { - "manual": "Login to the AWS Console. Choose your account name on the top right of the window -> My Account -> Alternate Contacts -> Security Section." + "manual": "To check, login to the AWS Console, click on your account name on the top right of the window, go to My Account, and navigate to the Alternate Contacts section. Check if the Security Section is completed." 
}, "remediation": { - "text": "Go to the My Account section and complete alternate contacts.", + "text": "To fix this issue, go to the My Account section in the AWS Console, and complete the alternate contacts, specifically the Security Section.", "url": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html" + }, + "url": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Sicherheitskontaktinformationen registriert sind", + "risk": "Das Nichtbereitstellen von Sicherheitskontaktinformationen in den AWS-Kontoeinstellungen kann zu versäumten Sicherheitswarnungen führen. Durch die Registrierung von sicherheitsspezifischen Kontaktinformationen wird sichergestellt, dass Sicherheitswarnungen von AWS das entsprechende Team in Ihrer Organisation erreichen und somit Ihre Fähigkeit zur effektiven Reaktion verbessern.", + "remediation": "Um dieses Problem zu beheben, gehen Sie zur Sektion 'Mein Konto' in der AWS-Konsole und füllen Sie die alternativen Kontakte aus, insbesondere den Sicherheitsabschnitt." + } } }, { "name": "account_security_questions_are_registered_in_the_aws_account", - "title": "Ensure security questions are registered in the AWS account.", + "title": "Ensure security questions are registered in the AWS account", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "The AWS support portal allows account owners to establish security questions that can be used to authenticate individuals calling AWS customer service for support. It is recommended that security questions be established. When creating a new AWS account a default super user is automatically created. This account is referred to as the root account. It is recommended that the use of this account be limited and highly controlled. 
During events in which the root password is no longer accessible or the MFA token associated with root is lost", + "categories": ["security", "compliance"], + "risk": "Failure to establish security questions in the AWS support portal can lead to limited control and accessibility issues with the root account. In the event of a lost root password or MFA token, the account owner may face difficulties in account recovery and verification.", "severity": "medium", "detect": { - "manual": "Login to the AWS Console as root. Choose your account name on the top right of the window -> My Account -> Configure Security Challenge Questions." + "manual": "To check if security questions are registered, login to the AWS Console as the root account. Choose your account name on the top right of the window, go to My Account, and select Configure Security Challenge Questions." }, "remediation": { - "text": "Login as root account and from My Account configure Security questions.", + "text": "To fix this issue, login as the root account and configure Security Questions in My Account.", "url": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-security-challenge.html" + }, + "localizations": { + "de": { + "title": "Sicherstellen, dass Sicherheitsfragen im AWS-Konto registriert sind", + "risk": "Das Versäumnis, Sicherheitsfragen im AWS-Support-Portal festzulegen, kann zu eingeschränkter Kontrolle und Zugänglichkeitsproblemen mit dem Root-Konto führen. Im Falle eines verlorenen Root-Passworts oder MFA-Tokens kann der Kontoinhaber Schwierigkeiten bei der Wiederherstellung und Verifizierung des Kontos haben.", + "remediation": "Um dieses Problem zu beheben, melden Sie sich als Root-Konto an und konfigurieren Sie Sicherheitsfragen in Mein Konto." 
+ } } }, { "name": "no_root_access_key", "title": "Ensure no root account access key exists", "result_kinds": ["aws_root_user"], - "categories": [ "security", "compliance" ], - "risk": "The root account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the root account be removed. Removing access keys associated with the root account limits vectors by which the account can be compromised. Removing the root access keys encourages the creation and use of role based accounts that are least privileged.", + "categories": ["security", "compliance"], + "risk": "The root account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the root account be removed. Removing access keys associated with the root account limits vectors by which the account can be compromised. Removing the root access keys encourages the creation and use of role-based accounts that are least privileged.", "severity": "critical", "detect": { "resoto": "is(aws_root_user) with(any, --> is(access_key))" }, "remediation": { - "text": "Use the credential report to ensure the access_key_1_active and access_key_2_active fields are set to FALSE. If the access keys are still active, delete them using the IAM console.", + "text": "Ensure the access_key_1_active and access_key_2_active fields in the credential report are set to FALSE. 
If the access keys are still active, delete them using the IAM console.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass kein Zugangsschlüssel für das Root-Konto vorhanden ist", + "risk": "Das Root-Konto ist der privilegierteste Benutzer in einem AWS-Konto. AWS-Zugangsschlüssel ermöglichen programmgesteuerten Zugriff auf ein bestimmtes AWS-Konto. Es wird empfohlen, alle Zugangsschlüssel, die mit dem Root-Konto verbunden sind, zu entfernen. Das Entfernen der Zugangsschlüssel, die mit dem Root-Konto verbunden sind, begrenzt die Angriffsvektoren, über die das Konto kompromittiert werden kann. Durch das Entfernen der Root-Zugangsschlüssel wird die Erstellung und Verwendung rollenbasierter Konten gefördert, die minimalste Berechtigungen haben.", + "remediation": "Stellen Sie sicher, dass die Felder access_key_1_active und access_key_2_active im Credential-Bericht auf FALSE gesetzt sind. Wenn die Zugangsschlüssel immer noch aktiv sind, löschen Sie sie über die IAM-Konsole." + } } }, { "name": "root_mfa_enabled", "title": "Ensure MFA is enabled for the root account", "result_kinds": ["aws_root_user"], - "categories": [ "security", "compliance" ], - "risk": "The root account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled when a user signs in to an AWS website they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. When virtual MFA is used for root accounts it is recommended that the device used is NOT a personal device but rather a dedicated mobile device (tablet or phone) that is managed to be kept charged and secured independent of any individual personal devices. 
(non-personal virtual MFA) This lessens the risks of losing access to the MFA due to device loss / trade-in or if the individual owning the device is no longer employed at the company.", + "categories": ["security", "compliance"], + "risk": "The root account is the most privileged user in an AWS account. Without MFA, the root account is vulnerable to unauthorized access, increasing the risk of security breaches. Enabling MFA adds an extra layer of protection, ensuring that only authorized individuals can access the account.", "severity": "critical", "detect": { "resoto": "is(aws_root_user) and mfa_active!=true" }, "remediation": { - "text": "Using IAM console navigate to Dashboard and expand Activate MFA on your root account.", + "text": "To fix this issue, navigate to the AWS Identity and Access Management (IAM) console, access the Dashboard, and activate MFA on the root account.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_considerations", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass MFA für das Root-Konto aktiviert ist", + "risk": "Das Root-Konto ist der privilegierteste Benutzer in einem AWS-Konto. Ohne MFA ist das Root-Konto anfällig für unbefugten Zugriff, was das Risiko von Sicherheitsverstößen erhöht. Die Aktivierung von MFA bietet eine zusätzliche Schutzschicht und stellt sicher, dass nur autorisierte Personen auf das Konto zugreifen können.", + "remediation": "Um dieses Problem zu beheben, navigieren Sie zur AWS Identity and Access Management (IAM) Konsole, greifen Sie auf das Dashboard zu und aktivieren Sie MFA für das Root-Konto." 
+ } } }, { "name": "root_hardware_mfa_enabled", "title": "Ensure hardware MFA is enabled for the root account", "result_kinds": ["aws_root_user"], - "categories": [ "security", "compliance" ], - "risk": "The root account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled when a user signs in to an AWS website they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. For Level 2 it is recommended that the root account be protected with a hardware MFA./ trade-in or if the individual owning the device is no longer employed at the company.", - "severity": "critical", + "categories": ["security", "compliance"], + "risk": "The root account is the most privileged user in an AWS account. Enabling MFA adds an extra layer of protection by requiring an authentication code from a hardware MFA device along with the user name and password, significantly reducing the risk of unauthorized access to the root account. 
It is important for Level 2 security to have the root account protected with a hardware MFA device.", + "severity": "low", "detect": { - "resoto": "is(aws_root_user) and user_virtual_mfa_devices!=null and user_virtual_mfa_devices!=[]" + "resoto": "is(aws_root_user) and user_virtual_mfa_devices==null or user_virtual_mfa_devices==[]" }, "remediation": { - "text": "Using IAM console navigate to Dashboard and expand Activate MFA on your root account.", + "text": "To enable hardware MFA for the root account, navigate to the IAM console Dashboard and activate MFA.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Hardware-MFA für das Root-Konto aktiviert ist", + "risk": "Das Root-Konto ist der privilegierteste Benutzer in einem AWS-Konto. Durch die Aktivierung der MFA wird eine zusätzliche Sicherheitsebene geschaffen, die neben Benutzername und Passwort auch einen Authentifizierungscode von einem Hardware-MFA-Gerät erfordert. Dies reduziert das Risiko eines unbefugten Zugriffs auf das Root-Konto erheblich. Es ist wichtig, dass das Root-Konto auf Sicherheitsstufe 2 mit einem Hardware-MFA-Gerät geschützt ist.", + "remediation": "Um die Hardware-MFA für das Root-Konto zu aktivieren, navigieren Sie zur IAM-Konsolen-Dashboard und aktivieren Sie MFA." 
+ } + } + }, + { + "name": "user_hardware_mfa_enabled", + "title": "Ensure hardware MFA is enabled for all IAM users", + "result_kinds": ["aws_iam_user"], + "categories": ["security", "compliance"], + "risk": "Not having hardware MFA enabled for IAM users increases the risk of unauthorized access to the AWS account.", + "severity": "low", + "detect": { + "resoto": "is(aws_iam_user) and user_virtual_mfa_devices in [null, []]" + }, + "remediation": { + "text": "To fix this issue, enable a hardware MFA device for each IAM user from the AWS Management Console, command line, or IAM API.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Hardware-MFA für alle IAM-Benutzer aktiviert ist", + "risk": "Das Fehlen von aktivierter Hardware-MFA für IAM-Benutzer erhöht das Risiko eines unbefugten Zugriffs auf das AWS-Konto.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie ein Hardware-MFA-Gerät für jeden IAM-Benutzer über die AWS Management Console, die Befehlszeile oder die IAM API." + } } }, { "name": "avoid_root_usage", - "title": "Avoid the use of the root accounts", + "title": "Ensure the Avoidance of Root Account Usage", "result_kinds": ["aws_root_user"], - "categories": [ "security", "compliance" ], - "risk": "The root account has unrestricted access to all resources in the AWS account. It is highly recommended that the use of this account be avoided.", + "categories": ["security", "compliance"], + "risk": "If the issue is not solved, the root account will maintain unrestricted access to all resources in the AWS account. 
It is highly recommended to avoid using this account due to the associated risks.", "severity": "critical", "detect": { "resoto": "is(aws_root_user) {access_keys[]: --> is(access_key)} password_last_used>{{last_access_younger_than.ago}} or access_keys[*].reported.access_key_last_used.last_used>{{last_access_younger_than.ago}}" @@ -106,76 +176,116 @@ "last_access_younger_than": "1d" }, "remediation": { - "text": "Follow the remediation instructions of the Ensure IAM policies are attached only to groups or roles recommendation.", + "text": "To fix the issue, follow the remediation instructions provided in the 'Ensure IAM policies are attached only to groups or roles' recommendation.", "url": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" + }, + "url": "http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Verwendung des Root-Kontos vermieden wird", + "risk": "Wenn das Problem nicht gelöst wird, behält das Root-Konto uneingeschränkten Zugriff auf alle Ressourcen im AWS-Konto. Es wird dringend empfohlen, die Verwendung dieses Kontos aufgrund der damit verbundenen Risiken zu vermeiden.", + "remediation": "Um das Problem zu beheben, befolgen Sie die Anweisungen zur Fehlerbehebung in der Empfehlung 'Stellen Sie sicher, dass IAM-Richtlinien nur Gruppen oder Rollen zugewiesen sind'." + } } }, { "name": "password_policy_minimum_length_14", - "title": "Ensure IAM password policy requires minimum length of 14 or greater", + "title": "Ensure IAM password policy requires a minimum length of 14 or greater", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "Password policies are used to enforce password complexity requirements. IAM password policies can be used to ensure password are comprised of different character sets. 
It is recommended that the password policy require minimum length of 14 or greater.", + "categories": ["security", "compliance"], + "risk": "Failure to enforce a strong password policy increases the risk of unauthorized access to AWS resources. Without a minimum length requirement of 14 or greater, it becomes easier for attackers to crack passwords and gain unauthorized access.", "severity": "medium", "detect": { "resoto": "is(aws_account) and minimum_password_length<14" }, "remediation": { - "text": "Ensure \"Minimum password length\" is checked under \"Password Policy\".", + "text": "To fix this issue, ensure that the \"Minimum password length\" option is checked under \"Password Policy\" in the AWS Identity and Access Management (IAM) console.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die IAM-Passwortrichtlinie eine Mindestlänge von 14 oder mehr erfordert", + "risk": "Die Nichtdurchsetzung einer starken Passwortrichtlinie erhöht das Risiko eines unbefugten Zugriffs auf AWS-Ressourcen. Ohne eine Mindestlängenanforderung von 14 oder mehr wird es einfacher für Angreifer, Passwörter zu knacken und unbefugten Zugriff zu erlangen.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass die Option \"Mindest-Passwortlänge\" unter \"Passwortrichtlinie\" in der AWS Identity and Access Management (IAM) Konsole aktiviert ist." + } } }, { "name": "password_policy_reuse_24", "title": "Ensure IAM password policy prevents password reuse: 24 or greater", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "Password policies are used to enforce password complexity requirements. IAM password policies can be used to ensure password are comprised of different character sets. 
It is recommended that the password policy prevents at least password reuse of 24 or greater.", + "categories": ["security", "compliance"], + "risk": "Failure to enforce a strong password policy increases the risk of unauthorized access to the AWS account. Without preventing password reuse of 24 or greater, weak passwords may be reused, making it easier for attackers to gain access.", "severity": "medium", "detect": { "resoto": "is(aws_account) and password_reuse_prevention<24" }, "remediation": { - "text": "Ensure \"Number of passwords to remember\" is set to 24.", + "text": "To fix this issue, ensure that the \"Number of passwords to remember\" in the account password policy is set to 24 or greater.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die IAM-Passwortrichtlinie die Wiederverwendung von Passwörtern verhindert: 24 oder höher", + "risk": "Die Nichtumsetzung einer starken Passwortrichtlinie erhöht das Risiko eines unbefugten Zugriffs auf das AWS-Konto. Ohne die Verhinderung der Wiederverwendung von Passwörtern mit einer Anzahl von 24 oder höher können schwache Passwörter wiederholt verwendet werden, was es Angreifern erleichtert, Zugriff zu erlangen.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass die \"Anzahl der zu merkenden Passwörter\" in der Konten-Passwortrichtlinie auf 24 oder höher festgelegt ist." 
+ } } }, { "name": "user_mfa_enabled_console_access", - "title": "Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password.", + "title": "Ensure multi-factor authentication (MFA) is enabled for all IAM users with console passwords.", "result_kinds": ["aws_iam_user"], - "categories": [ "security", "compliance" ], - "risk": "Password policies are used to enforce password complexity requirements. IAM password policies can be used to ensure password are comprised of different character sets. It is recommended that the password policy prevents at least password reuse of 24 or greater.", + "categories": ["security", "compliance"], + "risk": "If MFA is not enabled for IAM users with console passwords, it increases the risk of unauthorized access to AWS resources. Password complexity requirements and password policy enforcement may not be sufficient to protect against password reuse. It is recommended to prevent password reuse of 24 or more passwords to enhance security.", "severity": "high", "detect": { "resoto": "is(aws_iam_user) and password_enabled==true and mfa_active==false" }, "remediation": { - "text": "Enable MFA for users account. MFA is a simple best practice that adds an extra layer of protection on top of your user name and password. Recommended to use hardware keys over virtual MFA.", + "text": "To fix this issue, enable MFA for the user's account. MFA adds an extra layer of protection beyond just a username and password. 
It is recommended to use hardware keys instead of virtual MFA for enhanced security.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Mehr-Faktor-Authentifizierung (MFA) für alle IAM-Benutzer mit Konsolenpasswörtern aktiviert ist.", + "risk": "Wenn die MFA für IAM-Benutzer mit Konsolenpasswörtern nicht aktiviert ist, erhöht sich das Risiko eines unbefugten Zugriffs auf AWS-Ressourcen. Die Anforderungen an die Passwortkomplexität und die Durchsetzung von Passwortrichtlinien sind möglicherweise nicht ausreichend, um sich gegen die Wiederverwendung von Passwörtern zu schützen. Es wird empfohlen, die Wiederverwendung von 24 oder mehr Passwörtern zu verhindern, um die Sicherheit zu erhöhen.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie MFA für das Benutzerkonto. MFA bietet einen zusätzlichen Schutz über Benutzernamen und Passwort hinaus. Es wird empfohlen, anstelle von virtuellem MFA Hardware-Schlüssel für eine verbesserte Sicherheit zu verwenden." + } } }, { "name": "user_uses_access_keys_console_access", - "title": "Do not setup access keys during initial user setup for all IAM users that have a console password", + "title": "Ensure access keys are not created during initial user setup for IAM users with a console password", "result_kinds": ["aws_iam_access_key"], - "categories": [ "security", "compliance" ], - "risk": "AWS console defaults the checkbox for creating access keys to enabled. This results in many access keys being generated unnecessarily. In addition to unnecessary credentials; it also generates unnecessary management work in auditing and rotating these keys. 
Requiring that additional steps be taken by the user after their profile has been created will give a stronger indication of intent that access keys are (a) necessary for their work and (b) once the access key is established on an account that the keys may be in use somewhere in the organization.", + "categories": ["security", "compliance"], + "risk": "If access keys are generated unnecessarily during the initial user setup, it creates unnecessary credentials and management work to audit and rotate these keys. Requiring additional steps after the user's profile is created will indicate intent and ensure that access keys are only created when truly necessary.", "severity": "medium", "detect": { "resoto": "is(aws_iam_access_key) and access_key_status==\"Active\" and access_key_last_used.last_used==null and /ancestors.aws_iam_user.reported.password_enabled==true" }, "remediation": { - "text": "From the IAM console: generate credential report and disable not required keys.", + "text": "From the IAM console, generate a credential report and disable any access keys that are not required.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass bei der initialen Benutzereinrichtung für IAM-Benutzer mit einem Konsolenpasswort keine Zugriffsschlüssel erstellt werden", + "risk": "Wenn bei der initialen Benutzereinrichtung unnötigerweise Zugriffsschlüssel generiert werden, entstehen unnötige Anmeldeinformationen und Verwaltungsarbeit, um diese Schlüssel zu überprüfen und zu rotieren. 
Das Erfordernis weiterer Schritte nach Erstellung des Benutzerprofils deutet auf Absicht hin und stellt sicher, dass Zugriffsschlüssel nur dann erstellt werden, wenn sie wirklich erforderlich sind.", + "remediation": "Generieren Sie über die IAM-Konsole einen Berechtigungsbericht und deaktivieren Sie alle nicht erforderlichen Zugriffsschlüssel." + } } }, { "name": "disable_old_credentials", - "title": "Ensure credentials unused for 45 days or greater are disabled", - "result_kinds": ["aws_iam_access_key"], - "categories": [ "security", "compliance" ], - "risk": "To increase the security of your AWS account; remove IAM user credentials (that is; passwords and access keys) that are not needed. For example; when users leave your organization or no longer need AWS access.", + "title": "Ensure unused credentials are disabled after 45 days", + "result_kinds": ["aws_iam_user"], + "categories": ["security", "compliance"], + "risk": "Failure to disable unused IAM user credentials (passwords and access keys) poses a security risk to your AWS account. This includes credentials of users who have left the organization or no longer require AWS access.", "severity": "medium", "detect": { "resoto": "is(aws_iam_user) and password_last_used<{{password_used_since.ago}}" @@ -184,31 +294,46 @@ "password_used_since": "45d" }, "remediation": { - "text": "From the IAM console: generate credential report and disable not required keys.", + "text": "To fix the issue, generate a credential report from the IAM console and disable unnecessary keys.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass nicht genutzte Anmeldeinformationen nach 45 Tagen deaktiviert werden", + "risk": "Das Versäumnis, nicht genutzte IAM-Benutzeranmeldeinformationen (Passwörter und Zugriffsschlüssel) zu deaktivieren, stellt ein Sicherheitsrisiko für Ihr AWS-Konto dar. 
Dies betrifft die Anmeldeinformationen von Benutzern, die die Organisation verlassen haben oder keinen AWS-Zugriff mehr benötigen.", + "remediation": "Um das Problem zu beheben, generieren Sie einen Bericht über Anmeldeinformationen in der IAM-Konsole und deaktivieren Sie unnötige Schlüssel." + } } }, { "name": "user_has_two_active_access_keys", - "title": "Check if IAM users have two active access keys", + "title": "Ensure IAM users do not have two active access keys", "result_kinds": ["aws_iam_user"], - "categories": [ "security", "compliance" ], - "risk": "Access Keys could be lost or stolen. It creates a critical risk.", + "categories": ["security", "compliance"], + "risk": "Keeping two access keys active for a single IAM user doubles the number of credentials that can be lost or stolen, presenting a critical security risk.", "severity": "medium", "detect": { "resoto": "is(aws_iam_user) {access_keys[]: --> is(access_key)} access_keys[0].reported.access_key_status==\"Active\" and access_keys[1].reported.access_key_status==\"Active\"" }, "remediation": { - "text": "Avoid using long lived access keys.", + "text": "To fix the issue, avoid using long-lived access keys and rotate them regularly to minimize the risk of compromise.", "url": "https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListAccessKeys.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListAccessKeys.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass IAM-Benutzer nicht über zwei aktive Zugriffsschlüssel verfügen", + "risk": "Wenn IAM-Benutzer über zwei aktive Zugriffsschlüssel verfügen, verdoppelt sich die Anzahl der Anmeldeinformationen, die verloren oder gestohlen werden können, was ein kritisches Sicherheitsrisiko darstellt.", + "remediation": "Um das Problem zu beheben, sollten Sie vermeiden, langlebige Zugriffsschlüssel zu verwenden und diese regelmäßig zu rotieren, um das Risiko einer Kompromittierung zu minimieren." 
+ } } }, { "name": "rotate_access_keys_after_90_days", "title": "Ensure access keys are rotated every 90 days or less", "result_kinds": ["aws_iam_access_key"], - "categories": [ "security", "compliance" ], - "risk": "Access keys consist of an access key ID and secret access key which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI)- Tools for Windows PowerShell- the AWS SDKs- or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.", + "categories": ["security", "compliance"], + "risk": "Access keys consist of an access key ID and secret access key which are used to sign programmatic requests that you make to AWS. If access keys are not rotated regularly, it increases the risk of unauthorized access to AWS resources. Regularly rotating access keys helps mitigate the risk of compromised keys.", "severity": "medium", "detect": { "resoto": "is(aws_iam_access_key) and access_key_last_used.last_rotated<{{last_rotated_max.ago}}" @@ -217,61 +342,162 @@ "last_rotated_max": "90d" }, "remediation": { - "text": "Use the credential report to ensure access_key_X_last_rotated is less than 90 days ago.", + "text": "To fix the issue, check the credential report and ensure that the access_key_X_last_rotated value is less than 90 days ago.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Zugriffsschlüssel alle 90 Tage oder weniger rotiert werden", + "risk": "Zugriffsschlüssel bestehen aus einer Zugriffsschlüssel-ID und geheimem Zugriffsschlüssel, die verwendet werden, um programmierte Anfragen zu signieren, die Sie an AWS stellen. 
Wenn Zugriffsschlüssel nicht regelmäßig rotiert werden, erhöht sich das Risiko eines unbefugten Zugriffs auf AWS-Ressourcen. Durch regelmäßiges Rotieren der Zugriffsschlüssel wird das Risiko von kompromittierten Schlüsseln verringert.", + "remediation": "Um das Problem zu beheben, überprüfen Sie den Berechtigungsbericht und stellen Sie sicher, dass der Wert access_key_X_last_rotated weniger als 90 Tage zurück liegt." + } } }, { "name": "policy_attached_only_to_group_or_roles", "title": "Ensure IAM policies are attached only to groups or roles", - "result_kinds": ["aws_iam_access_key"], - "categories": [ "security", "compliance" ], - "risk": "By default IAM users; groups; and roles have no access to AWS resources. IAM policies are the means by which privileges are granted to users; groups; or roles. It is recommended that IAM policies be applied directly to groups and roles but not users. Assigning privileges at the group or role level reduces the complexity of access management as the number of users grow. Reducing access management complexity may in-turn reduce opportunity for a principal to inadvertently receive or retain excessive privileges.", + "result_kinds": ["aws_iam_user"], + "categories": ["security", "compliance"], + "risk": "By default, IAM users, groups, and roles have no access to AWS resources. IAM policies are used to grant privileges to users, groups, or roles. It is recommended to apply IAM policies directly to groups and roles rather than users. Assigning privileges at the group or role level reduces access management complexity and minimizes the risk of excessive privileges.", "severity": "low", "detect": { "resoto": "is(aws_iam_user) {attached_policy: --> is(aws_iam_policy)} user_policies!=[] or attached_policy!=null" }, "remediation": { - "text": "Remove any policy attached directly to the user. 
Use groups or roles instead.", + "text": "To fix the issue, remove any directly attached policy from the user and use groups or roles instead.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass IAM-Richtlinien nur Gruppen oder Rollen zugeordnet sind", + "risk": "Standardmäßig haben IAM-Benutzer, Gruppen und Rollen keinen Zugriff auf AWS-Ressourcen. IAM-Richtlinien werden verwendet, um Benutzern, Gruppen oder Rollen Privilegien zuzuweisen. Es wird empfohlen, IAM-Richtlinien direkt Gruppen und Rollen zuzuordnen, anstatt Benutzern. Die Zuweisung von Berechtigungen auf Gruppen- oder Rollenebene reduziert die Komplexität des Zugriffsmanagements und minimiert das Risiko übermäßiger Berechtigungen.", + "remediation": "Um das Problem zu beheben, entfernen Sie alle direkt zugeordneten Richtlinien von Benutzern und verwenden Sie stattdessen Gruppen oder Rollen." + } } }, { "name": "policy_with_administrative_privileges_not_in_use", - "title": "Ensure IAM policies that allow full \"*:*\" administrative privileges are not in use.", + "title": "Ensure IAM policies do not grant full administrative privileges", "result_kinds": ["aws_iam_policy"], - "categories": [ "security", "compliance" ], - "risk": "IAM policies are the means by which privileges are granted to users; groups; or roles. It is recommended and considered a standard security advice to grant least privilege—that is; granting only the permissions required to perform a task. Determine what users need to do and then craft policies for them that let the users perform only those tasks instead of allowing full administrative privileges. 
Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions.", + "categories": ["security", "compliance"], + "risk": "Granting full administrative privileges through IAM policies exposes resources to potentially unwanted actions. It is recommended to grant least privilege by only providing necessary permissions for users, groups, or roles.", "severity": "medium", "detect": { - "resoto": "is(aws_iam_policy) and policy_document.document.Statement[*].{Effect=Allow and (Action=\"*\" and Resource=\"*\")} and policy_attachment_count>0" + "resoto": "is(aws_iam_policy) and policy_document.document.Statement[*].{Effect=Allow and (Action in [\"*\", [\"*\"]] and Resource in [\"*\", [\"*\"]])} and policy_attachment_count>0" }, "remediation": { - "text": "It is more secure to start with a minimum set of permissions and grant additional permissions as necessary; rather than starting with permissions that are too lenient and then trying to tighten them later. List policies an analyze if permissions are the least possible to conduct business activities.", + "text": "To mitigate the risk, start with a minimum set of permissions and grant additional permissions as necessary. Regularly review policies to ensure permissions are the minimum required for business activities.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass IAM-Richtlinien keine volle administrative Berechtigung gewähren", + "risk": "Indem volle administrative Berechtigungen über IAM-Richtlinien gewährt werden, werden Ressourcen potenziell unerwünschten Aktionen ausgesetzt. 
Es wird empfohlen, die Minimalberechtigung zu gewähren, indem nur die für Benutzer, Gruppen oder Rollen erforderlichen Berechtigungen erteilt werden.", + "remediation": "Um das Risiko zu mindern, beginnen Sie mit einem minimalen Satz von Berechtigungen und gewähren Sie zusätzliche Berechtigungen, wenn erforderlich. Überprüfen Sie regelmäßig die Richtlinien, um sicherzustellen, dass die Berechtigungen für die Geschäftsaktivitäten minimal erforderlich sind." + } + } + }, + { + "name": "user_inline_policy_no_star_star", + "title": "Ensure IAM users do not have inline policies granting full administrative privileges \"*:*\".", + "result_kinds": ["aws_iam_user"], + "categories": ["security", "compliance"], + "risk": "Granting full administrative privileges to IAM users increases the risk of unauthorized actions and potential security breaches. It is best practice to grant least privilege, providing users with only the necessary permissions to complete their tasks.", + "severity": "medium", + "detect": { + "resoto": "is(aws_iam_user) and user_policies[*].policy_document.Statement[*].{Effect=Allow and (Action in [\"*\", [\"*\"]] and Resource in [\"*\", [\"*\"]])}" + }, + "remediation": { + "text": "Follow the principle of least privilege by starting with minimal permissions and gradually granting additional permissions as needed. Review and analyze existing policies to ensure they only provide the minimum necessary permissions for business activities.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass IAM-Benutzer keine inline-Richtlinien haben, die volle Administrationsberechtigungen \"*:*\" gewähren.", + "risk": "Indem IAM-Benutzern volle Administrationsberechtigungen gewährt werden, erhöht sich das Risiko unbefugter Aktionen und potenzieller Sicherheitsverletzungen. 
Es ist bewährte Praxis, möglichst geringe Privilegien zu gewähren und den Benutzern nur die für ihre Aufgaben erforderlichen Berechtigungen zur Verfügung zu stellen.", + "remediation": "Befolgen Sie das Prinzip des geringsten Privilegs, indem Sie mit minimalen Berechtigungen beginnen und schrittweise zusätzliche Berechtigungen gewähren, wenn sie benötigt werden. Überprüfen und analysieren Sie vorhandene Richtlinien, um sicherzustellen, dass sie nur die für geschäftliche Aktivitäten erforderlichen minimalen Berechtigungen bieten." + } + } + }, + { + "name": "group_inline_policy_no_star_star", + "title": "Ensure inline policies for IAM groups that grant full administrative privileges are not in use", + "result_kinds": ["aws_iam_group"], + "categories": ["security", "compliance"], + "risk": "Granting full \"*:*\" administrative privileges to IAM groups exposes resources to potentially unwanted actions. It is recommended to follow the principle of least privilege and grant only the necessary permissions to perform specific tasks.", + "severity": "medium", + "detect": { + "resoto": "is(aws_iam_group) and group_policies[*].policy_document.Statement[*].{Effect=Allow and (Action in [\"*\", [\"*\"]] and Resource in [\"*\", [\"*\"]])}" + }, + "remediation": { + "text": "To fix this issue, it is more secure to start with a minimum set of permissions and grant additional permissions as necessary. 
Analyze the existing policies and ensure that the permissions granted are the least required for business activities.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass inline-Richtlinien für IAM-Gruppen, die volle administrative Berechtigungen gewähren, nicht verwendet werden", + "risk": "Die Gewährung vollständiger \"*:*\" administrativer Berechtigungen an IAM-Gruppen macht Ressourcen potenziell unerwünschten Aktionen ausgesetzt. Es wird empfohlen, dem Prinzip des geringsten Privilegs zu folgen und nur die erforderlichen Berechtigungen für bestimmte Aufgaben zu gewähren.", + "remediation": "Um dieses Problem zu beheben, ist es sicherer, mit einem Mindestmaß an Berechtigungen zu beginnen und bei Bedarf zusätzliche Berechtigungen zu gewähren. Analysieren Sie die vorhandenen Richtlinien und stellen Sie sicher, dass die gewährten Berechtigungen für Geschäftstätigkeiten minimal erforderlich sind." + } + } + }, + { + "name": "role_inline_policy_no_star_star", + "title": "Ensure IAM roles do not use inline policies with full administrative privileges.", + "result_kinds": ["aws_iam_role"], + "categories": ["security", "compliance"], + "risk": "Granting full administrative privileges through IAM roles increases the risk of unauthorized access to resources. Following least privilege principle is crucial to limit potential unwanted actions and protect resources.", + "severity": "medium", + "detect": { + "resoto": "is(aws_iam_role) and role_policies[*].policy_document.Statement[*].{Effect=Allow and (Action in [\"*\", [\"*\"]] and Resource in [\"*\", [\"*\"]])}" + }, + "remediation": { + "text": "To improve security, start with a minimum set of permissions and gradually grant additional permissions as needed. 
Review the policies and ensure permissions are restricted to the essential tasks required for business activities.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass IAM-Rollen keine inline-Richtlinien mit vollen administrativen Rechten verwenden.", + "risk": "Die Gewährung voller administrativer Rechte über IAM-Rollen erhöht das Risiko unbefugten Zugriffs auf Ressourcen. Das Befolgen des Prinzips des geringsten Privilegs ist entscheidend, um potenziell unerwünschte Aktionen zu begrenzen und Ressourcen zu schützen.", + "remediation": "Um die Sicherheit zu verbessern, beginnen Sie mit einem minimalen Satz von Berechtigungen und gewähren Sie nach Bedarf schrittweise zusätzliche Berechtigungen. Überprüfen Sie die Richtlinien und stellen Sie sicher, dass die Berechtigungen auf die für die Geschäftstätigkeit erforderlichen Aufgaben beschränkt sind." + } } }, { "name": "support_role_exists", "title": "Ensure a support role has been created to manage incidents with AWS Support", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "AWS provides a support center that can be used for incident notification and response; as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.", + "categories": ["security", "compliance"], + "risk": "Without a support role, incident notification and response, as well as technical support and customer services, may be compromised. 
To mitigate this risk, it is important to create an IAM Role that allows authorized users to manage incidents with AWS Support.", "severity": "medium", "detect": { "resoto": "is(aws_account) with(empty, -[0:2]-> is(aws_iam_role) and name=AWSServiceRoleForSupport and role_assume_role_policy_document.Statement[*].{Effect=Allow and Principal.Service=support.amazonaws.com and Action=\"sts:AssumeRole\"})" }, "remediation": { - "text": "Create an IAM role for managing incidents with AWS.", + "text": "To fix this issue, create an IAM role specifically for managing incidents with AWS Support.", "url": "https://docs.aws.amazon.com/awssupport/latest/user/using-service-linked-roles-sup.html" + }, + "url": "https://docs.aws.amazon.com/awssupport/latest/user/using-service-linked-roles-sup.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass eine Support-Rolle erstellt wurde, um Vorfälle mit AWS Support zu verwalten", + "risk": "Ohne eine Support-Rolle können die Benachrichtigung und Reaktion auf Vorfälle sowie technischer Support und Kundenservice beeinträchtigt werden. Um dieses Risiko zu mindern, ist es wichtig, eine IAM-Rolle zu erstellen, die autorisierten Benutzern das Verwalten von Vorfällen mit AWS Support ermöglicht.", + "remediation": "Um dieses Problem zu beheben, erstellen Sie eine IAM-Rolle, die speziell für das Verwalten von Vorfällen mit AWS Support geeignet ist." 
+ } } }, { "name": "expired_server_certificates", - "title": "Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed.", + "title": "Ensure that all expired SSL/TLS certificates are removed from AWS IAM.", "result_kinds": ["aws_iam_server_certificate"], - "categories": [ "security", "compliance" ], - "risk": "Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB.", + "categories": ["security", "compliance"], + "risk": "If expired SSL/TLS certificates are not removed, there is a risk of accidentally deploying an invalid certificate to a resource like AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website.", "severity": "critical", "detect": { "resoto": "is(aws_iam_server_certificate) and expires<{{certificate_expiration.from_now}}" @@ -280,58 +506,252 @@ "certificate_expiration": "0d" }, "remediation": { - "action":{ - "cli": "search is(aws_iam_server_certificate) and expires<@UTC@ | clean", + "action": { + "resoto": "search is(aws_iam_server_certificate) and expires<@UTC@ | clean", "aws_cli": "aws iam delete-server-certificate --server-certificate-name {{name}}" }, - - "text": "Deleting the certificate could have implications for your application if you are using an expired server certificate with Elastic Load Balancing, CloudFront, etc. One has to make configurations at respective services to ensure there is no interruption in application functionality.", + "text": "To fix this issue, delete the expired SSL/TLS certificate from AWS IAM. However, keep in mind that deleting the certificate could have implications for your application if you are using it with services like Elastic Load Balancing or CloudFront. 
Make the necessary configurations in these services to ensure no interruption in application functionality.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass alle abgelaufenen SSL/TLS-Zertifikate aus AWS IAM entfernt werden.", + "risk": "Wenn abgelaufene SSL/TLS-Zertifikate nicht entfernt werden, besteht die Gefahr, versehentlich ein ungültiges Zertifikat auf eine Ressource wie den AWS Elastic Load Balancer (ELB) zu deployen, was die Glaubwürdigkeit der Anwendung/Website beeinträchtigen kann.", + "remediation": "Um dieses Problem zu beheben, löschen Sie das abgelaufene SSL/TLS-Zertifikat aus AWS IAM. Beachten Sie jedoch, dass das Löschen des Zertifikats Auswirkungen auf Ihre Anwendung haben kann, wenn Sie es mit Diensten wie Elastic Load Balancing oder CloudFront verwenden. Nehmen Sie die erforderlichen Konfigurationen in diesen Diensten vor, um sicherzustellen, dass es zu keiner Unterbrechung der Anwendungsfunktionalität kommt." + } } }, { "name": "access_analyzer_enabled", - "title": "Check if IAM Access Analyzer is enabled.", + "title": "Ensure IAM Access Analyzer is enabled", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data, which is a security risk. 
IAM Access Analyzer uses a form of mathematical analysis called automated reasoning, which applies logic and mathematical inference to determine all possible access paths allowed by a resource policy.", + "categories": ["security", "compliance"], + "risk": "Without IAM Access Analyzer enabled, it becomes difficult to identify resources in your organization and accounts that are shared with external entities. This increases the risk of unintended access to your resources and data, posing a security threat. By using IAM Access Analyzer, you can efficiently analyze resource policies and detect and address potential security risks.", "severity": "low", "detect": { - "manual": "Check that IAM Access Analyzer is enabled and that no analyzer produced any findings. `aws accessanalyzer list-analyzers` and `aws accessanalyzer list-findings`" + "manual": "Check that IAM Access Analyzer is enabled and that no analyzer produced any findings. Use the commands `aws accessanalyzer list-analyzers` and `aws accessanalyzer list-findings`." }, "remediation": { - "text": "Enable IAM Access Analyzer for all accounts, create analyzer and take action over it is recommendations (IAM Access Analyzer is available at no additional cost).", + "text": "To mitigate the risk, enable IAM Access Analyzer for all accounts and create an analyzer. Take appropriate action based on the analyzer's recommendations. Enabling IAM Access Analyzer is free of charge and highly beneficial for security purposes.", "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass IAM Access Analyzer aktiviert ist", + "risk": "Ohne aktivierten IAM Access Analyzer wird es schwierig, Ressourcen in Ihrer Organisation und in den Konten zu identifizieren, die mit externen Einheiten geteilt werden. 
Dadurch besteht ein erhöhtes Risiko für unbeabsichtigten Zugriff auf Ihre Ressourcen und Daten, was eine Sicherheitsbedrohung darstellt. Durch die Verwendung von IAM Access Analyzer können Sie Ressourcenrichtlinien effizient analysieren und potenzielle Sicherheitsrisiken erkennen und beheben.", + "remediation": "Um das Risiko zu minimieren, aktivieren Sie IAM Access Analyzer für alle Konten und erstellen Sie einen Analyzer. Ergreifen Sie entsprechende Maßnahmen basierend auf den Empfehlungen des Analyzers. Die Aktivierung von IAM Access Analyzer ist kostenlos und für Sicherheitszwecke äußerst vorteilhaft." + } } }, { "name": "check_saml_providers_sts", - "title": "Check if there are SAML Providers that can be used for STS", + "title": "Ensure SAML Providers are used for STS to prevent the use of long-lived credentials", "result_kinds": ["aws_account"], - "categories": [ "security", "compliance" ], - "risk": "Without SAML provider users with AWS CLI or AWS API access can use IAM static credentials. SAML helps users to assume role by default each time they authenticate.", + "categories": ["security", "compliance"], + "risk": "Without SAML provider, users with AWS CLI or AWS API access can use IAM static credentials. This increases the risk of unauthorized access and potential misuse of privileges.", "severity": "low", "detect": { - "manual": "Check that saml providers are available: `aws iam list-saml-providers`" + "manual": "Check that SAML providers are available by running the command `aws iam list-saml-providers`" }, "remediation": { - "text": "Enable SAML provider and use temporary credentials. You can use temporary security credentials to make programmatic requests for AWS resources using the AWS CLI or AWS API (using the AWS SDKs ). The temporary credentials provide the same permissions that you have with use long-term security credentials such as IAM user credentials. 
In case of not having SAML provider capabilities prevent usage of long-lived credentials.", + "text": "To mitigate this issue, enable a SAML provider and use temporary security credentials. Temporary credentials provide the same permissions as long-term credentials but have a limited duration. This helps prevent the unauthorized use of long-lived credentials.", "url": "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass SAML-Provider für STS verwendet werden, um die Verwendung von langfristigen Anmeldeinformationen zu vermeiden", + "risk": "Ohne SAML-Provider können Benutzer mit AWS CLI- oder AWS API-Zugriff IAM-Statikanmeldeinformationen verwenden. Dadurch steigt das Risiko unbefugten Zugriffs und potenziellen Missbrauchs von Berechtigungen.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie einen SAML-Provider und verwenden Sie temporäre Sicherheitsanmeldeinformationen. Temporäre Anmeldeinformationen bieten die gleichen Berechtigungen wie langfristige Anmeldeinformationen, haben jedoch eine begrenzte Gültigkeitsdauer. Dies hilft, die unbefugte Verwendung von langfristigen Anmeldeinformationen zu verhindern." + } } }, { "name": "check_cloudshell_access_restricted", "title": "Ensure access to AWSCloudShellFullAccess is restricted", - "result_kind": ["aws_iam_role", "aws_iam_user"], - "categories": [ "security", "compliance" ], - "risk": "Access to this policy should be restricted as it presents a potential channel for data exfiltration by malicious cloud admins that are given full permissions to the service. 
AWS documentation describes how to create a more restrictive IAM policy which denies file transfer permissions.", + "result_kinds": ["aws_iam_role", "aws_iam_user"], + "categories": [ + "security", "compliance" + ], + "risk": "Unrestricted access to the AWSCloudShellFullAccess policy poses a risk of data exfiltration by malicious cloud admins with full permissions to the service. To mitigate this risk, it is recommended to create a more restrictive IAM policy that denies file transfer permissions.", "severity": "medium", "detect": { "resoto": "is(aws_iam_policy) and name==AWSCloudShellFullAccess <-- is(aws_iam_user, aws_iam_role)" }, "remediation": { - "text": "Open the IAM console at https://console.aws.amazon.com/iam/ In the left pane, select Policies. Search for and select AWSCloudShellFullAccess. On the Entities attached tab, for each item, check the box and select Detach.", + "text": "To restrict access to the AWSCloudShellFullAccess policy, open the IAM console at https://console.aws.amazon.com/iam/, select Policies in the left pane, search for and select AWSCloudShellFullAccess. On the Entities attached tab, select and detach each item.", "url": "https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der Zugriff auf AWSCloudShellFullAccess eingeschränkt ist", + "risk": "Unbeschränkter Zugriff auf die AWSCloudShellFullAccess-Richtlinie birgt ein Risiko der Datenexfiltration durch bösartige Cloud-Administratoren mit vollen Berechtigungen für den Dienst. 
Zur Minderung dieses Risikos wird empfohlen, eine restriktivere IAM-Richtlinie zu erstellen, die Dateiübertragungsberechtigungen verweigert.", + "remediation": "Um den Zugriff auf die AWSCloudShellFullAccess-Richtlinie einzuschränken, öffnen Sie die IAM-Konsole unter https://console.aws.amazon.com/iam/, wählen Sie in der linken Leiste 'Richtlinien' aus, suchen Sie nach und wählen Sie AWSCloudShellFullAccess aus. Wählen Sie auf der Registerkarte 'Angefügte Entitäten' jedes Element aus und heben Sie die Zuordnung auf." + } + } + }, + { + "name": "password_policy_lowercase", + "title": "Ensure IAM password policy requires at least one lowercase letter", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "Without requiring at least one lowercase letter in the password policy, IAM users may use passwords that lack complexity and are easier to crack. This increases the risk of unauthorized access and potential data breaches.", + "severity": "medium", + "detect": { + "resoto": "is(aws_account) and require_lowercase_characters=false" + }, + "remediation": { + "text": "To fix the issue, ensure that the \"Requires at least one lowercase letter\" option is checked under \"Password Policy\" in the AWS IAM console.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die IAM-Passwortrichtlinie mindestens einen Kleinbuchstaben erfordert", + "risk": "Ohne die Anforderung mindestens eines Kleinbuchstabens in der Passwortrichtlinie können IAM-Benutzer Passwörter verwenden, die an Komplexität fehlen und leichter zu knacken sind. Dadurch erhöht sich das Risiko eines unbefugten Zugriffs und potenzieller Datenverstöße.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass die Option \"Erfordert mindestens einen Kleinbuchstaben\" unter \"Passwortrichtlinie\" in der AWS IAM-Konsole aktiviert ist." 
+ } + } + }, + { + "name": "password_policy_number", + "title": "Ensure IAM password policy requires at least one number", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "If the issue is not solved, password complexity requirements may not be enforced, allowing weak passwords to be used. It is recommended to require at least one number in the password policy to enhance security.", + "severity": "medium", + "detect": { + "resoto": "is(aws_account) and require_numbers=false" + }, + "remediation": { + "text": "To fix the issue, ensure that the \"Requires at least one number\" option is checked under \"Password Policy\".", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die IAM-Passwortrichtlinie mindestens eine Nummer erfordert", + "risk": "Wenn das Problem nicht behoben wird, können die Anforderungen an die Passwortkomplexität nicht durchgesetzt werden, was das Verwenden von schwachen Passwörtern ermöglicht. Es wird empfohlen, mindestens eine Nummer in die Passwortrichtlinie aufzunehmen, um die Sicherheit zu verbessern.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass die Option \"Erfordert mindestens eine Nummer\" unter \"Passwortrichtlinie\" aktiviert ist." + } + } + }, + { + "name": "password_policy_symbol", + "title": "Ensure IAM password policy requires at least one symbol", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "Failure to enforce password complexity requirements in IAM can result in weak passwords. 
It is important to require at least one non-alphanumeric character to ensure stronger password security.", + "severity": "medium", + "detect": { + "resoto": "is(aws_account) and require_symbols=false" + }, + "remediation": { + "text": "To fix the issue, navigate to the \"Password Policy\" section and ensure that the option \"Require at least one non-alphanumeric character\" is checked.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die IAM-Kennwortrichtlinie mindestens ein Symbol erfordert", + "risk": "Die Nichtdurchsetzung von Anforderungen an die Kennwortkomplexität in IAM kann zu schwachen Kennwörtern führen. Es ist wichtig, mindestens ein nicht alphanumerisches Zeichen zu verlangen, um eine stärkere Kennwortsicherheit zu gewährleisten.", + "remediation": "Um das Problem zu beheben, navigieren Sie zum Abschnitt \"Kennwortrichtlinie\" und stellen Sie sicher, dass die Option \"Mindestens ein nicht alphanumerisches Zeichen erforderlich\" aktiviert ist." + } + } + }, + { + "name": "password_policy_uppercase", + "title": "Ensure IAM password policy requires at least one uppercase letter", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "A strong password policy is important for ensuring password complexity. 
Without requiring at least one uppercase letter, there is a risk of weak passwords being used, which can lead to potential security breaches.", + "severity": "medium", + "detect": { + "resoto": "is(aws_account) and require_uppercase_characters=false" + }, + "remediation": { + "text": "To fix this issue, go to the AWS Identity and Access Management (IAM) console, navigate to the \"Password Policy\" section, and ensure that \"Require at least one uppercase character\" is checked.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die IAM-Passwortrichtlinie mindestens einen Großbuchstaben erfordert", + "risk": "Eine starke Passwortrichtlinie ist wichtig, um die Komplexität von Passwörtern sicherzustellen. Ohne die Anforderung mindestens eines Großbuchstabens besteht das Risiko, dass schwache Passwörter verwendet werden, was zu möglichen Sicherheitsverletzungen führen kann.", + "remediation": "Um dieses Problem zu beheben, gehen Sie zur AWS Identity and Access Management (IAM) Konsole, navigieren Sie zum Abschnitt \"Passwortrichtlinie\" und stellen Sie sicher, dass \"Mindestens ein Großbuchstabe erforderlich\" ausgewählt ist." 
+ } + } + }, + { + "name": "unused_access_keys", + "title": "Ensure unused access keys are removed", + "result_kinds": ["aws_iam_access_key"], + "categories": ["security", "compliance"], + "risk": "Unused access keys pose a security risk and should be removed to prevent unauthorized access.", + "severity": "medium", + "detect": { + "resoto": "is(aws_iam_access_key) and age>{{access_key_too_old_age}} and (last_used==null or last_used<{{access_key_too_old_age.ago}})" + }, + "default_values": { + "access_key_too_old_age": "90d" + }, + "remediation": { + "text": "To fix this issue, delete any unused access keys from the IAM user.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html", + "localizations": { + "de": { + "title": "Sicherstellen, dass unbenutzte Zugriffsschlüssel entfernt werden", + "risk": "Unbenutzte Zugriffsschlüssel stellen ein Sicherheitsrisiko dar und sollten entfernt werden, um unbefugten Zugriff zu verhindern.", + "remediation": "Um dieses Problem zu beheben, entfernen Sie alle unbenutzten Zugriffsschlüssel vom IAM-Benutzer." + } + } + }, + { + "name": "password_policy_expire_90", + "title": "Ensure passwords expire within 90 days or less", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "Allowing passwords to remain unchanged for more than 90 days can significantly increase the risk of unauthorized access and security breaches. 
Older passwords are more likely to be compromised over time, and longer password lifetimes give malicious actors a wider window of opportunity to exploit stolen or weak credentials.", + "severity": "high", + "detect": { + "resoto": "is(aws_account) and (expire_passwords!=true or max_password_age>{{password_age}})" + }, + "default_values": { + "password_age": "90d" + }, + "remediation": { + "text": "To fix this issue, configure IAM policies to enforce password expiration within 90 days. Ensure the 'expire_passwords' setting is enabled and set 'max_password_age' to 90 days or less. Regularly rotating passwords helps maintain account security by reducing the risk of compromised credentials being used for extended periods.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Passwörter innerhalb von 90 Tagen oder weniger ablaufen", + "risk": "Durch das Zulassen von Passwörtern, die länger als 90 Tage unverändert bleiben, kann das Risiko unbefugten Zugriffs und von Sicherheitsverletzungen erheblich steigen. Ältere Passwörter werden im Laufe der Zeit wahrscheinlicher kompromittiert, und eine längere Lebensdauer von Passwörtern gibt bösartigen Akteuren ein größeres Zeitfenster, um gestohlene oder schwache Anmeldedaten auszunutzen.", + "remediation": "Um dieses Problem zu beheben, konfigurieren Sie IAM-Richtlinien, um eine Passwortablaufzeit von 90 Tagen festzulegen. Stellen Sie sicher, dass die Einstellung 'expire_passwords' aktiviert ist und 'max_password_age' auf 90 Tage oder weniger gesetzt ist. 
Durch regelmäßige Änderung von Passwörtern wird die Sicherheit des Kontos gewährleistet, indem das Risiko von kompromittierten Anmeldedaten, die für längere Zeiträume verwendet werden, verringert wird." + } + } + }, + { + "name": "guardduty_enabled", + "title": "Ensure AWS GuardDuty is Enabled", + "result_kinds": [], + "categories": ["security"], + "risk": "Not enabling AWS GuardDuty poses potential risks to your AWS account. It is recommended to enable GuardDuty to identify and mitigate these risks effectively.", + "severity": "low", + "detect": { + "manual": "Check if GuardDuty is enabled in the AWS console." + }, + "remediation": { + "text": "Enable GuardDuty in the AWS console to benefit from its intelligent threat detection capabilities.", + "url": "https://aws.amazon.com/guardduty/" + }, + "url": "https://aws.amazon.com/guardduty/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass AWS GuardDuty aktiviert ist", + "risk": "Die Nichtaktivierung von AWS GuardDuty birgt potenzielle Risiken für Ihr AWS-Konto. Es wird empfohlen, GuardDuty zu aktivieren, um diese Risiken effektiv zu erkennen und zu mindern.", + "remediation": "Aktivieren Sie GuardDuty in der AWS-Konsole, um von seinen intelligenten Bedrohungserkennungsfunktionen zu profitieren." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_kms.json b/resotocore/resotocore/static/report/checks/aws/aws_kms.json index 3636b66aaf..df054e049e 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_kms.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_kms.json @@ -4,17 +4,71 @@ "checks": [ { "name": "key_rotation_enabled", - "title": "Ensure rotation for customer created KMS CMKs is enabled.", + "title": "Ensure that Key Rotation is Enabled for Customer-Created KMS CMKs", "result_kinds": ["aws_kms_key"], "categories": ["security", "compliance"], - "risk": "Cryptographic best practices discourage extensive reuse of encryption keys. 
Consequently, Customer Master Keys (CMKs) should be rotated to prevent usage of compromised keys.", + "risk": "The extensive reuse of encryption keys is discouraged as it may lead to the use of compromised keys. Without timely rotation, Customer Master Keys (CMKs) are at an increased risk of compromise, resulting in potential data breaches and operational disruptions.", "severity": "medium", "detect": { "resoto": "is(aws_kms_key) and kms_key_manager==CUSTOMER and access_key_status=Enabled and kms_key_rotation_enabled=false" }, "remediation": { - "text": "For every KMS Customer Master Keys (CMKs), ensure that Rotate this key every year is enabled.", + "text": "For every Customer Master Key (CMK), ensure that the 'Rotate this key every year' option is enabled. Regular rotation reduces the risk of compromise and improves overall system security. Check the key rotation status frequently and apply changes immediately when needed.", "url": "https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html" + }, + "url": "https://aws.amazon.com/kms/features/key-rotation/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Schlüsselrotation für benutzerdefinierte KMS CMKs aktiviert ist", + "risk": "Die umfangreiche Wiederverwendung von Verschlüsselungsschlüsseln wird nicht empfohlen, da dies zu kompromittierten Schlüsseln führen kann. Ohne regelmäßige Rotation besteht ein erhöhtes Risiko, dass Customer Master Keys (CMKs) kompromittiert werden, was zu potenziellen Datenverletzungen und Betriebsunterbrechungen führen kann.", + "remediation": "Stellen Sie für jeden Customer Master Key (CMK) sicher, dass die Option Diesen Schlüssel einmal pro Jahr drehen aktiviert ist. Durch regelmäßige Rotation wird das Risiko einer Kompromittierung reduziert und die allgemeine Systemsicherheit verbessert. Überprüfen Sie den Rotationsstatus des Schlüssels regelmäßig und wenden Sie Änderungen sofort an, wenn dies erforderlich ist." 
+ } + } + }, + { + "name": "key_not_pending_deletion", + "title": "Ensure No AWS KMS Keys in Use are Pending Deletion", + "result_kinds": ["aws_kms_key"], + "categories": ["security", "compliance"], + "risk": "KMS keys marked for deletion cease to function for operations. An operational loss can occur if a key in use is pending deletion, with potential for irrecoverable data loss.", + "severity": "high", + "detect": { + "resoto": "is(aws_kms_key) and access_key_status==PendingDeletion with(any, <-- not is(region))" + }, + "remediation": { + "text": "Confirm no keys in use are set with 'Pending deletion' status. If such a key is detected, promptly substitute the key with a new one to avoid disruption of services.", + "url": "https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html" + }, + "url": "https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass keine AWS KMS-Schlüssel ausstehend gelöscht werden", + "risk": "KMS-Schlüssel, die zur Löschung markiert sind, funktionieren nicht mehr ordnungsgemäß für Operationen. Bei einem Schlüssel, der noch verwendet wird und zur Löschung aussteht, kann ein Betriebsverlust auftreten, der zu unwiederbringlichem Datenverlust führen kann.", + "remediation": "Überprüfen Sie, ob keine verwendeten Schlüssel den Status 'Ausstehende Löschung' haben. Wenn ein solcher Schlüssel erkannt wird, ersetzen Sie ihn umgehend durch einen neuen, um Unterbrechungen von Diensten zu vermeiden." + } + } + }, + { + "name": "cmk_policy_prohibit_public_access", + "title": "Ensure Customer Managed Keys in use are Not Publicly Accessible", + "result_kinds": ["aws_kms_key"], + "categories": ["security", "compliance"], + "risk": "Maintaining public accessibility of Customer Managed Keys exposes your system to unnecessary security risks by granting external entities potential access. 
This can compromise the integrity, confidentiality or availability of your services.", + "severity": "medium", + "detect": { + "resoto": "is(aws_kms_key) and kms_key_policy.Statement[*].{ Effect==Allow and Principal==\"*\" and Action in [\"*\", \"kms:*\"] and Condition==null}" + }, + "remediation": { + "text": "Modify the access policy of in-use keys to restrict public access. Grant access only to necessary users or roles, adhering to a least-privilege model to uphold security.", + "url": "https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html" + }, + "url": "https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass verwendete benutzerdefinierte Schlüssel nicht öffentlich zugänglich sind", + "risk": "Die Aufrechterhaltung der öffentlichen Zugänglichkeit von benutzerdefinierten Schlüsseln ermöglicht externen Entitäten potenziellen Zugriff und gefährdet somit die Integrität, Vertraulichkeit oder Verfügbarkeit Ihrer Dienste.", + "remediation": "Ändern Sie die Zugriffsrichtlinie der verwendeten Schlüssel, um den öffentlichen Zugriff einzuschränken. Gewähren Sie nur notwendigen Benutzern oder Rollen Zugriff und halten Sie sich dabei an das Modell des minimalen Privilegienlevels, um die Sicherheit zu gewährleisten." 
+ } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_lambda.json b/resotocore/resotocore/static/report/checks/aws/aws_lambda.json index e22d8395d3..833e100b77 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_lambda.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_lambda.json @@ -3,33 +3,57 @@ "service": "lambda", "checks": [ { - "name": "not_publicly_accessible", - "title": "Check if Lambda functions have resource-based policy set as Public.", + "name": "publicly_accessible_permissions", + "title": "Ensure Lambda Functions Avoid Public Resource-Based Policies", "result_kinds": ["aws_lambda_function"], - "categories": [ - "security", - "compliance" - ], - "risk": "Publicly accessible services could expose sensitive data to bad actors.", + "categories": ["security", "compliance"], + "risk": "If Lambda functions retain publicly accessible services, they become a potential vector for data breaches, exposing sensitive information to unauthorized use or malicious exploitation.", "severity": "low", "url": "https://docs.aws.amazon.com/lambda/latest/dg/logging-using-cloudtrail.html", "detect": { "resoto": "is(aws_lambda_function) and function_policy.statement[*].{principal~\"*\" or principal.AWS~\"*\" or principal.CanonicalUser~\"*\"}" }, "remediation": { - "text": "Grant usage permission on a per-resource basis and applying least privilege principle.", + "text": "To address this issue, restrict usage permissions on resource-based policies, strictly granting access on a per-resource basis. 
Always adhere to the principle of least privilege, allowing only necessary permissions.", "url": "https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html" + }, + "localizations": { + "de": { + "title": "Sicherstellen, dass Lambda-Funktionen öffentliche, ressourcenbasierte Richtlinien vermeiden", + "risk": "Wenn Lambda-Funktionen öffentlich zugängliche Dienste beibehalten, werden sie zu einem potenziellen Angriffsvektor für Datenverletzungen, bei denen sensible Informationen unbefugt genutzt oder bösartig ausgebeutet werden können.", + "remediation": "Um dieses Problem zu beheben, sollten Sie die Berechtigungen für ressourcenbasierte Richtlinien einschränken und den Zugriff strikt auf Basis von Ressourcen gewähren. Halten Sie sich immer an das Prinzip des geringsten Privilegs und genehmigen Sie nur die notwendigen Berechtigungen." + } + } + }, + { + "name": "function_in_vpc", + "title": "Ensure that AWS Lambda functions are deployed within a VPC and configured with appropriate security groups for precise and compliant network access controls.", + "result_kinds": ["aws_lambda_function"], + "categories": ["security", "compliance"], + "risk": "Lambda functions without established controls via security groups in a VPC, are vulnerable to unauthorized access. This exposure threatens data security and compliance standards.", + "severity": "medium", + "detect": { + "resoto": "is(aws_lambda_function) with (empty, <-- is(aws_vpc))" + }, + "remediation": { + "text": "Modify your Lambda function configuration to deploy within a VPC. 
This requires setting up necessary security groups and network access controls.", + "url": "https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html" + }, + "url": "https://aws.amazon.com/premiumsupport/knowledge-center/internet-access-lambda-function/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass AWS Lambda-Funktionen innerhalb einer VPC bereitgestellt werden und mit geeigneten Sicherheitsgruppen für präzise und konforme Netzwerkzugriffskontrollen konfiguriert sind.", + "risk": "Lambda-Funktionen ohne etablierte Kontrollen über Sicherheitsgruppen in einer VPC sind anfällig für unbefugten Zugriff. Diese Exposition bedroht die Datensicherheit und Konformitätsstandards.", + "remediation": "Ändern Sie die Konfiguration Ihrer Lambda-Funktion, um sie innerhalb einer VPC bereitzustellen. Dies erfordert das Einrichten der erforderlichen Sicherheitsgruppen und Netzwerkzugriffskontrollen." + } } }, { "name": "cors_policy", - "title": "Check Lambda Function URL CORS configuration.", + "title": "Ensure Appropriate CORS Configuration for AWS Lambda Functions URL.", "result_kinds": ["aws_lambda_function"], - "categories": [ - "security", - "compliance" - ], - "risk": "Publicly accessible services could expose sensitive data to bad actors.", + "categories": ["security", "compliance"], + "risk": "Unverified CORS configurations can expose services to malicious attacks and unauthorized access, leading to potential data theft or infrastructure manipulation.", "severity": "medium", "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/lambda-functions.html", "detect": { @@ -37,51 +61,89 @@ }, "remediation": { "action": { - "awscli": "aws lambda update-function-url-config --region AWS_REGION --function-name FUNCTION-NAME --auth-type AWS_IAM --cors 'AllowOrigins=https://www.example.com,AllowMethods=*,ExposeHeaders=keep-alive,MaxAge=3600,AllowCredentials=false'" + "aws_cli": "aws lambda update-function-url-config --region AWS_REGION 
--function-name FUNCTION-NAME --auth-type AWS_IAM --cors 'AllowOrigins=https://www.example.com,AllowMethods=*,ExposeHeaders=keep-alive,MaxAge=3600,AllowCredentials=false'" }, - "text": "Grant usage permission on a per-resource basis and applying least privilege principle.", + "text": "Restrict access by updating the CORS policy to allow specific origins only, applying the principle of least privilege to function URLs to reduce potential security threats.", "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/lambda-functions.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie eine angemessene CORS-Konfiguration für die URL der AWS Lambda-Funktionen sicher.", + "risk": "Nicht verifizierte CORS-Konfigurationen können Dienste Angriffen und unbefugtem Zugriff aussetzen, was zu potenziellem Diebstahl von Daten oder Manipulation der Infrastruktur führen kann.", + "remediation": "Beschränken Sie den Zugriff, indem Sie die CORS-Richtlinie aktualisieren, um nur bestimmte Ursprünge zuzulassen. Verwenden Sie das Prinzip des geringsten Privilegs für Funktions-URLs, um potenzielle Sicherheitsbedrohungen zu verringern." + } } }, { - "name": "is_public", - "title": "Check Public Lambda Function URL.", + "name": "function_restrict_public_access", + "title": "Ensure Lambda Functions are not Publicly Accessible.", "result_kinds": ["aws_lambda_function"], - "categories": [ - "security", - "compliance" - ], - "risk": "Publicly accessible services could expose sensitive data to bad actors.", + "categories": ["security", "compliance"], + "risk": "Publicly accessible Lambda functions pose a security threat as they can expose sensitive data and potentially allow unauthorized access to system resources. 
This could lead to data breaches or system compromise.", "severity": "high", "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/lambda-functions.html", "detect": { - "resoto": "is(aws_lambda_function) and function_url_config.auth_type not in [null, AWS_IAM]" + "resoto": "is(aws_lambda_function) and function_url_config.auth_type not in [null, AWS_IAM]" }, "remediation": { "action": { - "awscli": "aws lambda update-function-url-config --region AWS_REGION --function-name FUNCTION-NAME --auth-type AWS_IAM" + "aws_cli": "aws lambda update-function-url-config --region AWS_REGION --function-name FUNCTION-NAME --auth-type AWS_IAM" }, - "text": "Grant usage permission on a per-resource basis and applying least privilege principle.", + "text": "To secure your Lambda functions, update your function URL configuration to use AWS IAM as the authentication type. This will restrict access to permitted entities and applies the 'least privilege' security principle.", "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/lambda-functions.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Lambda-Funktionen nicht öffentlich zugänglich sind.", + "risk": "Öffentlich zugängliche Lambda-Funktionen stellen eine Sicherheitsbedrohung dar, da sie sensible Daten offenlegen und potenziell unbefugten Zugriff auf Systemressourcen ermöglichen können. Dadurch könnte es zu Datenschutzverletzungen oder Kompromittierungen des Systems kommen.", + "remediation": "Um Ihre Lambda-Funktionen abzusichern, aktualisieren Sie die URL-Konfiguration Ihrer Funktion, um AWS IAM als Authentifizierungstyp zu verwenden. Dadurch wird der Zugriff auf genehmigte Entitäten beschränkt und das Sicherheitsprinzip des 'geringsten Privilegs' angewendet." 
+ } } }, { "name": "supported_runtime", - "title": "Check that Lambda does not use an obsolete runtime.", + "title": "Ensure AWS Lambda Deployments are Using Supported Runtimes", "result_kinds": ["aws_lambda_function"], - "categories": [ - "security", - "compliance" - ], - "risk": "If you have functions running on a runtime that will be deprecated in the next 60 days; Lambda notifies you by email that you should prepare by migrating your function to a supported runtime. In some cases; such as security issues that require a backwards-incompatible update; or software that does not support a long-term support (LTS) schedule; advance notice might not be possible. After a runtime is deprecated; Lambda might retire it completely at any time by disabling invocation. Deprecated runtimes are not eligible for security updates or technical support.", + "categories": ["security", "compliance"], + "risk": "Utilizing deprecated runtimes in AWS Lambda functions could introduce severe risk. When deprecated, runtimes may become unsupported and receive no security updates, posing a potential security threat. Additionally, AWS may stop function invocation at any time without notice, causing functionality failures.", "severity": "medium", "url": "https://docs.aws.amazon.com/lambda/latest/dg/runtime-support-policy.html", "detect": { "resoto": "is(aws_lambda_function) and function_runtime in [python3.6, python2.7, dotnetcore2.1, ruby2.5, nodejs10.x, nodejs8.10, nodejs4.3, nodejs6.10, dotnetcore1.0, dotnetcore2.0, nodejs4.3-edge, nodejs]" }, "remediation": { - "text": "Test new runtimes as they are made available. Implement them in production as soon as possible.", + "text": "Upgrade AWS Lambda functions to the latest, supported runtimes. 
Continually test and facilitate the implementation of new runtimes as they become available, ensuring the highest level of security and performance.", "url": "https://docs.aws.amazon.com/lambda/latest/dg/runtime-support-policy.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass AWS Lambda-Bereitstellungen unterstützte Runtimes verwenden", + "risk": "Die Verwendung veralteter Runtimes in AWS Lambda-Funktionen könnte erhebliche Risiken mit sich bringen. Veraltete Runtimes werden möglicherweise nicht mehr unterstützt und erhalten keine Sicherheitsupdates mehr, was eine potenzielle Sicherheitsbedrohung darstellt. Darüber hinaus kann AWS die Funktionen jederzeit ohne Vorankündigung außer Betrieb setzen, was zu Funktionsausfällen führen kann.", + "remediation": "Aktualisieren Sie AWS Lambda-Funktionen auf die neuesten unterstützten Runtimes. Testen und ermöglichen Sie kontinuierlich die Implementierung neuer Runtimes, wenn diese verfügbar werden, um das höchste Maß an Sicherheit und Leistung zu gewährleisten." + } + } + }, + { + "name": "no_secrets_in_variables", + "title": "Ensure Secrets aren't Stored in AWS Lambda Environment Variables", + "result_kinds": ["aws_lambda_function"], + "categories": ["security"], + "risk": "Hard-coded passwords significantly amplify the risks of password guessing and unauthorized access. Consequently, if secrets are stored in the environment variables, they can potentially be exploited by malicious actors.", + "severity": "critical", + "url": "https://docs.aws.amazon.com/lambda/latest/dg/runtime-support-policy.html", + "detect": { + "resoto_cmd": "search is(aws_lambda_function) and function_environment.variables not in [null, {}] | detect-secrets --path function_environment.variables --with-secrets" + }, + "remediation": { + "text": "Leverage AWS Secrets Manager to securely provide database credentials to your Lambda functions. 
This approach eliminates the need to hard-code secrets in your code or the environment variables, thus improving the security of your AWS deployments.", + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/lambda-functions.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Geheimnisse nicht in AWS Lambda Umgebungsvariablen gespeichert werden", + "risk": "Hardcodierte Passwörter erhöhen signifikant die Risiken von Passwortraten und unbefugtem Zugriff. Wenn Geheimnisse in den Umgebungsvariablen gespeichert sind, können sie potenziell von bösartigen Akteuren ausgenutzt werden.", + "remediation": "Nutzen Sie AWS Secrets Manager, um Datenbankanmeldeinformationen sicher an Ihre Lambda-Funktionen bereitzustellen. Dieser Ansatz beseitigt die Notwendigkeit, Geheimnisse im Code oder in den Umgebungsvariablen fest zu kodieren und verbessert somit die Sicherheit Ihrer AWS-Bereitstellungen." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_opensearch.json b/resotocore/resotocore/static/report/checks/aws/aws_opensearch.json new file mode 100644 index 0000000000..47c99adb1b --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_opensearch.json @@ -0,0 +1,143 @@ +{ + "provider": "aws", + "service": "opensearch", + "checks": [ + { + "name": "domain_does_not_use_internal_user_database", + "title": "Ensure that Amazon OpenSearch Service domains do not use the internal user database", + "result_kinds": ["aws_opensearch_domain"], + "categories": ["security", "compliance"], + "risk": "Using the Internal User Database for production environments is risky as it lacks the security benefits of Federated authentication.", + "severity": "medium", + "detect": { + "resoto": "is(aws_opensearch_domain) and advanced_security_options.internal_user_database_enabled==true" + }, + "remediation": { + "text": "To fix this issue, remove the users from the internal user database and utilize Cognito for authentication 
instead.", + "url": "https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html" + }, + "url": "https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon OpenSearch Service-Domänen nicht die interne Benutzerdatenbank verwenden", + "risk": "Die Verwendung der internen Benutzerdatenbank für Produktionsumgebungen ist riskant, da sie die Sicherheitsvorteile der föderierten Authentifizierung nicht bietet.", + "remediation": "Um das Problem zu beheben, entfernen Sie die Benutzer aus der internen Benutzerdatenbank und nutzen Sie stattdessen Cognito zur Authentifizierung.\nWeitere Informationen finden Sie unter: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html" + } + } + }, + { + "name": "domain_is_not_publicly_accessible", + "title": "Ensure that Amazon OpenSearch/Elasticsearch domains are not publicly accessible or have open policy access", + "result_kinds": ["aws_opensearch_domain"], + "categories": ["security", "compliance"], + "risk": "Failure to address this issue may result in exposing sensitive data to unauthorized individuals or malicious actors.", + "severity": "medium", + "detect": { + "resoto": "is(aws_opensearch_domain) and access_policies.Statement[*].{Effect==\"Allow\" and (Principal.AWS=\"*\" or Principal=\"*\") and (Condition==null or Condition.IpAddress.`aws:SourceIp`[] in [\"*\", \"0.0.0.0/0\"])}" + }, + "remediation": { + "text": "To resolve this issue, ensure that Amazon OpenSearch/Elasticsearch domains are not set as Public and restrict access through VPC endpoints.", + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html" + }, + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon OpenSearch/Elasticsearch-Domains nicht öffentlich zugänglich oder mit 
offenen Zugriffsrichtlinien versehen sind", + "risk": "Das Nichtbeheben dieses Problems kann dazu führen, dass sensible Daten unbefugten Personen oder bösartigen Akteuren zugänglich gemacht werden.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass Amazon OpenSearch/Elasticsearch-Domains nicht öffentlich zugänglich sind und beschränken Sie den Zugriff über VPC-Endpoints." + } + } + }, + { + "name": "domain_uses_cognito_authentication", + "title": "Ensure that Amazon OpenSearch Service domains have Amazon Cognito authentication enabled.", + "result_kinds": ["aws_opensearch_domain"], + "categories": ["security", "compliance"], + "risk": "Not using Amazon Cognito authentication leaves the domain vulnerable to unauthorized access and compromises the security of the OpenSearch Service.", + "severity": "medium", + "detect": { + "resoto": "is(aws_opensearch_domain) and cognito_options==null" + }, + "remediation": { + "text": "To address this issue, configure Amazon Cognito authentication. This helps protect the domain by providing secure user authentication and access control.", + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html" + }, + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon OpenSearch Service-Domänen die Amazon Cognito-Authentifizierung aktiviert haben.", + "risk": "Die Nichtverwendung der Amazon Cognito-Authentifizierung macht die Domäne anfällig für unbefugten Zugriff und gefährdet die Sicherheit des OpenSearch Service.", + "remediation": "Um dieses Problem zu beheben, konfigurieren Sie die Amazon Cognito-Authentifizierung. Dadurch wird die Domäne durch sichere Benutzerauthentifizierung und Zugriffskontrolle geschützt." 
+ } + } + }, + { + "name": "audit_logs_enabled", + "title": "Ensure that Amazon OpenSearch Service domains have audit logs enabled", + "result_kinds": ["aws_opensearch_domain"], + "categories": ["security", "compliance"], + "risk": "If audit logs are not enabled, monitoring of service use and threat analysis is not possible.", + "severity": "low", + "detect": { + "resoto": "is(aws_opensearch_domain) and log_publishing_options.AUDIT_LOGS.enabled in [null, false]" + }, + "remediation": { + "text": "Ensure that you enable logging information about Amazon OpenSearch Service operations.", + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/audit-logs.html" + }, + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/audit-logs.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Audit-Logs für Amazon OpenSearch Service-Domains aktiviert sind", + "risk": "Wenn Audit-Logs nicht aktiviert sind, ist die Überwachung der Service-Nutzung und die Bedrohungsanalyse nicht möglich.", + "remediation": "Stellen Sie sicher, dass Sie Protokollinformationen über die Amazon OpenSearch Service-Vorgänge aktivieren." + } + } + }, + { + "name": "update_available", + "title": "Ensure that Amazon OpenSearch Service domains are running the latest version.", + "result_kinds": ["aws_opensearch_domain"], + "categories": [], + "risk": "Failure to update Amazon Opensearch Service domains to the latest version can leave them vulnerable to security risks and prevent them from benefiting from new features and improvements.", + "severity": "low", + "detect": { + "resoto": "is(aws_opensearch_domain) and service_software_options.update_available==true" + }, + "remediation": { + "text": "To ensure that your Amazon OpenSearch Service domains are running the latest version, regularly check the Notifications panel in the console for available updates. You can also receive notifications through Amazon EventBridge. 
It is recommended to apply the updates promptly to maintain the security and functionality of your domains.", + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-service-software.html" + }, + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-service-software.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon OpenSearch Service-Domänen die neueste Version ausführen.", + "risk": "Wenn Amazon OpenSearch Service-Domänen nicht auf die neueste Version aktualisiert werden, können sie anfällig für Sicherheitsrisiken sein und von neuen Funktionen und Verbesserungen nicht profitieren.", + "remediation": "Um sicherzustellen, dass Ihre Amazon OpenSearch Service-Domänen die neueste Version ausführen, überprüfen Sie regelmäßig das Benachrichtigungsfenster in der Konsole auf verfügbare Updates. Sie können auch Benachrichtigungen über Amazon EventBridge erhalten. Es wird empfohlen, die Updates umgehend anzuwenden, um die Sicherheit und Funktionalität Ihrer Domänen aufrechtzuerhalten." 
+ } + } + }, + { + "name": "encryption_at_rest_enabled", + "title": "Ensure that encryption at rest is enabled for Amazon OpenSearch Service domains", + "result_kinds": ["aws_opensearch_domain"], + "categories": [], + "risk": "Encrypting data at rest is critical to protect sensitive data from unauthorized access.", + "severity": "low", + "detect": { + "resoto": "is(aws_opensearch_domain) and encryption_at_rest_options.enabled=false" + }, + "remediation": { + "text": "To fix this issue, enable encryption at rest for your Amazon OpenSearch Service domain using AWS KMS to store and manage your encryption keys and the Advanced Encryption Standard algorithm with 256-bit keys (AES-256) for encryption.", + "url": "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/encryption-at-rest.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Verschlüsselung in Ruhe für Amazon OpenSearch Service-Domänen aktiviert ist", + "risk": "Die Verschlüsselung von Daten in Ruhe ist entscheidend, um sensible Daten vor unbefugtem Zugriff zu schützen.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie die Verschlüsselung in Ruhe für Ihre Amazon OpenSearch Service-Domäne. Verwenden Sie dazu AWS KMS, um Ihre Verschlüsselungsschlüssel zu speichern und zu verwalten, und den Advanced Encryption Standard-Algorithmus mit 256-Bit-Schlüsseln (AES-256) zur Verschlüsselung." 
+ } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_organizations.json b/resotocore/resotocore/static/report/checks/aws/aws_organizations.json new file mode 100644 index 0000000000..daefc0f8dc --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_organizations.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "organizations", + "checks": [ + { + "name": "account_part_of_organizations", + "title": "Ensure AWS Account is Part of an AWS Organization", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "Not being part of an AWS Organization limits centralized management and control over AWS accounts, making it difficult to enforce security policies consistently. It can also result in increased costs due to resource usage inefficiencies and hinder account usage and access tracking.", + "severity": "high", + "detect": { + "resoto": "is(aws_account) and is_organization_member=False" + }, + "remediation": { + "text": "Create or Join an AWS Organization", + "url": "https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_create.html" + }, + "url": "https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_create.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass das AWS-Konto Teil einer AWS-Organisation ist", + "risk": "Die Nichtzugehörigkeit zu einer AWS-Organisation schränkt die zentralisierte Verwaltung und Kontrolle über AWS-Konten ein, was es schwierig macht, Sicherheitsrichtlinien konsistent durchzusetzen. 
Dies kann auch zu erhöhten Kosten aufgrund ineffizienter Ressourcennutzung führen und die Kontoverwendung und -zugriffsverfolgung beeinträchtigen.", + "remediation": "Erstellen oder Beitritt zu einer AWS-Organisation" + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_rds.json b/resotocore/resotocore/static/report/checks/aws/aws_rds.json index 6ab47685ad..350021b0d2 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_rds.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_rds.json @@ -4,47 +4,140 @@ "checks": [ { "name": "storage_encrypted", - "title": "Check if RDS instances storage is encrypted.", + "title": "Ensure that RDS instances storage is encrypted.", "result_kinds": ["aws_rds_instance"], "categories": ["security", "compliance"], - "risk": "If not enabled sensitive information at rest is not protected.", + "risk": "If storage encryption is not enabled, sensitive data stored at rest is unprotected. This poses a serious security risk as unauthorized parties may access or compromise your data.", "severity": "medium", "detect": { "resoto": "is(aws_rds_instance) and volume_encrypted==false" }, "remediation": { - "text": "Enable Encryption.", + "text": "You should enable storage encryption for the RDS instances. This provides enhancement to your data security by preventing unauthorized access to the underlying storage. Use industry standard AES-256 encryption algorithm and ensure all logs, backups, and snapshots are included in the encryption.", "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encryption.html" + }, + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Encrypting.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der Speicher von RDS-Instanzen verschlüsselt ist.", + "risk": "Wenn die Speicherverschlüsselung nicht aktiviert ist, sind gespeicherte sensible Daten ungeschützt. 
Dies stellt ein ernsthaftes Sicherheitsrisiko dar, da unbefugte Personen auf Ihre Daten zugreifen oder diese kompromittieren können.", + "remediation": "Sie sollten die Speicherverschlüsselung für die RDS-Instanzen aktivieren. Dies verbessert die Datensicherheit, indem unbefugter Zugriff auf den zugrunde liegenden Speicher verhindert wird. Verwenden Sie den branchenüblichen AES-256-Verschlüsselungsalgorithmus und stellen Sie sicher, dass alle Protokolle, Backups und Snapshots in die Verschlüsselung einbezogen werden." + } } }, { "name": "auto_minor_version_upgrade", - "title": "Ensure RDS instances have minor version upgrade enabled.", + "title": "Ensure Minor Version Upgrade is Enabled for RDS Instances", "result_kinds": ["aws_rds_instance"], "categories": ["security", "compliance"], - "risk": "Auto Minor Version Upgrade is a feature that you can enable to have your database automatically upgraded when a new minor database engine version is available. Minor version upgrades often patch security vulnerabilities and fix bugs and therefore should be applied.", + "risk": "Disabling Auto Minor Version Upgrade feature exposes the AWS database to potential threats including security vulnerabilities and bugs. Regular updates ensure optimal performance of your system and protects against such threats.", "severity": "low", "detect": { "resoto": "is(aws_rds_instance) and rds_auto_minor_version_upgrade==false" }, "remediation": { - "text": "Enable auto minor version upgrade for all databases and environments.", + "text": "To mitigate the risk, activate Auto Minor Version Upgrade feature for all databases and environments. 
This will ensure that your AWS database system is up-to-date.", "url": "https://aws.amazon.com/blogs/database/best-practices-for-upgrading-amazon-rds-to-major-and-minor-versions-of-postgresql" + }, + "url": "https://aws.amazon.com/blogs/database/best-practices-for-upgrading-amazon-rds-to-major-and-minor-versions-of-postgresql", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Aktualisierung der Nebenversion für RDS-Instanzen aktiviert ist", + "risk": "Das Deaktivieren der Funktion zur automatischen Aktualisierung der Nebenversion birgt potenzielle Risiken für die AWS-Datenbank, einschließlich Sicherheitslücken und Fehler. Regelmäßige Updates gewährleisten eine optimale Leistung Ihres Systems und schützen vor solchen Bedrohungen.", + "remediation": "Um das Risiko zu mindern, aktivieren Sie die Funktion zur automatischen Aktualisierung der Nebenversion für alle Datenbanken und Umgebungen. Dadurch wird sichergestellt, dass Ihr AWS-Datenbanksystem auf dem neuesten Stand ist." + } + } + }, + { + "name": "db_instance_cloudwatch_logs_enabled", + "title": "Ensure that AWS RDS Instances Have CloudWatch Logs Exports Enabled", + "result_kinds": ["aws_rds_instance"], + "categories": ["security", "compliance"], + "risk": "The absence of CloudWatch Logs exports for RDS instances limits visibility into crucial performance or security metrics, possibly concealing operational issues or threats. It also makes the task of diagnosing problems significantly harder.", + "severity": "medium", + "detect": { + "resoto": "is(aws_rds_instance) and rds_enabled_cloudwatch_logs_exports==[]" + }, + "remediation": { + "text": "Navigate to the RDS instance configuration and click 'Modify'. Enable 'Enable CloudWatch Logs exports.' and specify the CloudWatch Logs group for the RDS Instance. 
Save the changes to implement the new settings.", + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.Concepts.html#USER_LogAccess.Procedural.Enabling" + }, + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Configuring.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass AWS RDS-Instanzen CloudWatch-Protokolle aktiviert haben", + "risk": "Das Fehlen von CloudWatch-Protokollen für RDS-Instanzen begrenzt die Sichtbarkeit wichtiger Leistungs- oder Sicherheitsmetriken, was potenziell betriebliche Probleme oder Bedrohungen verbirgt. Es erschwert auch die Aufgabe der Problembehandlung erheblich.", + "remediation": "Navigieren Sie zur Konfiguration der RDS-Instanz und klicken Sie auf 'Ändern'. Aktivieren Sie 'CloudWatch-Protokollierungen exportieren' und geben Sie die CloudWatch-Protokollgruppe für die RDS-Instanz an. Speichern Sie die Änderungen, um die neuen Einstellungen zu implementieren." + } } }, { "name": "no_public_access", - "title": "Ensure there are no Public Accessible RDS instances.", + "title": "Ensure that all Amazon RDS Instances are Secured from Public Access.", "result_kinds": ["aws_rds_instance"], "categories": ["security", "compliance"], - "risk": "Publicly accessible databases could expose sensitive data to bad actors.", + "risk": "Granting public access to databases exposes sensitive information, making the system vulnerable to malicious attacks and data breaches.", "severity": "critical", "detect": { "resoto": "is(aws_rds_instance) and db_publicly_accessible==true" }, "remediation": { - "text": "Do not allow public access.", + "text": "Ensure that each Amazon RDS instance is properly configured to disallow public access. 
Use options group and DB parameter group settings to enforce this configuration.", "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Configuring.html" + }, + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_RDS_Security.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass alle Amazon RDS-Instanzen vor öffentlichem Zugriff geschützt sind.", + "risk": "Das Gewähren öffentlichen Zugriffs auf Datenbanken stellt sensible Informationen bloß und macht das System anfällig für bösartige Angriffe und Datenverstöße.", + "remediation": "Stellen Sie sicher, dass jede Amazon RDS-Instanz ordnungsgemäß konfiguriert ist, um öffentlichen Zugriff zu verhindern. Verwenden Sie Optionsgruppen- und DB-Parametergruppeneinstellungen, um diese Konfiguration durchzusetzen." + } + } + }, + { + "name": "snapshot_not_public", + "title": "Ensure that Amazon RDS Snapshots are Not Publicly Available", + "result_kinds": ["aws_rds_cluster_snapshot", "aws_rds_snapshot"], + "categories": ["security", "compliance"], + "risk": "If RDS snapshots are publicly available, sensitive data may be exposed, increasing the risk of data breach and non-compliance with data privacy regulations.", + "severity": "critical", + "detect": { + "resoto": "is(aws_rds_cluster_snapshot, aws_rds_snapshot) and rds_attributes.restore[*]==\"all\"" + }, + "remediation": { + "text": "To remediate, disable public availability for all RDS snapshots. 
Ensure that only necessary and authorized AWS accounts have snapshot sharing permissions.", + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ShareSnapshot.html" + }, + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ShareSnapshot.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon RDS-Snapshots nicht öffentlich verfügbar sind", + "risk": "Wenn RDS-Snapshots öffentlich verfügbar sind, können sensible Daten offengelegt werden, was das Risiko eines Datenlecks und Verstöße gegen Datenschutzbestimmungen erhöht.", + "remediation": "Um das Problem zu beheben, deaktivieren Sie die öffentliche Verfügbarkeit für alle RDS-Snapshots. Stellen Sie sicher, dass nur notwendige und autorisierte AWS-Konten Berechtigungen zum Freigeben von Snapshots haben." + } + } + }, + { + "name": "db_instance_automatic_minor_version_upgrade_enabled", + "title": "Ensure Automatic Minor Version Upgrades are Enabled for RDS Instances", + "result_kinds": ["aws_rds_instance"], + "categories": ["security", "compliance"], + "risk": "Not enabling automatic minor version upgrades poses a critical risk as it can leave your infrastructure vulnerable to security flaws and bugs. Accelerated patching of these vulnerabilities is crucial for maintaining robust security.", + "severity": "critical", + "detect": { + "resoto": "is(aws_rds_instance) and rds_auto_minor_version_upgrade=false" + }, + "remediation": { + "text": "To fix this issue, make sure to enable the Auto Minor Version Upgrade feature in your RDS instances. 
This allows your database to be automatically upgraded when a new minor version is available, ensuring up-to-date and secure operations.", + "url": "https://aws.amazon.com/about-aws/whats-new/2018/12/amazon-rds-enhances-auto-minor-version-upgrades/" + }, + "url": "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Upgrading.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass automatische Upgrades für kleinere Versionen für RDS-Instanzen aktiviert sind", + "risk": "Das Nichtaktivieren automatischer Upgrades für kleinere Versionen birgt ein erhebliches Risiko, da Ihre Infrastruktur anfällig für Sicherheitslücken und Fehler sein kann. Eine beschleunigte Patchung dieser Schwachstellen ist entscheidend für die Aufrechterhaltung einer robusten Sicherheit.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass die Funktion 'Auto Minor Version Upgrade' in Ihren RDS-Instanzen aktiviert ist. Dadurch wird Ihre Datenbank automatisch aktualisiert, wenn eine neue kleinere Version verfügbar ist, was einen aktuellen und sicheren Betrieb gewährleistet." + } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_redshift.json b/resotocore/resotocore/static/report/checks/aws/aws_redshift.json new file mode 100644 index 0000000000..ac62d8d966 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_redshift.json @@ -0,0 +1,75 @@ +{ + "provider": "aws", + "service": "redshift", + "checks": [ + { + "name": "cluster_audit_logging_enabled", + "title": "Ensure Cluster Audit Logging is Enabled for Optimal Compliance and Incident Investigation", + "result_kinds": ["aws_redshift_cluster"], + "categories": ["security", "compliance"], + "risk": "Failure to enable cluster audit logging in Amazon Redshift could compromise security by not providing a detailed record of database activities. 
This could affect effective monitoring, incident investigation, and compliance.", + "severity": "medium", + "detect": { + "resoto": "is(aws_redshift_cluster) and redshift_logging_status.logging_enabled=false" + }, + "remediation": { + "text": "To enable audit logging, go to 'Cluster details' tab -> click 'Modify'. Next, under 'Audit logging', enable or adjust the settings as needed, then save the changes.", + "url": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing-console.html" + }, + "url": "https://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Audit-Protokollierung für den Cluster aktiviert ist, um optimale Compliance und Untersuchung von Vorfällen zu gewährleisten", + "risk": "Die Nichtaktivierung der Audit-Protokollierung für den Cluster in Amazon Redshift kann die Sicherheit gefährden, indem keine detaillierte Aufzeichnung der Datenbankaktivitäten bereitgestellt wird. Dies kann sich auf die wirksame Überwachung, Untersuchung von Vorfällen und Compliance auswirken.", + "remediation": "Um die Audit-Protokollierung zu aktivieren, gehen Sie zum Tab 'Clusterdetails' -> Klicken Sie auf 'Ändern'. Aktivieren oder passen Sie unter 'Audit-Protokollierung' die Einstellungen nach Bedarf an und speichern Sie die Änderungen." + } + } + }, + { + "name": "cluster_publicly_accessible", + "title": "Ensure Redshift Clusters are Not Publicly Accessible", + "result_kinds": ["aws_redshift_cluster"], + "categories": ["security", "compliance"], + "risk": "Having Redshift clusters publicly accessible can lead to unauthorized access and potential data breaches. This exposes sensitive data and risks noncompliance with industry regulations.", + "severity": "high", + "detect": { + "resoto": "is(aws_redshift_cluster) and redshift_publicly_accessible=true and redshift_endpoint.address!=null" + }, + "remediation": { + "text": "Identify all publicly accessible Redshift clusters. 
For each cluster, review the necessity of public accessibility. If unjustified, revise the settings by disallowing public access to enhance security.", + "url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-console.html#modify-cluster-access" + }, + "url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html", + "localizations": { + "de": { + "title": "Redshift-Cluster dürfen nicht öffentlich erreichbar sein", + "risk": "Öffentlich erreichbare Redshift-Cluster können zu unbefugtem Zugriff und potenziellen Datenlecks führen. Dadurch werden sensible Daten offengelegt und es besteht das Risiko der Nichtkonformität mit branchenspezifischen Vorschriften.", + "remediation": "Ermitteln Sie alle öffentlich zugänglichen Redshift-Cluster. Überprüfen Sie für jeden Cluster die Notwendigkeit der öffentlichen Erreichbarkeit. Wenn nicht gerechtfertigt, ändern Sie die Einstellungen, um den öffentlichen Zugriff zu untersagen und die Sicherheit zu verbessern." + } + } + }, + { + "name": "version_upgrade_enabled", + "title": "Ensure Automatic Upgrades are Enabled for Redshift Clusters", + "result_kinds": ["aws_redshift_cluster"], + "categories": ["security", "compliance"], + "risk": "Disabling automatic upgrades for Redshift clusters exposes them to security vulnerabilities due to outdated versions, potentially leading to data breaches and non-compliance with regulations.", + "severity": "high", + "detect": { + "resoto": "is(aws_redshift_cluster) and redshift_allow_version_upgrade=false" + }, + "remediation": { + "text": "To mitigate this risk, enable automatic upgrades in the AWS Management Console under the 'Maintenance' tab for each Redshift cluster. 
This ensures that your Redshift clusters are always operating on the latest, security patch included, version.", + "url": "https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-maintenance-settings" + }, + "url": "https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass automatische Upgrades für Redshift-Cluster aktiviert sind", + "risk": "Das Deaktivieren automatischer Upgrades für Redshift-Cluster macht sie anfällig für Sicherheitslücken aufgrund veralteter Versionen und kann zu Datenverstößen und Nichteinhaltung von Vorschriften führen.", + "remediation": "Um dieses Risiko zu minimieren, aktivieren Sie automatische Upgrades in der AWS Management Console im 'Maintenance'-Tab für jeden Redshift-Cluster. Dadurch gewährleisten Sie, dass Ihre Redshift-Cluster immer mit der neuesten Version, einschließlich Sicherheitspatches, betrieben werden." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_route53.json b/resotocore/resotocore/static/report/checks/aws/aws_route53.json new file mode 100644 index 0000000000..7fa9421dd8 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_route53.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "route53", + "checks": [ + { + "name": "zone_query_logging_enabled", + "title": "Ensure Activation of DNS Query Logging in Route 53 for Enhanced Monitoring and Security", + "result_kinds": ["aws_route53_zone"], + "categories": ["security", "compliance"], + "risk": "Not enabling query logging in Route 53 zones can obscure visibility into DNS query traffic, making it difficult to detect unusual patterns or malicious activities. 
Such a lack of visibility increases the risk of undetected security threats and hampers incident response efforts, potentially leading to prolonged exposure to cyber attacks or misuse.", + "severity": "medium", + "detect": { + "resoto": "is(aws_route53_zone) and zone_logging_config==null" + }, + "remediation": { + "text": "To enable query logging, navigate to the Route 53 console, select the desired hosted zone, and go to the Query logging tab. Click on 'Configure query logging', then choose a log destination (CloudWatch Logs, S3, or Kinesis Data Firehose) and configure the necessary settings. Ensure that the chosen logging service has the necessary permissions to receive logs. Finally, confirm and save your settings to start logging DNS queries.", + "url": "https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html" + }, + "url": "https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html", + "localizations": { + "de": { + "title": "Aktivierung der DNS-Abfrageprotokollierung in Route 53 zur Verbesserung der Überwachung und Sicherheit gewährleisten", + "risk": "Die Nichtaktivierung der Abfrageprotokollierung in Route 53-Zonen kann die Sichtbarkeit des DNS-Abfrageverkehrs beeinträchtigen und es schwierig machen, ungewöhnliche Muster oder bösartige Aktivitäten zu erkennen. Ein Mangel an Sichtbarkeit erhöht das Risiko unentdeckter Sicherheitsbedrohungen und behindert die Reaktion auf Vorfälle. Dadurch kann es zu einer längeren Exposition gegenüber Cyberangriffen oder Missbrauch kommen.", + "remediation": "Um die Abfrageprotokollierung zu aktivieren, gehen Sie zur Route 53-Konsole, wählen Sie die gewünschte gehostete Zone aus und wechseln Sie zum Tab 'Abfrageprotokollierung'. Klicken Sie auf 'Abfrageprotokollierung konfigurieren' und wählen Sie dann ein Protokollziel (CloudWatch Logs, S3 oder Kinesis Data Firehose) aus und konfigurieren Sie die erforderlichen Einstellungen. 
Stellen Sie sicher, dass der ausgewählte Protokolldienst die erforderlichen Berechtigungen zum Empfangen von Protokollen hat. Bestätigen und speichern Sie abschließend Ihre Einstellungen, um die Protokollierung der DNS-Abfragen zu starten." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_s3.json b/resotocore/resotocore/static/report/checks/aws/aws_s3.json index 6dbb77e8d3..56de3c875c 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_s3.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_s3.json @@ -4,13 +4,10 @@ "checks": [ { "name": "bucket_default_encryption", - "title": "Check if S3 buckets have default encryption (SSE) enabled or use a bucket policy to enforce it.", + "title": "Ensure S3 buckets have default encryption (SSE) enabled or use a bucket policy to enforce it", "result_kinds": ["aws_s3_bucket"], - "categories": [ - "security", - "compliance" - ], - "risk": "Amazon S3 default encryption provides a way to set the default encryption behavior for an S3 bucket. 
This will ensure data-at-rest is encrypted.", + "categories": ["security", "compliance"], + "risk": "If the issue is not solved, there is a risk of data-at-rest being unencrypted, which can pose a threat to the confidentiality and integrity of stored objects.", "severity": "medium", "detect": { "resoto": "is(aws_s3_bucket) and not bucket_encryption_rules[*].sse_algorithm!=null" @@ -19,20 +16,23 @@ "action": { "aws_cli": "aws s3api put-bucket-encryption --bucket {{name}} --server-side-encryption-configuration '{'Rules': [{'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'}}]}'" }, - "text": "Ensure that S3 buckets has encryption at rest enabled.", + "text": "To fix the issue, ensure that S3 buckets have encryption at rest enabled using default encryption (SSE) or enforcing it through a bucket policy.", "url": "https://aws.amazon.com/blogs/security/how-to-prevent-uploads-of-unencrypted-objects-to-amazon-s3/" }, - "internal_notes": "" + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass S3-Buckets standardmäßige Verschlüsselung (SSE) aktiviert haben oder erzwingen Sie dies über eine Bucket-Richtlinie", + "risk": "Wenn das Problem nicht gelöst wird, besteht die Gefahr, dass Daten im Ruhezustand unverschlüsselt sind, was eine Bedrohung für die Vertraulichkeit und Integrität der gespeicherten Objekte darstellen kann.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass S3-Buckets die Verschlüsselung im Ruhezustand aktiviert haben, indem Sie die standardmäßige Verschlüsselung (SSE) verwenden oder sie über eine Bucket-Richtlinie erzwingen." 
+ } + } }, { "name": "bucket_no_mfa_delete", - "title": "Check if S3 bucket MFA Delete is not enabled.", + "title": "Ensure S3 bucket MFA Delete is enabled.", "result_kinds": ["aws_s3_bucket"], - "categories": [ - "security", - "compliance" - ], - "risk": "Your security credentials are compromised or unauthorized access is granted.", + "categories": ["security", "compliance"], + "risk": "Without enabling MFA Delete for an S3 bucket, your security credentials are at risk of being compromised, and unauthorized access may be granted.", "severity": "medium", "detect": { "resoto": "is(aws_s3_bucket) and bucket_mfa_delete=false" @@ -41,70 +41,154 @@ "action": { "aws_cli": "aws s3api put-bucket-versioning --bucket {{name}} --versioning-configuration MFADelete=Enabled --mfa 'arn:aws:iam::00000000:mfa/root-account-mfa-device 123456'" }, - "text": "Adding MFA delete to an S3 bucket, requires additional authentication when you change the version state of your bucket or you delete and object version adding another layer of security in the event your security credentials are compromised or unauthorized access is granted.", + "text": "To enhance the security of your S3 bucket, enable MFA delete. This requires additional authentication when changing the version state of your bucket or deleting an object version, adding an extra layer of security in case your security credentials are compromised or unauthorized access is granted.", "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiFactorAuthenticationDelete.html" }, - "internal_notes": "" + "localizations": { + "de": { + "title": "Sicherstellen, dass MFA Delete für den S3-Bucket aktiviert ist.", + "risk": "Ohne die Aktivierung von MFA Delete für einen S3-Bucket sind Ihre Sicherheitsanmeldedaten gefährdet und es besteht die Möglichkeit einer unbefugten Zugriffsgewährung.", + "remediation": "Aktivieren Sie MFA Delete für Ihren S3-Bucket, um die Sicherheit zu verbessern. 
Dadurch ist bei Änderungen des Versionierungsstatus des Buckets oder beim Löschen einer Objektversion eine zusätzliche Authentifizierung erforderlich. Dadurch wird eine zusätzliche Sicherheitsebene hinzugefügt, falls Ihre Sicherheitsanmeldedaten gefährdet sind oder unbefugter Zugriff gewährt wird." + } + } }, { "name": "bucket_secure_transport_policy", - "title": "Check if S3 buckets have secure transport policy.", + "title": "Ensure S3 buckets have secure transport policy.", "result_kinds": ["aws_s3_bucket"], - "categories": [ - "security", - "compliance" - ], - "risk": "If HTTPS is not enforced on the bucket policy, communication between clients and S3 buckets can use unencrypted HTTP. As a result, sensitive information could be transmitted in clear text over the network or internet.", + "categories": ["security", "compliance"], + "risk": "Without enforcing HTTPS on the bucket policy, communication between clients and S3 buckets may use unencrypted HTTP, posing a risk of transmitting sensitive information in clear text over the network or internet.", "severity": "medium", "detect": { "resoto": "is(aws_s3_bucket) and not bucket_policy.Statement[*].{Effect=Deny and (Action=s3:PutObject or Action=\"s3:*\" or Action=\"*\") and Condition.Bool.`aws:SecureTransport`== \"false\" }" }, "remediation": { - "text": "Ensure that S3 buckets has encryption in transit enabled.", + "text": "To fix the issue, ensure that S3 buckets have encryption in transit enabled to enforce secure communication.", "url": "https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-policy-for-config-rule/" }, - "internal_notes": "" + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass S3-Buckets über eine sichere Transportrichtlinie verfügen.", + "risk": "Ohne die Durchsetzung von HTTPS in der Bucket-Richtlinie kann die Kommunikation zwischen Clients und S3-Buckets unverschlüsseltes HTTP verwenden. 
Dies birgt das Risiko, dass sensible Informationen im Klartext über das Netzwerk oder das Internet übertragen werden.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass S3-Buckets die Verschlüsselung während der Übertragung aktiviert haben, um eine sichere Kommunikation durchzusetzen." + } + } }, { "name": "macie_is_enabled", - "title": "Check if Amazon Macie is enabled.", + "title": "Ensure Amazon Macie is enabled", "result_kinds": ["aws_s3_bucket"], - "categories": [ - "security", - "compliance" - ], - "risk": "Amazon Macie is a fully managed data security and data privacy service that uses machine learning and pattern matching to help you discover, monitor and protect your sensitive data in AWS.", + "categories": ["security", "compliance"], + "risk": "If Amazon Macie is not enabled, sensitive data in AWS may be at risk of unauthorized access or exposure. Amazon Macie provides automated sensitive data discovery and helps protect sensitive data by using machine learning and pattern matching.", "severity": "medium", "detect": { "manual": "Check if Amazon Macie is enabled." }, "remediation": { - "text": "Enable Amazon Macie and create appropriate jobs to discover sensitive data.", + "text": "To fix this issue, enable Amazon Macie in the AWS Management Console and create appropriate jobs to discover and protect sensitive data.", "url": "https://aws.amazon.com/macie/getting-started/" }, - "internal_notes": "" + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon Macie aktiviert ist", + "risk": "Wenn Amazon Macie nicht aktiviert ist, kann sensibler Datenbestand in AWS einem unbefugten Zugriff oder einer Offenlegung ausgesetzt sein. 
Amazon Macie bietet automatisierte Erkennung sensibler Daten und schützt diese durch den Einsatz von maschinellem Lernen und Mustererkennung.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie Amazon Macie in der AWS Management Console und erstellen Sie entsprechende Aufträge zur Erkennung und zum Schutz sensibler Daten." + } + } + }, + { + "name": "bucket_policy_public_write_access", + "title": "Ensure S3 buckets do not have policies that allow write access to the public.", + "result_kinds": ["aws_s3_bucket"], + "categories": ["security", "compliance"], + "risk": "If the issue is not solved, non-intended users can put objects in a given bucket, leading to unauthorized data modifications or leaks.", + "severity": "critical", + "detect": { + "resoto": "is(aws_s3_bucket) and bucket_policy!=null and bucket_public_access_block_configuration.restrict_public_buckets==false" + }, + "remediation": { + "text": "To fix the issue, ensure that a proper bucket policy is in place with the principle of least privilege applied.", + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html" + }, + "url": "https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html", + "localizations": { + "de": { + "title": "Sicherstellen, dass S3 Buckets keine Richtlinien haben, die öffentlichen Schreibzugriff erlauben.", + "risk": "Wenn das Problem nicht behoben wird, können nicht beabsichtigte Benutzer Objekte in einen bestimmten Bucket ablegen, was zu unbefugten Datenänderungen oder Lecks führen kann.", + "remediation": "Um das Problem zu beheben, stellen Sie sicher, dass eine angemessene Bucket-Richtlinie mit dem Prinzip des geringsten Privilegs vorhanden ist." 
+ } + } }, { "name": "account_level_public_access_blocks", - "title": "Check S3 Account Level Public Access Block.", + "title": "Ensure S3 Account Level Public Access Block is Enabled", "result_kinds": ["aws_s3_bucket"], - "categories": [ - "security", - "compliance" - ], - "risk": "Public access policies may be applied to sensitive data buckets.", + "categories": ["security", "compliance"], + "risk": "Failure to enable the S3 Account Level Public Access Block may lead to potential unauthorized access to sensitive data stored in S3 buckets.", "severity": "high", "detect": { "resoto": "is(aws_s3_bucket) {account_setting: <-[0:]- is(aws_account) --> is(aws_s3_account_settings)} (bucket_public_access_block_configuration.block_public_acls==false and account_setting.reported.bucket_public_access_block_configuration.block_public_acls==false) or (bucket_public_access_block_configuration.ignore_public_acls==false and account_setting.reported.bucket_public_access_block_configuration.ignore_public_acls==false) or (bucket_public_access_block_configuration.block_public_policy==false and account_setting.reported.bucket_public_access_block_configuration.block_public_policy==false) or (bucket_public_access_block_configuration.restrict_public_buckets==false and account_setting.reported.bucket_public_access_block_configuration.restrict_public_buckets==false)" }, "remediation": { + "text": "To fix this issue, ensure you enable Public Access Block at the account level to prevent the exposure of your data stored in S3. 
Follow the instructions in the AWS documentation.", + "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html", "action": { "aws_cli": "aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true --account-id {{account_id}}" - }, - "text": "You can enable Public Access Block at the account level to prevent the exposure of your data stored in S3.", + } + }, + "localizations": { + "de": { + "title": "Sicherstellen, dass der S3 Account Level Public Access Block aktiviert ist", + "risk": "Das Versäumnis, den S3 Account Level Public Access Block zu aktivieren, kann zu potenziell unbefugtem Zugriff auf sensible Daten führen, die in S3 Buckets gespeichert sind.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass Sie den Public Access Block auf Kontenebene aktivieren, um die Offenlegung Ihrer in S3 gespeicherten Daten zu verhindern. Befolgen Sie die Anweisungen in der AWS-Dokumentation." 
+ } + } + }, + { + "name": "public_bucket", + "title": "Ensure S3 buckets are not made public", + "result_kinds": ["aws_s3_bucket"], + "categories": ["security", "compliance"], + "risk": "Failure to secure S3 buckets can lead to unauthorized access and potential security breaches in operations.", + "severity": "high", + "detect": { + "resoto": "is(aws_s3_bucket) and bucket_public_access_block_configuration.{block_public_acls!=true or ignore_public_acls!=true or block_public_policy!=true or restrict_public_buckets!=true} or bucket_acl.grants[*].{permission in [READ, READ_ACP] and grantee.uri==\"http://acs.amazonaws.com/groups/global/AllUsers\"}" + }, + "remediation": { + "text": "To fix this issue, update the S3 bucket configurations to disable public settings and ensure bucket policies do not grant all permissions.", "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html" }, - "internal_notes": "" + "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass S3-Buckets nicht öffentlich zugänglich sind", + "risk": "Die fehlende Absicherung von S3-Buckets kann unberechtigten Zugriff ermöglichen und potenzielle Sicherheitsverstöße in den Betriebsabläufen verursachen.", + "remediation": "Um dieses Problem zu beheben, aktualisieren Sie die Konfiguration des S3-Buckets, um öffentliche Einstellungen zu deaktivieren und sicherzustellen, dass Bucket-Richtlinien keine umfassenden Berechtigungen gewähren." 
+ } + } + }, + { + "name": "bucket_object_logging_enabled", + "title": "Ensure S3 bucket object logging is enabled to aid in identifying security breaches with AWS S3", + "result_kinds": ["aws_s3_bucket"], + "categories": ["security", "compliance"], + "risk": "Enabling S3 bucket object logging is crucial for security and compliance, as it provides audit trails for access and modifications to objects, aiding in identifying unauthorized access or data breaches.", + "severity": "high", + "detect": { + "resoto": "is(aws_s3_bucket) and bucket_logging==null" + }, + "remediation": { + "text": "To fix the issue, select 'Edit', choose a target bucket for the logs, set a prefix if desired, and save the changes.", + "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/logging-with-S3.html" + }, + "url": "https://docs.aws.amazon.com/AmazonS3/latest/userguide/logging-with-S3.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Protokollierung von Objekten in S3-Buckets aktiviert ist, um bei AWS S3-Sicherheitsverstößen zu helfen", + "risk": "Die Aktivierung der Protokollierung von Objekten in S3-Buckets ist für die Sicherheit und Compliance entscheidend, da sie Audit Trails für den Zugriff und Änderungen an Objekten bereitstellt und hilft, unbefugten Zugriff oder Datenverstöße zu identifizieren.", + "remediation": "Um das Problem zu beheben, wählen Sie 'Bearbeiten', wählen Sie einen Zielspeicherort für die Protokolle aus, legen Sie bei Bedarf einen Präfix fest und speichern Sie die Änderungen ab." 
+ } + } } ] } diff --git a/resotocore/resotocore/static/report/checks/aws/aws_sagemaker.json b/resotocore/resotocore/static/report/checks/aws/aws_sagemaker.json index 7df4c78870..b6e5032ca3 100644 --- a/resotocore/resotocore/static/report/checks/aws/aws_sagemaker.json +++ b/resotocore/resotocore/static/report/checks/aws/aws_sagemaker.json @@ -4,20 +4,208 @@ "checks": [ { "name": "notebook_root_access", - "title": "Check if Amazon SageMaker Notebook instances have root access disabled", - "result_kind": ["aws_sagemaker_notebook"], - "categories": [ - "security", - "compliance" - ], - "risk": "Users with root access have administrator privileges. Users can access and edit all files on a notebook instance with root access enabled.", + "title": "Ensure Root Access is Disabled for SageMaker Notebooks", + "result_kinds": ["aws_sagemaker_notebook"], + "categories": ["security", "compliance"], + "risk": "Enabling root access in SageMaker notebooks can lead to security vulnerabilities, as it grants full control over the notebook environment.", "severity": "medium", "detect": { "resoto": "is(aws_sagemaker_notebook) and notebook_root_access==Enabled" }, "remediation": { - "text": "Set the RootAccess field to Disabled. You can also disable root access for users when you create or update a notebook instance in the Amazon SageMaker console.", - "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html" + "text": "To fix this issue and enforce security best practices, disable root access for SageMaker notebooks. 
This can be done by configuring the RootAccess parameter to 'Disabled' in the SageMaker console or through the API.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-root-access.html" + }, + "url": "https://aws.amazon.com/sagemaker/", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass der Root-Zugriff für SageMaker Notebooks deaktiviert ist", + "risk": "Die Aktivierung des Root-Zugriffs in SageMaker Notebooks kann zu Sicherheitslücken führen, da dadurch die vollständige Kontrolle über die Notebook-Umgebung gewährt wird.", + "remediation": "Um dieses Problem zu beheben und bewährte Sicherheitspraktiken durchzusetzen, deaktivieren Sie den Root-Zugriff für SageMaker-Notebooks. Dies kann über die Konfiguration des RootAccess-Parameters auf 'Disabled' in der SageMaker-Konsole oder über die API erfolgen." + } + } + }, + { + "name": "notebook_with_direct_internet_access", + "title": "Ensure SageMaker Notebooks Do Not Have Direct Internet Access", + "result_kinds": ["aws_sagemaker_notebook"], + "categories": ["security", "compliance"], + "risk": "If SageMaker notebooks have direct internet access, they are exposed to security threats and potential data breaches.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_notebook) and notebook_direct_internet_access==\"Enabled\"" + }, + "remediation": { + "text": "To fix this issue, configure SageMaker notebooks to use a VPC without direct internet access. 
This setup enhances network security and significantly reduces the risk of unauthorized external access.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-interface-vpc-endpoint.html" + }, + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-interface-vpc-endpoint.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass SageMaker-Notebooks keinen direkten Internetzugang haben", + "risk": "Es ist von entscheidender Bedeutung, sicherzustellen, dass SageMaker-Notebooks keinen direkten Internetzugang haben, um Sicherheitsbedrohungen zu minimieren und potenzielle Datenverstöße zu verhindern.", + "remediation": "Um dieses Problem zu beheben, konfigurieren Sie SageMaker-Notebooks so, dass sie ein VPC ohne direkten Internetzugang verwenden. Diese Einrichtung erhöht die Netzwerksicherheit und reduziert das Risiko unbefugten externen Zugriffs erheblich." + } + } + }, + { + "name": "model_isolation_enabled", + "title": "Ensure Network Isolation is Enabled for SageMaker Models", + "result_kinds": ["aws_sagemaker_model"], + "categories": ["security", "compliance"], + "risk": "Failure to enable network isolation for SageMaker models may result in unauthorized access to data or cyber attacks.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_model) and model_enable_network_isolation=false" + }, + "remediation": { + "text": "To fix this issue, enable network isolation for SageMaker models to prevent unauthorized access to data and ensure data security.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/model-vpc-security.html" + }, + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/model-vpc-security.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Netzwerk-Isolierung für SageMaker-Modelle aktiviert ist", + "risk": "Die Nichtaktivierung der Netzwerk-Isolierung für SageMaker-Modelle kann zu unbefugtem Zugriff auf Daten oder Cyberangriffen führen.", + "remediation": 
"Um dieses Problem zu beheben, aktivieren Sie die Netzwerk-Isolierung für SageMaker-Modelle, um unbefugten Zugriff auf Daten zu verhindern und die Datensicherheit zu gewährleisten." + } + } + }, + { + "name": "model_vpc_settings_enabled", + "title": "Ensure SageMaker Studio Classic Notebooks are Configured in a VPC", + "result_kinds": ["aws_sagemaker_model"], + "categories": ["security", "compliance"], + "risk": "Models without VPC isolation are vulnerable to unauthorized data access or cyber attacks. Restricting traffic access by launching Studio in a Virtual Private Cloud (VPC) is crucial to ensure data security.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_model) and model_vpc_config.subnets in [null, []]" + }, + "remediation": { + "text": "Connect SageMaker Studio Classic Notebooks to a Virtual Private Cloud (VPC) for external resource access. Follow the instructions in the official AWS SageMaker documentation to configure VPC settings.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" + }, + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass SageMaker Studio Classic Notebooks in einem VPC konfiguriert sind", + "risk": "Modelle ohne VPC-Isolierung sind anfällig für unbefugten Zugriff auf Daten oder Cyberangriffe. Die Einschränkung des Netzwerkzugriffs durch das Ausführen von Studio in einem Virtual Private Cloud (VPC) ist entscheidend, um die Datensicherheit zu gewährleisten.", + "remediation": "Verbinden Sie SageMaker Studio Classic Notebooks mit einem Virtual Private Cloud (VPC) für den Zugriff auf externe Ressourcen. Befolgen Sie die Anweisungen in der offiziellen AWS SageMaker-Dokumentation, um VPC-Einstellungen zu konfigurieren." 
+ } + } + }, + { + "name": "notebook_vpc_settings_enabled", + "title": "Ensure VPC is configured for SageMaker Notebook", + "result_kinds": ["aws_sagemaker_notebook"], + "categories": ["security", "compliance"], + "risk": "Using a SageMaker Notebook without VPC isolation increases the risk of unauthorized data access and cyber attacks.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_notebook) with(empty, <-- is(aws_ec2_subnet))" + }, + "remediation": { + "text": "To mitigate the risk, restrict the traffic that can access the SageMaker Notebook by launching it in a Virtual Private Cloud (VPC) of your choosing.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/studio-notebooks-and-internet-access.html" + }, + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/vpc-connect-to-classic-notebook.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass VPC für SageMaker Notebook konfiguriert ist", + "risk": "Die Verwendung eines SageMaker Notebooks ohne VPC-Isolierung erhöht das Risiko eines unbefugten Zugriffs auf Daten und von Cyberangriffen.", + "remediation": "Um das Risiko zu mindern, beschränken Sie den Datenverkehr, der auf das SageMaker Notebook zugreifen kann, indem Sie es in einem Virtual Private Cloud (VPC) Ihrer Wahl starten." + } + } + }, + { + "name": "training_job_vpc_settings_enabled", + "title": "Ensure VPC is configured for SageMaker Training Jobs", + "result_kinds": ["aws_sagemaker_training_job"], + "categories": ["security", "compliance"], + "risk": "Training Jobs without VPC isolation may be vulnerable to unauthorized data access or cyber attacks. 
Security engineers must ensure that SageMaker training jobs are configured to use a Virtual Private Cloud (VPC) to mitigate this risk.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_training_job) and training_job_enable_network_isolation=false" + }, + "remediation": { + "text": "To fix this issue, security engineers should restrict the traffic that can access SageMaker training jobs by launching them in a Virtual Private Cloud (VPC) of their choosing. This can be done by following the instructions in the AWS documentation on how to configure a VPC interface endpoint for SageMaker.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html" + }, + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass VPC für SageMaker-Training-Jobs konfiguriert ist", + "risk": "Training-Jobs ohne VPC-Isolierung können anfällig für unbefugten Datenzugriff oder Cyberangriffe sein. Sicherheitsingenieure müssen sicherstellen, dass SageMaker-Training-Jobs so konfiguriert sind, dass sie ein Virtual Private Cloud (VPC) verwenden, um dieses Risiko zu mindern.", + "remediation": "Um dieses Problem zu beheben, sollten Sicherheitsingenieure den Datenverkehr einschränken, der auf SageMaker-Training-Jobs zugreifen kann, indem sie diese in einer von ihnen gewählten Virtual Private Cloud (VPC) starten. Dies kann durch Befolgung der Anweisungen in der AWS-Dokumentation zur Konfiguration eines VPC-Schnittstellenendpunkts für SageMaker erfolgen." + } + } + }, + { + "name": "notebook_encryption_enabled", + "title": "Ensure Encryption is Enabled for SageMaker Notebooks", + "result_kinds": ["aws_sagemaker_notebook"], + "categories": ["security", "compliance"], + "risk": "Failure to enable encryption for SageMaker notebooks can lead to unauthorized data access and potential data breaches. 
By utilizing KMS keys, organizations can add a critical layer of security through encryption.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_notebook) with(empty, --> is(aws_kms_key))" + }, + "remediation": { + "text": "To secure data at rest, enable KMS-based encryption for SageMaker notebooks. Configure the notebooks to use AWS KMS keys for both S3 (input/output) and EBS volume encryption.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/encryption-at-rest.html" + }, + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/encryption-at-rest.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Verschlüsselung für SageMaker Notebooks aktiviert ist", + "risk": "Die Nichtaktivierung der Verschlüsselung für SageMaker-Notebooks kann zu unbefugtem Zugriff auf Daten und potenziellen Datenverstößen führen. Durch die Verwendung von KMS-Schlüsseln können Organisationen durch Verschlüsselung eine wichtige Sicherheitsebene hinzufügen.", + "remediation": "Um Daten in Ruhe zu sichern, aktivieren Sie die KMS-basierte Verschlüsselung für SageMaker-Notebooks. Konfigurieren Sie die Notebooks so, dass sie AWS KMS-Schlüssel sowohl für die S3-(Ein-/Ausgabe-) als auch die EBS-Verschlüsselung verwenden." + } + } + }, + { + "name": "training_job_encryption_enabled", + "title": "Ensure Encryption for SageMaker Training Jobs", + "result_kinds": ["aws_sagemaker_training_job"], + "categories": ["security", "compliance"], + "risk": "SageMaker training jobs without encryption are vulnerable to data interception and unauthorized access. Encryption ensures data confidentiality during training processes.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_training_job) and training_job_enable_inter_container_traffic_encryption=false" + }, + "remediation": { + "text": "Enable TLS 1.2 encryption for inter-container traffic in SageMaker training jobs to protect data in transit. 
This ensures secure communication between the training containers.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html" + }, + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/train-vpc.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Verschlüsselung für SageMaker Training Jobs aktiviert ist", + "risk": "SageMaker Training Jobs ohne Verschlüsselung sind anfällig für Dateninterception und unbefugten Zugriff. Verschlüsselung gewährleistet die Vertraulichkeit von Daten während des Trainingsprozesses.", + "remediation": "Aktivieren Sie die TLS 1.2-Verschlüsselung für den Kommunikationsverkehr zwischen den Containern in SageMaker Training Jobs, um die Daten während der Übertragung zu schützen. Dadurch wird eine sichere Kommunikation zwischen den Training Containern gewährleistet." + } + } + }, + { + "name": "training_job_volume_encryption_enabled", + "title": "Ensure encryption is enabled for SageMaker Training Jobs volumes", + "result_kinds": ["aws_sagemaker_training_job"], + "categories": ["security", "compliance"], + "risk": "Unencrypted data poses a risk of unauthorized data access and potential data breaches. 
Utilizing KMS keys adds a critical layer of security through encryption.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sagemaker_training_job) and training_job_resource_config.volume_kms_key_id==null" + }, + "remediation": { + "text": "Enable volume encryption for SageMaker Training Jobs by specifying a KMS key during job creation.", + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html#resources-security-network-encryption" + }, + "url": "https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html#resources-security-network-encryption", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die Verschlüsselung für SageMaker-Trainingsjobs-Volumes aktiviert ist", + "risk": "Unverschlüsselte Daten bergen das Risiko unbefugten Datenzugriffs und potenzieller Datenverstöße. Die Verwendung von KMS-Schlüsseln bietet eine kritische Sicherheitsebene durch Verschlüsselung.", + "remediation": "Aktivieren Sie die Volumenverschlüsselung für SageMaker-Trainingsjobs, indem Sie bei der Joberstellung einen KMS-Schlüssel angeben." 
+ } } } ] diff --git a/resotocore/resotocore/static/report/checks/aws/aws_secretsmanager.json b/resotocore/resotocore/static/report/checks/aws/aws_secretsmanager.json new file mode 100644 index 0000000000..81a3ee73ef --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_secretsmanager.json @@ -0,0 +1,104 @@ +{ + "provider": "aws", + "service": "secretsmanager", + "checks": [ + { + "name": "secret_rotation_enabled", + "title": "Ensure Secrets Manager secrets have automatic rotation enabled", + "result_kinds": ["aws_secretsmanager_secret"], + "categories": ["compliance"], + "risk": "Not enabling automatic rotation for Secrets Manager secrets may lead to extended periods of active secrets and increased business impact if compromised.", + "severity": "medium", + "detect": { + "resoto": "is(aws_secretsmanager_secret) and rotation_enabled==false" + }, + "remediation": { + "text": "To fix this issue, enable automatic rotation for the secret.", + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html" + }, + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Secrets Manager-Geheimnisse automatisch rotiert werden", + "risk": "Das Nichtaktivieren der automatischen Rotation für Secrets Manager-Geheimnisse kann zu längeren Zeiträumen aktiver Geheimnisse führen und die geschäftlichen Auswirkungen bei Kompromittierung erhöhen.", + "remediation": "Um dieses Problem zu beheben, aktivieren Sie die automatische Rotation für das Geheimnis." 
+ } + } + }, + { + "name": "secret_not_used_90d", + "title": "Ensure no unused Secrets in the last 90 days", + "result_kinds": ["aws_secretsmanager_secret"], + "categories": ["compliance"], + "risk": "If not solved, there is a risk of potential unauthorized access, lack of tracking or auditing, non-compliance with data retention policies, and increased exposure time for potential breaches.", + "severity": "medium", + "detect": { + "resoto": "is(aws_secretsmanager_secret) and last_accessed_date < {{secret_too_old_age.from_now}}" + }, + "default_values": { + "secret_too_old_age": "90d" + }, + "remediation": { + "text": "To fix the issue, retrieve the details of a secret and view the LastAccessedDate to identify unused secrets.", + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html" + }, + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass in den letzten 90 Tagen keine ungenutzten Secrets vorhanden sind", + "risk": "Wenn nicht behoben, besteht die Gefahr eines potenziellen unbefugten Zugriffs, fehlender Verfolgung oder Auditierung, Nichtkonformität mit Datenaufbewahrungsrichtlinien und verlängerter Expositionsdauer für potenzielle Sicherheitsverletzungen.", + "remediation": "Um das Problem zu beheben, rufen Sie die Details eines Secrets ab und überprüfen Sie das LastAccessedDate, um ungenutzte Secrets zu identifizieren." 
+ } + } + }, + { + "name": "secret_not_changed_90d", + "title": "Ensure Secrets are Rotated Every 90 Days", + "result_kinds": ["aws_secretsmanager_secret"], + "categories": ["compliance"], + "risk": "Secrets that have not been rotated for more than 90 days have an extended exposure window: if such a secret is compromised, an attacker can keep using it until the next rotation.", + "severity": "medium", + "detect": { + "resoto": "is(aws_secretsmanager_secret) and last_changed_date<{{secret_too_old_age.from_now}}" + }, + "default_values": { + "secret_too_old_age": "90d" + }, + "remediation": { + "text": "Set up automated or manual remediation to rotate old/expired credentials.", + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html" + }, + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Geheimnisse alle 90 Tage geändert werden", + "risk": "Geheimnisse, die seit mehr als 90 Tagen nicht geändert wurden, haben ein verlängertes Expositionsfenster: Wird ein solches Geheimnis kompromittiert, kann ein Angreifer es bis zur nächsten Rotation weiterverwenden.", + "remediation": "Richten Sie automatisierte oder manuelle Maßnahmen ein, um alte/abgelaufene Berechtigungen zu ändern." + } + } + }, + { + "name": "secret_rotated_as_scheduled", + "title": "Ensure secrets rotation is error-free and timely", + "result_kinds": ["aws_account"], + "categories": ["security", "compliance"], + "risk": "Failure to solve this issue may result in errors or issues with secrets rotation, compromising the security and compliance of the AWS cloud infrastructure.", + "severity": "medium", + "detect": { + "manual": "To verify if secrets are being rotated manually, use the AWS Secrets CLI to describe Secrets and check the LastRotatedDate for any issues with manual/automatic rotation." 
+ }, + "remediation": { + "text": "Retrieve the details of a secret using the DescribeSecret API call to view the LastRotatedDate and assess if any corrective action is required.", + "url": "https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DescribeSecret.html" + }, + "url": "https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DescribeSecret.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die regelmäßige Rotation von Geheimnissen fehlerfrei und rechtzeitig erfolgt", + "risk": "Das Nichtbeheben dieses Problems kann zu Fehlern oder Problemen bei der Rotation von Geheimnissen führen und die Sicherheit und Compliance der AWS-Cloudinfrastruktur gefährden.", + "remediation": "Rufen Sie die Details eines Geheimnisses über den API-Aufruf DescribeSecret ab, um das LastRotatedDate anzuzeigen und zu prüfen, ob Maßnahmen erforderlich sind." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_sns.json b/resotocore/resotocore/static/report/checks/aws/aws_sns.json new file mode 100644 index 0000000000..a8fbc0b2de --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_sns.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "sns", + "checks": [ + { + "name": "encryption_at_rest_enabled", + "title": "Ensure that Amazon SNS Topics are Encrypted at Rest Using KMS CMKs", + "result_kinds": ["aws_sns_topic"], + "categories": ["security", "compliance"], + "risk": "If encryption at rest is not enabled, sensitive information may be at risk of unauthorized access. A failure to protect this data can lead to potential security breaches and non-compliance with data privacy regulations.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sns_topic) with(empty, --> is(aws_kms_key))" + }, + "remediation": { + "text": "Activate Encryption at Rest using KMS CMKs for Amazon SNS topics. Opt for CMKs for superior management and privacy controls. 
This will protect messages stored at rest in SNS topics with keys managed in AWS Key Management Service.", + "url": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html" + }, + "url": "https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass Amazon SNS-Themen mit KMS-CMKs verschlüsselt sind", + "risk": "Wenn die Verschlüsselung ruhender Daten nicht aktiviert ist, können sensible Informationen einem unbefugten Zugriff ausgesetzt sein. Ein Versäumnis beim Schutz dieser Daten kann zu potenziellen Sicherheitsverletzungen und Datenschutzverstößen führen.", + "remediation": "Aktivieren Sie die Verschlüsselung ruhender Daten mit KMS-CMKs für Amazon SNS-Themen. Entscheiden Sie sich für CMKs, um eine überlegene Verwaltung und Datenschutzkontrollen zu gewährleisten. Dadurch werden Nachrichten, die in SNS-Themen gespeichert sind, mit in AWS Key Management Service verwalteten Schlüsseln geschützt." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_sqs.json b/resotocore/resotocore/static/report/checks/aws/aws_sqs.json new file mode 100644 index 0000000000..da7b0fa4d6 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_sqs.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "sqs", + "checks": [ + { + "name": "server_side_encryption_enabled", + "title": "Ensure that Server-Side Encryption is Enabled for SQS Queues", + "result_kinds": ["aws_sqs_queue"], + "categories": ["security", "compliance"], + "risk": "If server-side encryption is not enabled for SQS queues, sensitive information in transit could be compromised. 
This leads to risks of data leakage, breach of compliance regulations, and potential damage to the organization's reputation.", + "severity": "medium", + "detect": { + "resoto": "is(aws_sqs_queue) and sqs_kms_master_key_id==null" + }, + "remediation": { + "text": "To mitigate this risk, ensure server-side encryption is turned on for all Amazon SQS queues. If possible, leverage a Customer Master Key (CMK) for enhanced management and privacy benefits.", + "url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html" + }, + "url": "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-sse-existing-queue.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass die serverseitige Verschlüsselung für SQS-Warteschlangen aktiviert ist", + "risk": "Wenn die serverseitige Verschlüsselung für SQS-Warteschlangen nicht aktiviert ist, können vertrauliche Informationen im Ruhezustand kompromittiert werden. Dadurch besteht das Risiko von Datenlecks, Verstößen gegen Compliance-Vorschriften und potenziellem Schaden für den Ruf der Organisation.", + "remediation": "Um dieses Risiko zu minimieren, stellen Sie sicher, dass die serverseitige Verschlüsselung für alle Amazon SQS-Warteschlangen aktiviert ist. Nutzen Sie falls möglich einen Kundenschlüssel (CMK) für verbessertes Management und mehr Privatsphäre." 
+ } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_ssm.json b/resotocore/resotocore/static/report/checks/aws/aws_ssm.json new file mode 100644 index 0000000000..2074008591 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_ssm.json @@ -0,0 +1,99 @@ +{ + "provider": "aws", + "service": "ssm", + "checks": [ + { + "name": "managed_instance_compliance_patch_compliant", + "title": "Ensure Managed EC2 Instances Adhere to SSM Patch Manager Policies", + "result_kinds": ["aws_ec2_instance"], + "categories": ["security", "compliance"], + "risk": "If EC2 instances are not configured with SSM Patch Manager, they might miss crucial security patches and updates. This lack of patching can lead to potential security vulnerabilities and non-compliance with security standards.", + "severity": "medium", + "detect": { + "manual": "Navigate to AWS System Manager -> Patch Manager to verify the configuration." + }, + "remediation": { + "text": "To address this issue, ensure that all EC2 instances are under the SSM Patch Manager policy. Set up patching operations using the AWS Systems Manager's Quick Setup feature. This configuration provides centralized control over patching operations and enables automatic patch application.", + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-create-a-patch-policy.html" + }, + "source": "saad", + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-patch-configure.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass verwaltete EC2-Instanzen den Richtlinien des SSM Patch Managers entsprechen", + "risk": "Wenn EC2-Instanzen nicht mit dem SSM Patch Manager konfiguriert sind, können wichtige Sicherheitspatches und Updates verpasst werden. 
Dieses Fehlen von Patches kann zu potenziellen Sicherheitslücken und Nichteinhaltung von Sicherheitsstandards führen.", + "remediation": "Um dieses Problem zu beheben, stellen Sie sicher, dass alle EC2-Instanzen unter der SSM Patch Manager-Richtlinie stehen. Richten Sie Patching-Operationen mit der Quick Setup-Funktion des AWS Systems Managers ein. Diese Konfiguration bietet eine zentrale Kontrolle über Patching-Operationen und ermöglicht die automatische Patch-Anwendung." + } + } + }, + { + "name": "no_secrets_in_content", + "title": "Ensure SSM Documents Do Not Contain Secrets.", + "result_kinds": ["aws_ssm_document"], + "categories": ["security", "compliance"], + "risk": "If secrets get hard-coded into SSM documents, bad actors or malware could exploit these to gain lateral access to other services, thereby compromising the security of the entire AWS ecosystem.", + "severity": "medium", + "detect": { + "resoto_cmd": "search is(aws_ssm_document) and content!=null | detect-secrets --path content --with-secrets" + }, + "remediation": { + "text": "Utilize AWS Secrets Manager service to safely store and retrieve passwords and sensitive information. Avoid hard-coding secrets in SSM Documents.", + "url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html" + }, + "url": "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-secretsmanager-secret-generatesecretstring.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass SSM-Dokumente keine Geheimnisse enthalten.", + "risk": "Wenn Geheimnisse in SSM-Dokumenten fest codiert werden, können böswillige Akteure oder Malware diese ausnutzen, um seitlichen Zugriff auf andere Dienste zu erlangen und damit die Sicherheit des gesamten AWS-Ökosystems zu gefährden.", + "remediation": "Verwenden Sie den AWS Secrets Manager-Dienst, um Passwörter und sensible Informationen sicher zu speichern und abzurufen. 
Vermeiden Sie es, Geheimnisse in SSM-Dokumenten fest zu codieren." + } + } + }, + { + "name": "document_is_shared", + "title": "Ensure that AWS Systems Manager (SSM) documents are not inappropriately or inadvertently shared.", + "result_kinds": ["aws_ssm_document"], + "categories": ["security", "compliance"], + "risk": "Accidental sharing of SSM Documents poses a security risk. They might contain sensitive data, tokens, or secrets that if fallen into wrong hands could lead to unauthorized access or data breaches. It\u2019s crucial to manage this properly.", + "severity": "high", + "detect": { + "resoto": "is(aws_ssm_document) and document_shared_with_accounts not in [null, []]" + }, + "remediation": { + "text": "To mitigate this risk, review the document\u2019s contents before sharing. Enable the 'Block public sharing' feature for SSM Documents, and conform sharing only to trusted accounts via preferred AWS Region.", + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-managing-shared.html" + }, + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-doc-sharing.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass AWS Systems Manager (SSM)-Dokumente nicht unangemessen oder unbeabsichtigt geteilt werden.", + "risk": "Die versehentliche Weitergabe von SSM-Dokumenten birgt ein Sicherheitsrisiko. Sie könnten sensible Daten, Tokens oder Geheimnisse enthalten, die, wenn sie in die falschen Hände gelangen, zu unbefugtem Zugriff oder Datenverletzungen führen könnten. Es ist entscheidend, dies ordnungsgemäß zu verwalten.", + "remediation": "Um dieses Risiko zu mindern, überprüfen Sie den Inhalt des Dokuments, bevor Sie es teilen. Aktivieren Sie die Funktion 'Block public sharing' für SSM-Dokumente und beschränken Sie die Weitergabe nur auf vertrauenswürdige Konten über die bevorzugte AWS-Region." 
+ } + } + }, + { + "name": "resource_non_compliant", + "title": "Ensure AWS Systems Manager Compliance across Multiple Resources", + "result_kinds": ["aws_ec2_instance", "aws_dynamodb_table", "aws_ssm_document", "aws_s3_bucket"], + "categories": ["security", "compliance"], + "risk": "Non-compliance of AWS resources (EC2, DynamoDB, SSM, S3) with AWS Systems Manager policies could expose security vulnerabilities, and incite operational inconsistencies. This could infringe regulatory compliance laws risking system integrity, data safety, and reliability.", + "severity": "high", + "detect": { + "resoto": "is(aws_ssm_resource_compliance) --> is(aws_ec2_instance, aws_dynamodb_table, aws_ssm_document, aws_s3_bucket)" + }, + "remediation": { + "text": "To rectify non-compliance, identify the unmet AWS Systems Manager standards for each resource. Ensure proper setup of SSM Agent for EC2 instances, verify table settings for DynamoDB against best practices, fix S3 bucket's access controls and encryption settings. Enforce compliance policies, perform consistent audits, and leverage AWS Systems Manager automation to mend non-compliant resources.", + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-compliance.html" + }, + "url": "https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-compliance.html", + "localizations": { + "de": { + "title": "Stellen Sie die AWS Systems Manager-Konformität über mehrere Ressourcen sicher", + "risk": "Die Nicht-Konformität von AWS-Ressourcen (EC2, DynamoDB, SSM, S3) mit den AWS Systems Manager-Richtlinien kann Sicherheitsrisiken aufdecken und operationelle Inkonsistenzen verursachen. Dies könnte gegen gesetzliche Compliance-Vorschriften verstoßen und somit die Systemintegrität, Datensicherheit und Zuverlässigkeit gefährden.", + "remediation": "Um die Nicht-Konformität zu beheben, identifizieren Sie die nicht erfüllten AWS Systems Manager-Standards für jede Ressource. 
Stellen Sie sicher, dass der SSM-Agent für EC2-Instanzen ordnungsgemäß eingerichtet ist, überprüfen Sie die Tabelleneinstellungen für DynamoDB anhand bewährter Verfahren, beheben Sie die Zugriffskontrollen und Verschlüsselungseinstellungen für den S3-Bucket. Erzwingen Sie Konformitätsrichtlinien, führen Sie regelmäßige Audits durch und nutzen Sie die AWS Systems Manager-Automatisierung, um nicht-konforme Ressourcen zu beheben." + } + } + } + ] +} diff --git a/resotocore/resotocore/static/report/checks/aws/aws_wafv2.json b/resotocore/resotocore/static/report/checks/aws/aws_wafv2.json new file mode 100644 index 0000000000..6a576e17e7 --- /dev/null +++ b/resotocore/resotocore/static/report/checks/aws/aws_wafv2.json @@ -0,0 +1,29 @@ +{ + "provider": "aws", + "service": "wafv2", + "checks": [ + { + "name": "web_acl_logging_enabled", + "title": "Ensure that Logging is Enabled for Every Web ACL to Improve Security Insight", + "result_kinds": ["aws_waf_web_acl"], + "categories": ["security", "compliance"], + "risk": "Without Web ACL logging, comprehensive traffic insights, specifically the inspected and blocked requests, are lost. 
This absence of crucial data impedes security analysis, auditing, and the identification of threats or misconfigurations.", + "severity": "medium", + "detect": { + "resoto": "is(aws_waf_web_acl) and logging_configuration==null" + }, + "remediation": { + "text": "To remedy this, navigate to the 'WAF and Shield' service in AWS, select the relevant ACL, go to 'Logging', click 'Edit' and configure logging by selecting an S3 bucket.", + "url": "https://docs.aws.amazon.com/waf/latest/developerguide/logging.html" + }, + "url": "https://docs.aws.amazon.com/waf/latest/developerguide/logging-web-acl.html", + "localizations": { + "de": { + "title": "Stellen Sie sicher, dass für jeden Web ACL das Logging aktiviert ist, um die Sicherheitsüberwachung zu verbessern", + "risk": "Ohne Web ACL-Protokollierung gehen umfassende Einblicke in den Datenverkehr verloren, insbesondere in überprüfte und blockierte Anfragen. Das Fehlen dieser wichtigen Daten beeinträchtigt die Sicherheitsanalyse, Überprüfung und die Identifizierung von Bedrohungen oder Fehlkonfigurationen.", + "remediation": "Um dieses Problem zu beheben, navigieren Sie zum Dienst 'WAF und Shield' in AWS, wählen Sie das relevante ACL aus, gehen Sie zu 'Protokollierung', klicken Sie auf 'Bearbeiten' und konfigurieren Sie das Logging, indem Sie einen S3-Bucket auswählen." 
+ } + } + } + ] +} diff --git a/resotocore/resotocore/web/api.py b/resotocore/resotocore/web/api.py index 2590cb4fea..89728a47f5 100644 --- a/resotocore/resotocore/web/api.py +++ b/resotocore/resotocore/web/api.py @@ -228,6 +228,7 @@ def __add_routes(self, prefix: str) -> None: # Graph based model operations web.get(prefix + "/graph/{graph_id}/model", require(self.get_model, r)), web.patch(prefix + "/graph/{graph_id}/model", require(self.update_model, a)), + web.put(prefix + "/graph/{graph_id}/model", require(self.update_model, a)), web.get(prefix + "/graph/{graph_id}/model/uml", require(self.model_uml, r)), # CRUD Graph operations web.get(prefix + "/graph", require(self.list_graphs, r)), @@ -928,8 +929,9 @@ async def get_model(self, request: Request, deps: TenantDependencies) -> StreamR async def update_model(self, request: Request, deps: TenantDependencies) -> StreamResponse: graph_id = GraphName(request.match_info.get("graph_id", "resoto")) js = await self.json_from_request(request) + replace = request.method == "PUT" kinds: List[Kind] = from_js(js, List[Kind]) - model = await deps.model_handler.update_model(graph_id, kinds) + model = await deps.model_handler.update_model(graph_id, kinds, replace) return await single_result(request, to_js(model, strip_nulls=True)) async def get_node(self, request: Request, deps: TenantDependencies) -> StreamResponse: diff --git a/resotocore/tests/resotocore/db/arango_query_test.py b/resotocore/tests/resotocore/db/arango_query_test.py index 0360ec28b6..c203eeaaa5 100644 --- a/resotocore/tests/resotocore/db/arango_query_test.py +++ b/resotocore/tests/resotocore/db/arango_query_test.py @@ -116,12 +116,15 @@ def test_context(foo_model: Model, graph_db: GraphDB) -> None: aql, bind_vars = to_query(graph_db, QueryModel(parse_query(query).on_section("reported"), foo_model)) # query unfolds all nested loops assert aql == ( - "LET filter0 = (LET nested_distinct0 = (FOR m0 in `ns` FOR pre0 IN TO_ARRAY(m0.reported.nested) " - "FOR pre1 IN 
TO_ARRAY(pre0.inner) " - "FOR pre2 IN TO_ARRAY(m0.reported.parents) " - "FILTER ((@b0 IN m0.kinds) and ((pre0.name == @b1) and (pre1.name == @b2))) and (pre2.some_int == @b3) " - "RETURN DISTINCT m0) FOR m1 in nested_distinct0 " - 'RETURN m1) FOR result in filter0 RETURN UNSET(result, ["flat"])' + "LET filter0 = (LET nested_distinct0 = (FOR m0 in `ns` FOR pre0 IN " + "APPEND(TO_ARRAY(m0.reported.nested), {_internal: true}) FOR pre1 IN " + "APPEND(TO_ARRAY(pre0.inner), {_internal: true}) FOR pre2 IN " + "APPEND(TO_ARRAY(m0.reported.parents), {_internal: true}) FILTER ((@b0 IN " + "m0.kinds) and (((pre0.name == @b1) and ((pre1.name == @b2 AND " + "pre1._internal!=true)) AND pre0._internal!=true))) and ((pre2.some_int == " + "@b3 AND pre2._internal!=true)) RETURN DISTINCT m0) FOR m1 in " + "nested_distinct0 RETURN m1) FOR result in filter0 RETURN UNSET(result, " + '["flat"])' ) # coercing works correctly for context terms assert bind_vars["b1"] == "true" # true is coerced to a string diff --git a/resotocore/tests/resotocore/db/entitydb.py b/resotocore/tests/resotocore/db/entitydb.py index 7bbb8a012d..8922176e48 100644 --- a/resotocore/tests/resotocore/db/entitydb.py +++ b/resotocore/tests/resotocore/db/entitydb.py @@ -47,6 +47,10 @@ async def delete_value(self, value: T) -> None: key = self.key_fn(value) self.items.pop(key, None) + async def delete_many(self, keys: List[K]) -> None: + for key in keys: + self.items.pop(key, None) + async def create_update_schema(self) -> None: pass diff --git a/resotocore/tests/resotocore/model/__init__.py b/resotocore/tests/resotocore/model/__init__.py index 7dca0ad006..c7b7ea6819 100644 --- a/resotocore/tests/resotocore/model/__init__.py +++ b/resotocore/tests/resotocore/model/__init__.py @@ -33,6 +33,6 @@ async def uml_image( ) -> bytes: raise NotImplementedError - async def update_model(self, graph_name: GraphName, kinds: List[Kind]) -> Model: + async def update_model(self, graph_name: GraphName, kinds: List[Kind], replace: bool) 
-> Model: self.model = Model.from_kinds(kinds) return self.model diff --git a/resotocore/tests/resotocore/model/model_handler_test.py b/resotocore/tests/resotocore/model/model_handler_test.py index 8a12419230..6367831de6 100644 --- a/resotocore/tests/resotocore/model/model_handler_test.py +++ b/resotocore/tests/resotocore/model/model_handler_test.py @@ -2,9 +2,28 @@ from resotocore.db.db_access import DbAccess from resotocore.ids import GraphName +from resotocore.model.model import Model, ComplexKind, predefined_kinds from resotocore.model.model_handler import ModelHandlerDB +@pytest.mark.asyncio +async def test_update_delete(db_access: DbAccess, person_model: Model) -> None: + # step 0: clean slate + name = GraphName("ns") + mdb = await db_access.get_graph_model_db(name) + await mdb.wipe() + pdk = len(predefined_kinds) + handler = ModelHandlerDB(db_access, "http://localhost:8000") + # step 1: put current model + model = await handler.update_model(name, list(person_model.kinds.values()), True) + assert len(model) == len(person_model) + # step 2: update with only one kind + boo = ComplexKind("boo", [], []) + model = await handler.update_model(name, [boo], True) + assert len(model) == pdk + 1 + assert len([a async for a in mdb.keys()]) == pdk + 1 + + @pytest.mark.asyncio async def test_uml_generation(db_access: DbAccess) -> None: handler = ModelHandlerDB(db_access, "http://localhost:8000") diff --git a/resotocore/tests/resotocore/model/model_test.py b/resotocore/tests/resotocore/model/model_test.py index e869262d94..11dadeff45 100644 --- a/resotocore/tests/resotocore/model/model_test.py +++ b/resotocore/tests/resotocore/model/model_test.py @@ -292,6 +292,7 @@ def test_model_checking(person_model: Model) -> None: def test_property_path() -> None: + PropertyPath.from_string("Condition.IpAddress.`aws:SourceIp`[]") p1 = PropertyPath(["a", None, "c", None]) p2 = PropertyPath(["a", "b", "c", "d"]) p3 = PropertyPath(["a", "b"]) diff --git 
a/resotocore/tests/resotocore/report/inspector_service_test.py b/resotocore/tests/resotocore/report/inspector_service_test.py index 02944da671..ee7caee4a4 100644 --- a/resotocore/tests/resotocore/report/inspector_service_test.py +++ b/resotocore/tests/resotocore/report/inspector_service_test.py @@ -170,7 +170,8 @@ async def test_predefined_checks(inspector_service: InspectorService) -> None: checks = ReportCheckCollectionConfig.from_files() assert len(checks) > 0 for name, check in checks.items(): - assert (await inspector_service.validate_check_collection_config({CheckConfigRoot: check})) is None + validation = await inspector_service.validate_check_collection_config({CheckConfigRoot: check}) + assert validation is None, str(validation) async def test_predefined_benchmarks(inspector_service: InspectorService) -> None: @@ -179,7 +180,8 @@ async def test_predefined_benchmarks(inspector_service: InspectorService) -> Non for name, check in benchmarks.items(): config = {BenchmarkConfigRoot: check} cfg_id = ConfigId(name) - assert (await inspector_service.validate_benchmark_config(cfg_id, config)) is None + validation = await inspector_service.validate_benchmark_config(cfg_id, config) + assert validation is None, f"Benchmark: {name}" + str(validation) benchmark = BenchmarkConfig.from_config(ConfigEntity(cfg_id, config)) assert benchmark.clouds == ["aws"] diff --git a/resotolib/resotolib/core/actions.py b/resotolib/resotolib/core/actions.py index 04caa6a9d7..5d3695aa40 100644 --- a/resotolib/resotolib/core/actions.py +++ b/resotolib/resotolib/core/actions.py @@ -1,6 +1,6 @@ import threading import time -from contextlib import suppress +from contextlib import suppress, AbstractContextManager from logging import Logger from queue import Queue @@ -56,7 +56,7 @@ def info(self, message: str, logger: Optional[Logger] = None) -> None: def error(self, message: str, logger: Optional[Logger] = None) -> None: if logger: - logger.error(self.context_str + message) + 
logger.error(self.context_str + message, exc_info=True) self._info_message("error", message) @property @@ -84,6 +84,19 @@ def child_context(self, *context: str) -> "CoreFeedback": return self.with_context(*(self.context + list(context))) +class SuppressWithFeedback(AbstractContextManager[None]): + def __init__(self, message: str, feedback: CoreFeedback, logger: Optional[Logger] = None) -> None: + self.message = message + self.feedback = feedback + self.logger = logger + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Optional[bool]: + if exc_type is not None: + self.feedback.error(f"{self.message}: {exc_val}", self.logger) + return True # suppress exception + return None + + class CoreActions(threading.Thread): def __init__( self, diff --git a/resotoshell/resotoshell/promptsession.py b/resotoshell/resotoshell/promptsession.py index 156f732291..d54b0d8ff9 100644 --- a/resotoshell/resotoshell/promptsession.py +++ b/resotoshell/resotoshell/promptsession.py @@ -1,11 +1,12 @@ from __future__ import annotations + import pathlib import re import shutil from abc import ABC from re import Pattern from shutil import get_terminal_size -from typing import Iterable, Optional, List, Dict, Union, Tuple, Callable, Any +from typing import Iterable, Optional, List, Dict, Union, Tuple, Callable, Any, Set from attr import evolve from attrs import define, field @@ -814,7 +815,7 @@ async def core_metadata( log.debug("Fetching core metadata..") model = await client.model() - def path(p: Property) -> List[str]: + def path(p: Property, visited: Set[str]) -> List[str]: kind = p.kind name = p.name result = [name] @@ -822,17 +823,18 @@ def path(p: Property) -> List[str]: kind = kind[:-2] name += "[*]" result.append(name) - + key = f"{name}:{kind}" # prevent circular references kd = model.kinds.get(kind) - if kd is not None and kd.properties: - result.extend(name + "." 
+ pp for prop in kd.properties for pp in path(prop)) + if key not in visited and kd is not None and kd.properties: + visited.add(key) + result.extend(name + "." + pp for prop in kd.properties for pp in path(prop, visited)) return result aggregate_roots = {k: v for k, v in model.kinds.items() if v.aggregate_root and v.properties is not None} # filter out all dynamically created kinds visible_kinds = sorted(k for k, v in aggregate_roots.items() if value_in_path(v.metadata, ["dynamic"]) is None) - known_props = {p for v in aggregate_roots.values() for prop in v.properties or [] for p in path(prop)} + known_props = {p for v in aggregate_roots.values() for prop in v.properties or [] for p in path(prop, set())} info = await client.cli_info() cmds = [ from_json(cmd, CommandInfo) diff --git a/resotoworker/resotoworker/resotocore.py b/resotoworker/resotoworker/resotocore.py index bd58e60758..72ca0f980a 100644 --- a/resotoworker/resotoworker/resotocore.py +++ b/resotoworker/resotoworker/resotocore.py @@ -28,7 +28,7 @@ def create_graph_and_update_model(self, tempdir: str) -> None: resotocore_graph = self._config.resotoworker.graph dump_json = self._config.resotoworker.debug_dump_json self.create_graph(base_uri, resotocore_graph) - self.update_model(base_uri, dump_json=dump_json, tempdir=tempdir) + self.update_model(base_uri, resotocore_graph, dump_json=dump_json, tempdir=tempdir) def send_to_resotocore(self, graph: Graph, task_id: str, tempdir: str) -> None: if not ArgumentParser.args.resotocore_uri: @@ -76,10 +76,11 @@ def create_graph(self, resotocore_base_uri: str, resotocore_graph: str) -> None: def update_model( self, resotocore_base_uri: str, + resotocore_graph: str, dump_json: bool = False, tempdir: Optional[str] = None, ) -> None: - model_uri = f"{resotocore_base_uri}/model" + model_uri = f"{resotocore_base_uri}/graph/{resotocore_graph}/model" log.debug(f"Updating model via {model_uri}") @@ -102,7 +103,7 @@ def update_model( for attempt in Retrying(reraise=True, 
stop=stop_after_attempt(3), wait=wait_fixed(10)): with attempt: - request = requests.Request(method="PATCH", url=model_uri, data=model_json, headers=headers) + request = requests.Request(method="PUT", url=model_uri, data=model_json, headers=headers) r = self._send_request(request) if r.status_code != 200: log.error(r.content)