diff --git a/test/architecture_diagrams/pdf/weathertop-comp-1.pdf b/test/architecture_diagrams/pdf/weathertop-comp-1.pdf
index 91f42e2c77e..0cd04778c4d 100644
Binary files a/test/architecture_diagrams/pdf/weathertop-comp-1.pdf and b/test/architecture_diagrams/pdf/weathertop-comp-1.pdf differ
diff --git a/test/architecture_diagrams/pdf/weathertop-comp-2.pdf b/test/architecture_diagrams/pdf/weathertop-comp-2.pdf
index 6560b5752eb..696a90e9718 100644
Binary files a/test/architecture_diagrams/pdf/weathertop-comp-2.pdf and b/test/architecture_diagrams/pdf/weathertop-comp-2.pdf differ
diff --git a/test/architecture_diagrams/pdf/weathertop-high-level-architecture.pdf b/test/architecture_diagrams/pdf/weathertop-high-level-architecture.pdf
index 2889b977806..d4a8cdfbadc 100644
Binary files a/test/architecture_diagrams/pdf/weathertop-high-level-architecture.pdf and b/test/architecture_diagrams/pdf/weathertop-high-level-architecture.pdf differ
diff --git a/test/architecture_diagrams/png/weathertop-comp-2.png b/test/architecture_diagrams/png/weathertop-comp-2.png
index d6e29262556..cde8fb0552f 100644
Binary files a/test/architecture_diagrams/png/weathertop-comp-2.png and b/test/architecture_diagrams/png/weathertop-comp-2.png differ
diff --git a/test/architecture_diagrams/png/weathertop-high-level-architecture.png b/test/architecture_diagrams/png/weathertop-high-level-architecture.png
index d77e33c12b5..b2e94f82e02 100644
Binary files a/test/architecture_diagrams/png/weathertop-high-level-architecture.png and b/test/architecture_diagrams/png/weathertop-high-level-architecture.png differ
diff --git a/test/config/README.md b/test/config/README.md
new file mode 100644
index 00000000000..ae228d4f9e0
--- /dev/null
+++ b/test/config/README.md
@@ -0,0 +1,12 @@
+# Config
+This directory contains the shared configuration for the testing stacks.
+
+### [resources.yaml](resources.yaml)
+Names of the AWS resources in this testing stack that are shared across accounts.
+* `topic_name` [String] Where scheduled testing events are published. Allows cross-account `Publish`.
+* `bucket_name` [String] Where testing results are published. Allows cross-account `PutObject`.
+
+### [targets.yaml](targets.yaml)
+Name and configuration for each SDK language. Includes:
+* `account_id` [String] The AWS account where that SDK language's tests run
+* `status` [String] Whether the tests for that language are `enabled` or `disabled`
diff --git a/test/config/resources.yaml b/test/config/resources.yaml
new file mode 100644
index 00000000000..c59c24fde3a
--- /dev/null
+++ b/test/config/resources.yaml
@@ -0,0 +1,2 @@
+topic_name: "aws-weathertop-central-sns-fanout-topic"
+bucket_name: "aws-weathertop-central-log-bucket"
diff --git a/test/config/targets.yaml b/test/config/targets.yaml
new file mode 100644
index 00000000000..43cd3aa4080
--- /dev/null
+++ b/test/config/targets.yaml
@@ -0,0 +1,36 @@
+cpp:
+  account_id: "770244195820"
+  status: "disabled"
+dotnetv3:
+  account_id: "441997275833"
+  status: "disabled"
+gov2:
+  account_id: "234521034040"
+  status: "disabled"
+javascriptv3:
+  account_id: "875008041426"
+  status: "disabled"
+javav2:
+  account_id: "814548047983" # back-up "667348412466"
+  status: "disabled"
+kotlin:
+  account_id: "814548047983" # back-up "667348412466"
+  status: "disabled"
+php:
+  account_id: "733931915187"
+  status: "disabled"
+python:
+  account_id: "664857444588"
+  status: "disabled"
+ruby:
+  account_id: "616362385685"
+  status: "disabled"
+rust_dev_preview:
+  account_id: "050288538048"
+  status: "disabled"
+sap-abap:
+  account_id: "099736152523"
+  status: "disabled"
+swift:
+  account_id: "637397754108"
+  status: "disabled"
\ No newline at end of file
diff --git a/test/eventbridge_rule_with_sns_fanout/README.md b/test/eventbridge_rule_with_sns_fanout/README.md
index 8734ba81767..47fc3a7a244 100644
--- a/test/eventbridge_rule_with_sns_fanout/README.md
+++ b/test/eventbridge_rule_with_sns_fanout/README.md
@@ -18,13 +18,10 @@ Specifically, it deploys a scheduled Amazon EventBridge rule that publishes a me
 * AWS access key and secret for AWS user with permissions to create the preceding resources
 * Successfully written [system parameters](#storing-system-parameters)
 
-### Storing system parameters
+### Updating configuration data
 
-Before you get started, execute [store_system_parameters.py](store_system_params.py) as described in the code comments:
+Before you get started, update [config/resources.yaml](../config/resources.yaml) and [config/targets.yaml](../config/targets.yaml) with the logical names of the test targets and their corresponding AWS account IDs and enabled status.
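The stacks in this change load these files with PyYAML, and cross-account permissions are granted only to targets whose status is `enabled`. A minimal sketch of that pattern (not part of the diff, using the same relative paths as the stacks themselves) looks like this:

```python
import yaml


def get_yaml_config(filepath):
    # Parse a YAML file into a dict; mirrors the helper defined in the stacks below.
    with open(filepath, "r") as file:
        return yaml.safe_load(file)


# Shared resource names.
resources = get_yaml_config("../../config/resources.yaml")
topic_name = resources["topic_name"]
bucket_name = resources["bucket_name"]

# Account IDs of targets whose tests are enabled.
targets = get_yaml_config("../../config/targets.yaml")
enabled_account_ids = [
    cfg["account_id"] for cfg in targets.values() if cfg["status"] == "enabled"
]
```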
-``` -python3 store_system_parameters.py -``` --- ## AWS CDK setup and deployment diff --git a/test/eventbridge_rule_with_sns_fanout/app.py b/test/eventbridge_rule_with_sns_fanout/app.py index 645841fa824..2f0b75eb5e3 100644 --- a/test/eventbridge_rule_with_sns_fanout/app.py +++ b/test/eventbridge_rule_with_sns_fanout/app.py @@ -1,15 +1,18 @@ #!/usr/bin/env python3 -from aws_cdk import App -import aws_cdk as cdk +import os +import aws_cdk as cdk +from aws_cdk import App from producer_stack.producer_stack import ProducerStack -import os - app = App() -ProducerStack(app, "ProducerStack", - env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), - ) +ProducerStack( + app, + "ProducerStack", + env=cdk.Environment( + account=os.getenv("CDK_DEFAULT_ACCOUNT"), region=os.getenv("CDK_DEFAULT_REGION") + ), +) -app.synth() \ No newline at end of file +app.synth() diff --git a/test/eventbridge_rule_with_sns_fanout/producer_stack/__init__.py b/test/eventbridge_rule_with_sns_fanout/producer_stack/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/eventbridge_rule_with_sns_fanout/producer_stack/producer_stack.py b/test/eventbridge_rule_with_sns_fanout/producer_stack/producer_stack.py index 1e1cdb2b07b..0896be3dda4 100644 --- a/test/eventbridge_rule_with_sns_fanout/producer_stack/producer_stack.py +++ b/test/eventbridge_rule_with_sns_fanout/producer_stack/producer_stack.py @@ -1,96 +1,115 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 -import boto3 -from aws_cdk import ( - aws_iam as iam, - aws_events as events, - aws_events_targets as targets, - aws_sns as sns, - aws_kinesis as kinesis, - aws_sns_subscriptions as subscriptions, - aws_logs as logs, - Aws, - Stack -) +import yaml +from aws_cdk import Aws, CfnOutput, Duration, Size, Stack +from aws_cdk import aws_events as events +from aws_cdk import aws_events_targets as targets +from aws_cdk import aws_iam as iam +from aws_cdk import aws_s3 as s3 +from aws_cdk import aws_sns as sns from constructs import Construct + class ProducerStack(Stack): def __init__(self, scope: Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) + acct_config = self.get_yaml_config("../../config/targets.yaml") + resource_config = self.get_yaml_config("../../config/resources.yaml") + topic_name = resource_config["topic_name"] + bucket_name = resource_config["bucket_name"] + topic = self.init_get_topic(topic_name) + self.sns_permissions(topic) + self.init_subscribe_permissions(topic, acct_config) + self.init_publish_permissions(topic, acct_config) + bucket = self.init_create_bucket(bucket_name) + self.init_cross_account_log_role(acct_config, bucket) - client = boto3.client('ssm') - - onboarded_languages = [ - 'ruby', - 'javav2', - # 'javascriptv3' - 'gov2', - 'python', - 'dotnetv3', - # 'kotlin' - # 'rust_dev_preview' - # 'swift' - # 'cpp' - # 'gov2' - # 'sap-abap' - ] - - account_ids = [] - for language_name in onboarded_languages: - response = client.get_parameter(Name=f'/account-mappings/{language_name}', WithDecryption=True) - account_ids.append(response['Parameter']['Value']) + def get_yaml_config(self, filepath): + with open(filepath, "r") as file: + data = yaml.safe_load(file) + return data - # Create a new Amazon Simple Notification Service (Amazon SNS) topic. 
- topic = sns.Topic(self, "fanout-topic") + def init_get_topic(self, topic_name): + topic = sns.Topic(self, "fanout-topic", topic_name=topic_name) + return topic - # Create a new Amazon EventBridge rule. + def init_rule(self, topic): rule = events.Rule( self, "trigger-rule", schedule=events.Schedule.cron( - # Uncomment after testing. - # minute="0", - # hour="22", - # week_day="FRI", + minute="0", + hour="22", + week_day="FRI", ), ) - - # Add a target to the EventBridge rule to publish a message to the SNS topic. rule.add_target(targets.SnsTopic(topic)) + def sns_permissions(self, topic): # Set up base Amazon SNS permissions. sns_permissions = iam.PolicyStatement() sns_permissions.add_any_principal() sns_permissions.add_actions( - "SNS:Publish", - "SNS:RemovePermission", - "SNS:SetTopicAttributes", - "SNS:DeleteTopic", - "SNS:ListSubscriptionsByTopic", - "SNS:GetTopicAttributes", - "SNS:AddPermission", - "SNS:Subscribe" - ) + "SNS:AddPermission", + "SNS:DeleteTopic", + "SNS:GetTopicAttributes", + "SNS:ListSubscriptionsByTopic", + "SNS:SetTopicAttributes", + "SNS:Subscribe", + "SNS:RemovePermission", + "SNS:Publish", + ) sns_permissions.add_resources(topic.topic_arn) - sns_permissions.add_condition("StringEquals", {"AWS:SourceOwner": Aws.ACCOUNT_ID}) + sns_permissions.add_condition( + "StringEquals", {"AWS:SourceOwner": Aws.ACCOUNT_ID} + ) topic.add_to_resource_policy(sns_permissions) - # Set up cross-account Subscription permissions for every onboarded language. + def init_subscribe_permissions(self, topic, target_accts): subscribe_permissions = iam.PolicyStatement() - subscribe_permissions.add_arn_principal(f'arn:aws:iam::{Aws.ACCOUNT_ID}:root') - for id in account_ids: - subscribe_permissions.add_arn_principal(f'arn:aws:iam::{id}:root') + subscribe_permissions.add_arn_principal(f"arn:aws:iam::{Aws.ACCOUNT_ID}:root") + for language in target_accts.keys(): + if "enabled" in str(target_accts[language]["status"]): + subscribe_permissions.add_arn_principal( + f"arn:aws:iam::{str(target_accts[language]['account_id'])}:root" + ) subscribe_permissions.add_actions("SNS:Subscribe") subscribe_permissions.add_resources(topic.topic_arn) topic.add_to_resource_policy(subscribe_permissions) - # Set up cross-account Publish permissions for every onboarded language. + def init_publish_permissions(self, topic, target_accts): publish_permissions = iam.PolicyStatement() - publish_permissions.add_arn_principal(f'arn:aws:iam::{Aws.ACCOUNT_ID}:root') - for id in account_ids: - subscribe_permissions.add_arn_principal(f'arn:aws:iam::{id}:root') + publish_permissions.add_arn_principal(f"arn:aws:iam::{Aws.ACCOUNT_ID}:root") + for language in target_accts.keys(): + publish_permissions.add_arn_principal( + f"arn:aws:iam::{str(target_accts[language]['account_id'])}:root" + ) publish_permissions.add_actions("SNS:Publish") publish_permissions.add_service_principal("events.amazonaws.com") publish_permissions.add_resources(topic.topic_arn) topic.add_to_resource_policy(publish_permissions) + + def init_create_bucket(self, bucket_name): + bucket = s3.Bucket( + self, + bucket_name, + bucket_name=bucket_name, + versioned=False, + block_public_access=s3.BlockPublicAccess.BLOCK_ALL, + ) + return bucket + + def init_cross_account_log_role(self, target_accts, bucket): + languages = target_accts.keys() + if len(languages) > 0: + # Define policy that allows cross-account Amazon SNS and Amazon SQS access. 
+ statement = iam.PolicyStatement() + statement.add_actions("s3:PutObject", "s3:PutObjectAcl") + statement.add_resources(f"{bucket.bucket_arn}/*") + for language in languages: + if "enabled" in str(target_accts[language]["status"]): + statement.add_arn_principal( + f"arn:aws:iam::{str(target_accts[language]['account_id'])}:role/LogsLambdaExecutionRole" + ) + bucket.add_to_resource_policy(statement) diff --git a/test/eventbridge_rule_with_sns_fanout/requirements.txt b/test/eventbridge_rule_with_sns_fanout/requirements.txt index eee10ce3450..2c97bd3e5c2 100644 --- a/test/eventbridge_rule_with_sns_fanout/requirements.txt +++ b/test/eventbridge_rule_with_sns_fanout/requirements.txt @@ -1,4 +1,7 @@ +boto3 aws-cdk-lib>=2.0.0 constructs>=10.0.0 types-boto3 types-setuptools +random +pyyaml \ No newline at end of file diff --git a/test/eventbridge_rule_with_sns_fanout/store_system_parameters.py b/test/eventbridge_rule_with_sns_fanout/store_system_parameters.py deleted file mode 100644 index 987a3424bf1..00000000000 --- a/test/eventbridge_rule_with_sns_fanout/store_system_parameters.py +++ /dev/null @@ -1,34 +0,0 @@ -import boto3 - - -# This file writes key-value mappings to AWS Parameter Store. -def generate_parameter_store_values(account_id_mappings): - # Create an SSM client using boto3 library - ssm = boto3.client('ssm') - - # Iterate through each key-value pair in the account_id_mappings list - for key, value in account_id_mappings: - try: - # Check if the parameter with the specified name (key) already exists in Parameter Store - ssm.get_parameter(Name=key, WithDecryption=True) - - # If the parameter already exists, update its value with the new value - print(f"Parameter '{key}' already exists. Updating the value...") - ssm.put_parameter(Name=key, Value=value, Type='SecureString', Overwrite=True) - except ssm.exceptions.ParameterNotFound: - # If the parameter does not exist, create a new parameter with the given key-value pair - print(f"Parameter '{key}' does not exist. 
Creating a new parameter...") - ssm.put_parameter(Name=key, Value=value, Type='SecureString') - -if __name__ == "__main__": - # List of key-value pairs representing parameter names and their corresponding values - mappings = [ - ('/account-mappings/ruby', '260778392212'), - ('/account-mappings/dotnetv3', '565846806325'), - ('/account-mappings/javav2', '814548047983'), - ('/account-mappings/python', '489398071715'), - ('/account-mappings/gov2', '489398071715') - ] - - # Call the function to generate or update the parameter values in Parameter Store - generate_parameter_store_values(mappings) diff --git a/test/public_ecr_repositories/app.py b/test/public_ecr_repositories/app.py index 7563cfc633f..2e7da12e743 100644 --- a/test/public_ecr_repositories/app.py +++ b/test/public_ecr_repositories/app.py @@ -2,12 +2,16 @@ import os import aws_cdk as cdk - -from public_ecr_repositories_stack.public_ecr_repositories_stack import PublicEcrRepositoriesStack +from public_ecr_repositories_stack.public_ecr_repositories_stack import \ + PublicEcrRepositoriesStack app = cdk.App() -PublicEcrRepositoriesStack(app, "PublicEcrRepositoriesStack", - env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), - ) +PublicEcrRepositoriesStack( + app, + "PublicEcrRepositoriesStack", + env=cdk.Environment( + account=os.getenv("CDK_DEFAULT_ACCOUNT"), region=os.getenv("CDK_DEFAULT_REGION") + ), +) app.synth() diff --git a/test/public_ecr_repositories/public_ecr_repositories_stack/public_ecr_repositories_stack.py b/test/public_ecr_repositories/public_ecr_repositories_stack/public_ecr_repositories_stack.py index 18830747faa..2de01114a39 100644 --- a/test/public_ecr_repositories/public_ecr_repositories_stack/public_ecr_repositories_stack.py +++ b/test/public_ecr_repositories/public_ecr_repositories_stack/public_ecr_repositories_stack.py @@ -1,38 +1,30 @@ -from aws_cdk import ( - Stack, - aws_ecr as ecr -) +from aws_cdk import Stack +from aws_cdk import aws_ecr as ecr from constructs import Construct +import yaml class PublicEcrRepositoriesStack(Stack): - def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) - languages = [ - 'ruby', - 'javav2', - 'javascriptv3', - 'gov2', - 'python', - 'dotnetv3', - 'kotlin', - 'rust_dev_preview', - 'swift', - 'cpp', - 'sap-abap' - ] - - for language in languages: + target_accts = self.get_yaml_config("../../config/targets.yaml") - usage_text = f'This image provides a pre-built SDK for {language} environment and is recommended for local testing of SDK for {language} example code. It is not intended for production usage. For detailed and up-to-date steps on running this image, see https://github.com/awsdocs/aws-doc-sdk-examples/blob/main/{language}/README.md#docker-image-beta.' + for language in target_accts.keys(): + usage_text = f"This image provides a pre-built SDK for {language} environment and is recommended for local testing of SDK for {language} example code. It is not intended for production usage. For detailed and up-to-date steps on running this image, see https://github.com/awsdocs/aws-doc-sdk-examples/blob/main/{language}/README.md#docker-image-beta." 
repository_description = f'This image provides a pre-built for SDK for {language} environment and is recommended for local testing of SDK for {language} example code."' - ecr.CfnPublicRepository(self, f"{language}", + ecr.CfnPublicRepository( + self, + f"{language}", repository_name=language, repository_catalog_data={ "UsageText": usage_text, "OperatingSystems": ["Linux"], "Architectures": ["x86", "ARM"], - "RepositoryDescription": repository_description - } + "RepositoryDescription": repository_description, + }, ) + + def get_yaml_config(self, filepath): + with open(filepath, "r") as file: + data = yaml.safe_load(file) + return data diff --git a/test/sqs_lambda_to_batch_fargate/.gitignore b/test/sqs_lambda_to_batch_fargate/.gitignore index 2ea578c8980..aa0c68db66b 100644 --- a/test/sqs_lambda_to_batch_fargate/.gitignore +++ b/test/sqs_lambda_to_batch_fargate/.gitignore @@ -9,4 +9,5 @@ __pycache__ # CDK asset staging directory .cdk.staging cdk.out -ada.log \ No newline at end of file +ada.log +cdk.context.json \ No newline at end of file diff --git a/test/sqs_lambda_to_batch_fargate/__init__.py b/test/sqs_lambda_to_batch_fargate/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/sqs_lambda_to_batch_fargate/app.py b/test/sqs_lambda_to_batch_fargate/app.py index db352204353..1c386307b61 100644 --- a/test/sqs_lambda_to_batch_fargate/app.py +++ b/test/sqs_lambda_to_batch_fargate/app.py @@ -1,15 +1,18 @@ #!/usr/bin/env python3 -from aws_cdk import App -import aws_cdk as cdk +import os +import aws_cdk as cdk +from aws_cdk import App from consumer_stack.consumer_stack import ConsumerStack -import os - app = App() -ConsumerStack(app, "ConsumerStack", - env=cdk.Environment(account=os.getenv('CDK_DEFAULT_ACCOUNT'), region=os.getenv('CDK_DEFAULT_REGION')), - ) +ConsumerStack( + app, + f"ConsumerStack-{os.getenv('LANGUAGE_NAME')}", + env=cdk.Environment( + account=os.getenv("CDK_DEFAULT_ACCOUNT"), region=os.getenv("CDK_DEFAULT_REGION") + ), +) app.synth() diff --git a/test/sqs_lambda_to_batch_fargate/cdk.context.json b/test/sqs_lambda_to_batch_fargate/cdk.context.json deleted file mode 100644 index be0f4603e74..00000000000 --- a/test/sqs_lambda_to_batch_fargate/cdk.context.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "vpc-provider:account=260778392212:filter.isDefault=true:region=us-east-1:returnAsymmetricSubnets=true": { - "vpcId": "vpc-0c4a8ac37436f701a", - "vpcCidrBlock": "172.31.0.0/16", - "availabilityZones": [], - "subnetGroups": [ - { - "name": "Public", - "type": "Public", - "subnets": [ - { - "subnetId": "subnet-0155e9561863fdcca", - "cidr": "172.31.32.0/20", - "availabilityZone": "us-east-1a", - "routeTableId": "rtb-0453fcf7da42da759" - }, - { - "subnetId": "subnet-099591e1bbcc9328c", - "cidr": "172.31.0.0/20", - "availabilityZone": "us-east-1b", - "routeTableId": "rtb-0453fcf7da42da759" - }, - { - "subnetId": "subnet-01956184cb64b1999", - "cidr": "172.31.80.0/20", - "availabilityZone": "us-east-1c", - "routeTableId": "rtb-0453fcf7da42da759" - }, - { - "subnetId": "subnet-069f1c45108e64148", - "cidr": "172.31.16.0/20", - "availabilityZone": "us-east-1d", - "routeTableId": "rtb-0453fcf7da42da759" - }, - { - "subnetId": "subnet-05ddd6f4a04ad984c", - "cidr": "172.31.48.0/20", - "availabilityZone": "us-east-1e", - "routeTableId": "rtb-0453fcf7da42da759" - }, - { - "subnetId": "subnet-06a225ba1d4ccfa85", - "cidr": "172.31.64.0/20", - "availabilityZone": "us-east-1f", - "routeTableId": "rtb-0453fcf7da42da759" - } - ] - } - ] - }, - 
"vpc-provider:account=808326389482:filter.isDefault=true:region=us-east-1:returnAsymmetricSubnets=true": { - "vpcId": "vpc-01ba5951652359b1e", - "vpcCidrBlock": "172.31.0.0/16", - "availabilityZones": [], - "subnetGroups": [ - { - "name": "Public", - "type": "Public", - "subnets": [ - { - "subnetId": "subnet-01bff8ca0688ecaa1", - "cidr": "172.31.16.0/20", - "availabilityZone": "us-east-1a", - "routeTableId": "rtb-06473eb82596c88b9" - }, - { - "subnetId": "subnet-006b4b85100dc8477", - "cidr": "172.31.32.0/20", - "availabilityZone": "us-east-1b", - "routeTableId": "rtb-06473eb82596c88b9" - }, - { - "subnetId": "subnet-05354b5d36d03dede", - "cidr": "172.31.0.0/20", - "availabilityZone": "us-east-1c", - "routeTableId": "rtb-06473eb82596c88b9" - }, - { - "subnetId": "subnet-0ceb9d90b1f31e337", - "cidr": "172.31.80.0/20", - "availabilityZone": "us-east-1d", - "routeTableId": "rtb-06473eb82596c88b9" - }, - { - "subnetId": "subnet-07c70ddf43d609a6a", - "cidr": "172.31.48.0/20", - "availabilityZone": "us-east-1e", - "routeTableId": "rtb-06473eb82596c88b9" - }, - { - "subnetId": "subnet-0f2db8cea8a43dcda", - "cidr": "172.31.64.0/20", - "availabilityZone": "us-east-1f", - "routeTableId": "rtb-06473eb82596c88b9" - } - ] - } - ] - } -} diff --git a/test/sqs_lambda_to_batch_fargate/consumer_stack/__init__.py b/test/sqs_lambda_to_batch_fargate/consumer_stack/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/sqs_lambda_to_batch_fargate/consumer_stack/consumer_stack.py b/test/sqs_lambda_to_batch_fargate/consumer_stack/consumer_stack.py index 01dc0f3c527..c5af9b62054 100644 --- a/test/sqs_lambda_to_batch_fargate/consumer_stack/consumer_stack.py +++ b/test/sqs_lambda_to_batch_fargate/consumer_stack/consumer_stack.py @@ -1,60 +1,58 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 -from aws_cdk import ( - aws_iam as iam, - aws_events as events, - aws_events_targets as targets, - aws_lambda as _lambda, - aws_lambda_event_sources as _event_sources, - aws_sqs as sqs, - aws_sns as sns, - aws_sns_subscriptions as subs, - aws_logs as logs, - aws_ec2 as ec2, - aws_ecs as ecs, - aws_ecr as ecr, - aws_batch as batch, - aws_batch_alpha as batch_alpha, - aws_kinesis as kinesis, - Aws, - Stack, - Size -) -from constructs import Construct -import json import os +from aws_cdk import Aws, Duration, Size, Stack +from aws_cdk import aws_batch_alpha as batch_alpha +from aws_cdk import aws_ec2 as ec2 +from aws_cdk import aws_ecs as ecs +from aws_cdk import aws_events as events +from aws_cdk import aws_events_targets as targets +from aws_cdk import aws_iam as iam +from aws_cdk import aws_lambda as _lambda +from aws_cdk import aws_lambda_event_sources as _event_sources +from aws_cdk import aws_s3 as s3 +from aws_cdk import aws_sns as sns +from aws_cdk import aws_sns_subscriptions as subs +from aws_cdk import aws_sqs as sqs +from constructs import Construct + # Raises KeyError if environment variable doesn't exist. 
language_name = os.environ["LANGUAGE_NAME"] producer_account_id = os.environ["PRODUCER_ACCOUNT_ID"] -fanout_topic_name = os.environ["FANOUT_TOPIC_NAME"] + class ConsumerStack(Stack): def __init__(self, scope: Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) - ############################################# - ## ## - ## RESOURCES ## - ## ## - ############################################# + ##################################################### + ## ## + ## FANOUT COMPONENTS ## + ## (SQS, SNS, and Subscriptions) ## + ## ## + ##################################################### # Locate Amazon Simple Notification Service (Amazon SNS) topic in the producer account. - fanout_topic_arn = f'arn:aws:sns:us-east-1:{producer_account_id}:{fanout_topic_name}' - sns_topic = sns.Topic.from_topic_arn(self, fanout_topic_name, topic_arn=fanout_topic_arn) - - container_image = ecs.EcrImage.from_registry(f"public.ecr.aws/b4v4v1s0/{language_name}:latest") + fanout_topic_name = "aws-weathertop-central-sns-fanout-topic" + fanout_topic_arn = ( + f"arn:aws:sns:us-east-1:{producer_account_id}:{fanout_topic_name}" + ) + sns_topic = sns.Topic.from_topic_arn( + self, fanout_topic_name, topic_arn=fanout_topic_arn + ) # Define the Amazon Simple Queue Service (Amazon SQS) queue in this account. - sqs_queue = sqs.Queue(self, f'BatchJobQueue-{language_name}') + sqs_queue = sqs.Queue(self, f"BatchJobQueue-{language_name}") # Create an AWS Identity and Access Management (IAM) role for the SNS topic to send messages to the SQS queue. sns_topic_role = iam.Role( - self, f"SNSTopicRole-{language_name}", - assumed_by=iam.ServicePrincipal('sns.amazonaws.com'), - description='Allows the SNS topic to send messages to the SQS queue in this account', - role_name=f'SNSTopicRole-{language_name}' + self, + f"SNSTopicRole-{language_name}", + assumed_by=iam.ServicePrincipal("sns.amazonaws.com"), + description="Allows the SNS topic to send messages to the SQS queue in this account", + role_name=f"SNSTopicRole-{language_name}", ) # Policy to allow existing SNS topic to publish to new SQS queue. @@ -62,26 +60,50 @@ def __init__(self, scope: Construct, id: str, **kwargs) -> None: effect=iam.Effect.ALLOW, actions=["sqs:SendMessage"], resources=[sqs_queue.queue_arn], - conditions={ - "ArnEquals": { - "aws:SourceArn": fanout_topic_arn - } - } + conditions={"ArnEquals": {"aws:SourceArn": fanout_topic_arn}}, ) - # Execution role for AWS Lambda function to use. - execution_role = iam.Role( - self, f"LambdaExecutionRole-{language_name}", - assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'), - description='Allows Lambda function to submit jobs to Batch', - role_name=f'LambdaExecutionRole-{language_name}' + # Create an SNS subscription for the SQS queue. + subs.SqsSubscription(sqs_queue, raw_message_delivery=True).bind(sns_topic) + sns_topic.add_subscription(subs.SqsSubscription(sqs_queue)) + + # Add the Amazon SNS and Amazon SQS policy to the IAM role. + sns_topic_role.add_to_policy(sns_topic_policy) + + # Define policy that allows cross-account Amazon SNS and Amazon SQS access. 
+ statement = iam.PolicyStatement() + statement.add_resources(sqs_queue.queue_arn) + statement.add_actions("sqs:*") + statement.add_arn_principal(f"arn:aws:iam::{producer_account_id}:root") + statement.add_arn_principal(f"arn:aws:iam::{Aws.ACCOUNT_ID}:root") + statement.add_condition("ArnLike", {"aws:SourceArn": fanout_topic_arn}) + sqs_queue.add_to_resource_policy(statement) + + ################################################ + ## ## + ## S3 BUCKET FOR LOGS ## + ## ## + ################################################ + + bucket = s3.Bucket( + self, + "LogBucket", + versioned=False, + block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) + ################################################ + ## ## + ## BATCH FARGATE JOBS ## + ## ## + ################################################ + batch_execution_role = iam.Role( - self, f"BatchExecutionRole-{language_name}", + self, + f"BatchExecutionRole-{language_name}", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"), inline_policies={ - "MyCustomPolicy": iam.PolicyDocument( + "BatchLoggingPolicy": iam.PolicyDocument( statements=[ iam.PolicyStatement( effect=iam.Effect.ALLOW, @@ -89,9 +111,9 @@ def __init__(self, scope: Construct, id: str, **kwargs) -> None: "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", - "logs:DescribeLogStreams" + "logs:DescribeLogStreams", ], - resources=["arn:aws:logs:*:*:*"] + resources=["arn:aws:logs:*:*:*"], ) ] ) @@ -102,99 +124,170 @@ def __init__(self, scope: Construct, id: str, **kwargs) -> None: ), iam.ManagedPolicy.from_aws_managed_policy_name( "service-role/AmazonECSTaskExecutionRolePolicy" - ) - ] + ), + ], ) - # Batch resources commented out due to bug: https://github.com/aws/aws-cdk/issues/24783. - # Using Alpha as workaround. + fargate_environment = batch_alpha.FargateComputeEnvironment( + self, + f"FargateEnv-{language_name}", + vpc=ec2.Vpc.from_lookup(self, "Vpc", is_default=True), + ) - fargate_environment = batch_alpha.FargateComputeEnvironment(self, f"FargateEnv-{language_name}", - vpc=ec2.Vpc.from_lookup(self, "Vpc", is_default=True) + container_image = ecs.EcrImage.from_registry( + f"public.ecr.aws/b4v4v1s0/{language_name}:latest" ) - job_definition = batch_alpha.EcsJobDefinition(self, f"JobDefinition-{language_name}", - container=batch_alpha.EcsFargateContainerDefinition(self, f"ContainerDefinition-{language_name}", + job_definition = batch_alpha.EcsJobDefinition( + self, + f"JobDefinition-{language_name}", + container=batch_alpha.EcsFargateContainerDefinition( + self, + f"ContainerDefinition-{language_name}", image=container_image, execution_role=batch_execution_role, - logging=ecs.AwsLogDriver(stream_prefix=f"weathertop/{language_name}", mode=ecs.AwsLogDriverMode.NON_BLOCKING), assign_public_ip=True, memory=Size.gibibytes(2), - cpu=1 - ) + cpu=1, + ), + timeout=Duration.minutes(500), ) - job_queue = batch_alpha.JobQueue(self, f"JobQueue-{language_name}", - priority=1 - ) + job_queue = batch_alpha.JobQueue(self, f"JobQueue-{language_name}", priority=1) job_queue.add_compute_environment(fargate_environment, 1) - # Define the Lambda function. 
- function = _lambda.Function(self, f'SubmitBatchJob-{language_name}', - runtime=_lambda.Runtime.PYTHON_3_8, - handler='lambda_handler.lambda_handler', - role=execution_role, - code=_lambda.Code.from_asset('lambda'), - environment={ - 'LANGUAGE_NAME': language_name, - 'JOB_QUEUE': job_queue.job_queue_arn, - 'JOB_DEFINITION': job_definition.job_definition_arn, - 'JOB_NAME': f'job-{language_name}' - } - ) - ################################################# ## ## - ## CONFIGURATION ## + ## BATCH LAMBDA FUNCTION ## + ## (Triggers Batch job from SQS queue) ## ## ## ################################################# - # Add the SQS queue as an event source for the Lambda function. - function.add_event_source(_event_sources.SqsEventSource(sqs_queue)) + # Execution role for AWS Lambda function to use. + execution_role = iam.Role( + self, + f"BatchLambdaExecutionRole-{language_name}", + assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), + description="Allows Lambda function to submit jobs to Batch", + role_name=f"BatchLambdaExecutionRole-{language_name}", + ) - # Create an SNS subscription for the SQS queue. - subs.SqsSubscription(sqs_queue, raw_message_delivery=True).bind(sns_topic) - sns_topic.add_subscription(subs.SqsSubscription(sqs_queue)) + execution_role.add_to_policy( + statement=iam.PolicyStatement(actions=["batch:*"], resources=["*"]) + ) - ########################################## - ## ## - ## ACCESS ## - ## ## - ########################################## + # Attach AWSLambdaBasicExecutionRole to the Lambda function's role + execution_role.add_managed_policy( + policy=iam.ManagedPolicy.from_aws_managed_policy_name( + "service-role/AWSLambdaBasicExecutionRole" + ) + ) - # Add the Amazon SNS and Amazon SQS policy to the IAM role. - sns_topic_role.add_to_policy(sns_topic_policy) + # Define the Lambda function. + function = _lambda.Function( + self, + f"SubmitBatchJob-{language_name}", + runtime=_lambda.Runtime.PYTHON_3_8, + handler="submit_job.handler", + role=execution_role, + code=_lambda.Code.from_asset("lambda"), + environment={ + "LANGUAGE_NAME": language_name, + "JOB_QUEUE": job_queue.job_queue_arn, + "JOB_DEFINITION": job_definition.job_definition_arn, + "JOB_NAME": f"job-{language_name}", + }, + ) + + # Add the SQS queue as an event source for the Lambda function. + function.add_event_source(_event_sources.SqsEventSource(sqs_queue)) # Grant permissions to allow the function to receive messages from the queue. sqs_queue.grant_consume_messages(function) - - # Grant permissions to allow the function to read messages from the queue and to write logs to Amazon CloudWatch. function.add_to_role_policy( statement=iam.PolicyStatement( - actions=['sqs:ReceiveMessage'], - resources=[sqs_queue.queue_arn] + actions=["sqs:ReceiveMessage"], resources=[sqs_queue.queue_arn] ) ) function.add_to_role_policy( statement=iam.PolicyStatement( - actions=['logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:PutLogEvents'], - resources=["*"] + actions=[ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + ], + resources=["*"], + ) + ) + + ################################################# + ## ## + ## LOG LAMBDA FUNCTION ## + ## (Processes logs and puts them to S3) ## + ## ## + ################################################# + + # Execution role for AWS Lambda function to use. 
+ execution_role = iam.Role( + self, + f"LogsLambdaExecutionRole", + assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"), + description="Allows Lambda function to get logs from CloudWatch", + role_name=f"LogsLambdaExecutionRole", + ) + + # Attach AWSLambdaBasicExecutionRole to the Lambda function's role. + execution_role.add_managed_policy( + policy=iam.ManagedPolicy.from_aws_managed_policy_name( + "service-role/AWSLambdaBasicExecutionRole" ) ) + # Grants ability to get logs from CloudWatch. execution_role.add_to_policy( statement=iam.PolicyStatement( - actions=['batch:*'], - resources=["*"] + actions=["logs:GetLogEvents", "logs:DescribeLogStreams"], + resources=[f"arn:aws:logs:us-east-1:{Aws.ACCOUNT_ID}:*"], ) ) - # Define policy that allows cross-account Amazon SNS and Amazon SQS access. - statement = iam.PolicyStatement() - statement.add_resources(sqs_queue.queue_arn) - statement.add_actions("sqs:*") - statement.add_arn_principal(f'arn:aws:iam::{producer_account_id}:root') - statement.add_arn_principal(f'arn:aws:iam::{Aws.ACCOUNT_ID}:root') - statement.add_condition("ArnLike", {"aws:SourceArn": fanout_topic_arn}) - sqs_queue.add_to_resource_policy(statement) + # Grants ability to get and put to local logs bucket. + execution_role.add_to_policy( + statement=iam.PolicyStatement( + actions=["s3:PutObject", "s3:GetObject"], + resources=[f"{bucket.bucket_arn}/*"], + ) + ) + + # Grants ability to write to cross-account log bucket. + execution_role.add_to_policy( + statement=iam.PolicyStatement( + actions=["s3:PutObject", "s3:PutObjectAcl"], + resources=["arn:aws:s3:::aws-weathertop-central-log-bucket/*"], + ) + ) + + # Define the Lambda function. + lambda_function = _lambda.Function( + self, + "BatchJobCompleteLambda", + runtime=_lambda.Runtime.PYTHON_3_8, + handler="export_logs.handler", + role=execution_role, + code=_lambda.Code.from_asset("lambda"), + environment={ + "LANGUAGE_NAME": language_name, + "BUCKET_NAME": bucket.bucket_name, + "PRODUCER_BUCKET_NAME": "aws-weathertop-central-log-bucket", + }, + ) + + batch_rule = events.Rule( + self, + "BatchAllEventsRule", + event_pattern=events.EventPattern(source=["aws.batch"]), + ) + + # Add the Lambda function as a target for the CloudWatch Event Rule. + batch_rule.add_target(targets.LambdaFunction(lambda_function)) diff --git a/test/sqs_lambda_to_batch_fargate/lambda/__init__.py b/test/sqs_lambda_to_batch_fargate/lambda/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/sqs_lambda_to_batch_fargate/lambda/export_logs.py b/test/sqs_lambda_to_batch_fargate/lambda/export_logs.py new file mode 100644 index 00000000000..fdd08a0440a --- /dev/null +++ b/test/sqs_lambda_to_batch_fargate/lambda/export_logs.py @@ -0,0 +1,71 @@ +import json +import logging +import os +import random + +import boto3 + +s3_client = boto3.client("s3") +s3_resource = boto3.resource("s3") +logs_client = boto3.client("logs") + +logger = logging.getLogger() +logger.setLevel(logging.DEBUG) + +log_group_name = "/aws/batch/job" + + +def handler(event, context): + logger.debug(f"BUCKET_NAME: {os.environ['BUCKET_NAME']}") + logger.debug(f"INCOMING EVENT: {event}") + + status = event["detail"]["status"] + + if "Batch Job State Change" not in event["detail-type"]: + logger.info(f"Non-triggering Batch event: {event['detail-type']}") + return + if "TIMED_OUT" in status: + raise Exception( + "Job timed out. 
Contact application owner or increase time out threshold" + ) + if status not in ["FAILED", "SUCCEEDED"]: + logger.info(f"Non-triggering Batch status: STATUS: {status}") + return + + try: + get_and_put_logs() + except Exception as e: + logger.error(json.dumps(f"Error: {str(e)}")) + raise e + + +def get_and_put_logs(): + # Get most recent stream + log_streams = logs_client.describe_log_streams( + logGroupName=log_group_name, + orderBy="LastEventTime", + descending=True, + limit=1, + ) + + # Get log events from the stream + log_events = logs_client.get_log_events( + logGroupName=log_group_name, + logStreamName=log_streams["logStreams"][0]["logStreamName"], + startFromHead=True, + ) + + log_file = "\n".join( + [f"{e['timestamp']}, {e['message']}" for e in log_events["events"]] + ) + file_identifier = str(random.randint(10**7, 10**8 - 1)) + + s3_client.upload_fileobj( + log_file, + os.environ["PRODUCER_BUCKET_NAME"], + f"{os.environ['LANGUAGE_NAME']}/{file_identifier}", + ) + + logger.info( + f"Log data saved successfully: {os.environ['LANGUAGE_NAME']}/{file_identifier}" + ) diff --git a/test/sqs_lambda_to_batch_fargate/lambda/lambda_handler.py b/test/sqs_lambda_to_batch_fargate/lambda/lambda_handler.py deleted file mode 100644 index d6036ce4ae6..00000000000 --- a/test/sqs_lambda_to_batch_fargate/lambda/lambda_handler.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -import logging -import boto3 -import os - -logger = logging.getLogger() -logger.setLevel(logging.INFO) - -def lambda_handler(event, context): - try: - # Set up AWS Batch client. - batch = boto3.client('batch') - - # Log key details. - logger.info(f"JOB_NAME: {os.environ['JOB_NAME']}") - logger.info(f"JOB_QUEUE: {os.environ['JOB_QUEUE']}") - logger.info(f"JOB_DEFINITION: {os.environ['JOB_DEFINITION']}") - - # Set up job payload. - payload = { - 'jobName': os.environ['JOB_NAME'], - 'jobQueue': os.environ['JOB_QUEUE'], - 'jobDefinition': os.environ['JOB_DEFINITION'] - } - - # Submit job. - response = batch.submit_job(**payload) - - # Print job ID and status. - logger.info(f"Submitted job {str(response)}") - except Exception as e: - logger.error(f"Job submission failed:\n{e}") diff --git a/test/sqs_lambda_to_batch_fargate/lambda/submit_job.py b/test/sqs_lambda_to_batch_fargate/lambda/submit_job.py new file mode 100644 index 00000000000..697b817daf4 --- /dev/null +++ b/test/sqs_lambda_to_batch_fargate/lambda/submit_job.py @@ -0,0 +1,33 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import logging +import os + +import boto3 + +logger = logging.getLogger() +logger.setLevel(logging.DEBUG) + + +def handler(event, context): + logger.debug(f"INCOMING EVENT: {event}") + try: + # Set up AWS Batch client. + batch_client = boto3.client("batch") + + # Set up job payload. + payload = { + "jobName": os.environ["JOB_NAME"], + "jobQueue": os.environ["JOB_QUEUE"], + "jobDefinition": os.environ["JOB_DEFINITION"], + } + + # Submit job. + response = batch_client.submit_job(**payload) + + # Print job ID and status. 
+ logger.info(f"Submitted job {str(response)}") + except Exception as e: + logger.error(f"Job submission failed:\n{e}") + raise e diff --git a/test/sqs_lambda_to_batch_fargate/requirements.txt b/test/sqs_lambda_to_batch_fargate/requirements.txt index 56c90df4858..d61aa2e8182 100644 --- a/test/sqs_lambda_to_batch_fargate/requirements.txt +++ b/test/sqs_lambda_to_batch_fargate/requirements.txt @@ -2,3 +2,4 @@ aws-cdk-lib==2.88.0 constructs>=10.0.0 types-boto3 types-setuptools +aws_cdk.aws_batch_alpha