diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index e04adbb4..1ea6840f 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=linux/arm64 mcr.microsoft.com/vscode/devcontainers/go:dev-1.22 +FROM --platform=linux/arm64 mcr.microsoft.com/vscode/devcontainers/go:dev-1.23 RUN apt-get update -y && \ # Docker curl -fsSL https://get.docker.com | sh && \ diff --git a/.dockerignore b/.dockerignore index 22150c8d..d671f79c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,12 +2,11 @@ .git .github .vscode -build/config build/container build/scripts !build/scripts/aws/lambda/extension.zip build/terraform -examples +**/examples .dockerignore .gitignore .golangci.yml diff --git a/.github/ISSUE_TEMPLATE/adopters.yaml b/.github/ISSUE_TEMPLATE/adopters.yaml new file mode 100644 index 00000000..0f8fcd7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/adopters.yaml @@ -0,0 +1,45 @@ +name: Become an Adopter +description: Add the name of your organization to the list of adopters. +title: '[organization] has adopted Substation!' +body: + - type: markdown + attributes: + value: | + Thank you for supporting Substation! By adding your organization to the list of adopters, you help raise awareness for the project and grow our community of users. Please fill out the information below to be added to the [list of adopters](https://github.com/brexhq/substation/blob/main/ADOPTERS.md). + + - type: input + id: org-name + attributes: + label: Organization Name + description: Name of your organization. + placeholder: ex. Acme Corp + validations: + required: true + - type: input + id: org-url + attributes: + label: Organization Website + description: Link to your organization's website. + placeholder: ex. https://www.example.com + validations: + required: true + - type: dropdown + id: stage + attributes: + label: Stage of Adoption + description: What is your current stage of adoption? + options: + - We're learning about Substation + - We're testing Substation + - We're using Substation in production + - We're driving broad adoption of Substation + default: 0 + validations: + required: true + - type: textarea + id: use-case + attributes: + label: Description of Use + description: Write one or two sentences about how your organization is using Substation. + validations: + required: true diff --git a/.github/workflows/code.yml b/.github/workflows/code.yml index 40fe0522..8518ff36 100644 --- a/.github/workflows/code.yml +++ b/.github/workflows/code.yml @@ -2,57 +2,60 @@ name: code on: pull_request: - branches: [ main ] + branches: [main] jobs: go: runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 1 - - - name: setup - uses: actions/setup-go@v2 - with: - go-version: 1.22 - - - name: tests - run: go test -timeout 30s -v ./... - - - name: golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - version: latest + - name: Checkout Repository + uses: actions/checkout@v2 + with: + fetch-depth: 1 + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: 1.22 + + - name: Testing + run: go test -timeout 30s -v ./... 
+
+      - name: Linting
+        uses: golangci/golangci-lint-action@v3
+        with:
+          version: latest
 
   python:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - name: setup
+      - name: Checkout Repository
+        uses: actions/checkout@v2
+
+      - name: Setup Python
         uses: actions/setup-python@v2
         with:
-          python-version: '3.x'
+          python-version: "3.x"
 
-      - run: |
+      - name: Linting
+        run: |
           pip3 install black
           find -name *.py | xargs black --check
 
   jsonnet:
     runs-on: ubuntu-latest
     steps:
-      - name: checkout
+      - name: Checkout Repository
         uses: actions/checkout@v2
         with:
           fetch-depth: 1
 
-      - name: setup
+      - name: Setup Go
        uses: actions/setup-go@v2
         with:
           go-version: 1.22
 
-      - name: compile
+      - name: Compiling
         run: |
           go install github.com/google/go-jsonnet/cmd/jsonnet@latest
           sh build/scripts/config/compile.sh
diff --git a/ADOPTERS.md b/ADOPTERS.md
new file mode 100644
index 00000000..0a99053e
--- /dev/null
+++ b/ADOPTERS.md
@@ -0,0 +1,7 @@
+# Adopters
+
+If you're using Substation in your organization, please add your organization's name to this list. By [adding your name to this list](https://github.com/brexhq/substation/issues/new?assignees=&labels=&template=adopters.yaml&title=), you help raise awareness for the project and grow our community of users!
+
+| Organization | Contact | Description of Use |
+|--------------|---------|--------------------|
+| [Brex](https://www.brex.com) | [@jshlbrd](https://github.com/jshlbrd) | All security event and audit logs (~5 TB/day) used by Security are handled by Substation. |
diff --git a/MIGRATION.md b/MIGRATION.md
new file mode 100644
index 00000000..27dc80e5
--- /dev/null
+++ b/MIGRATION.md
@@ -0,0 +1,296 @@
+# Migration
+
+Use this as a guide for migrating between major versions of Substation.
+
+## v2.0.0
+
+### Applications (cmd/)
+
+#### AWS Lambda Handlers
+
+Multiple AWS Lambda handlers were renamed to better reflect the AWS service they interact with:
+- Renamed `AWS_KINESIS_DATA_FIREHOSE` to `AWS_DATA_FIREHOSE`.
+- Renamed `AWS_KINESIS` to `AWS_KINESIS_DATA_STREAM`.
+- Renamed `AWS_DYNAMODB` to `AWS_DYNAMODB_STREAM`.
+
+v1.x.x:
+
+```hcl
+module "node" {
+  source = "build/terraform/aws/lambda"
+
+  config = {
+    name        = "node"
+    description = "Substation node that is invoked by a Kinesis Data Stream."
+    image_uri   = "123456789012.dkr.ecr.us-east-1.amazonaws.com/substation:v1.0.0"
+    image_arm   = true
+
+    env = {
+      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node"
+      "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS"
+    }
+  }
+}
+```
+
+v2.x.x:
+
+```hcl
+module "node" {
+  source = "build/terraform/aws/lambda"
+
+  config = {
+    name        = "node"
+    description = "Substation node that is invoked by a Kinesis Data Stream."
+    image_uri   = "123456789012.dkr.ecr.us-east-1.amazonaws.com/substation:v2.0.0"
+    image_arm   = true
+
+    env = {
+      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node"
+      "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM"
+    }
+  }
+}
+```
+
+### Conditions (condition/)
+
+#### Conditioner Interface
+
+The `Inspector` interface was renamed to `Conditioner` to standardize the naming convention used across the project.
+
+#### `meta.condition` Condition
+
+This is replaced by the `meta.all`, `meta.any`, and `meta.none` conditions.
+
+v1.x.x:
+
+```jsonnet
+sub.cnd.all([
+  sub.cnd.str.eq({ value: 'FOO' }),
+  sub.cnd.meta.condition({ condition: sub.cnd.any([
+    sub.cnd.str.eq({ value: 'BAR' }),
+    sub.cnd.str.eq({ value: 'BAZ' }),
+  ]) }),
+]),
+```
+
+v2.x.x:
+
+```jsonnet
+sub.cnd.all([
+  sub.cnd.str.eq({ value: 'FOO' }),
+  sub.cnd.any([
+    sub.cnd.str.eq({ value: 'BAR' }),
+    sub.cnd.str.eq({ value: 'BAZ' }),
+  ]),
+]),
+```
+
+#### `meta.for_each` Condition
+
+This is replaced by the `meta.all`, `meta.any`, and `meta.none` conditions. If the `object.source_key` value is an array, then the data is treated as a list of elements.
+
+v1.x.x:
+
+```jsonnet
+sub.cnd.meta.for_each({
+  object: { source_key: 'field' },
+  type: 'any',
+  inspector: sub.cnd.str.eq({ value: 'FOO' }),
+})
+```
+
+v2.x.x:
+
+```jsonnet
+sub.cnd.meta.any([{
+  object: { source_key: 'field' },
+  conditions: [ sub.cnd.str.eq({ value: 'FOO' }) ],
+}])
+```
+
+#### `meta.negate` Condition
+
+This is replaced by the `meta.none` condition.
+
+v1.x.x:
+
+```jsonnet
+sub.cnd.meta.negate({ inspector: sub.cnd.str.eq({ value: 'FOO' }) })
+```
+
+v2.x.x:
+
+```jsonnet
+sub.cnd.meta.none({ conditions: [ sub.cnd.str.eq({ value: 'FOO' }) ] })
+```
+
+Equivalently, using the shorthand:
+
+```jsonnet
+sub.cnd.none([ sub.cnd.str.eq({ value: 'FOO' }) ])
+```
+
+#### `meta.err` Condition
+
+This is removed and was not replaced. Remove any references to this condition.
+
+### Transforms (transform/)
+
+#### `send.aws.*` Transforms
+
+The AWS resource fields were replaced by an `aws` object field that contains the sub-fields `arn` and `assume_role_arn`. The region for each AWS client is derived from either the resource ARN or the assumed role ARN.
+
+v1.x.x:
+
+```jsonnet
+sub.tf.send.aws.s3({
+  bucket_name: 'substation',
+  file_path: { time_format: '2006/01/02/15', uuid: true, suffix: '.json' },
+}),
+```
+
+v2.x.x:
+
+```jsonnet
+sub.tf.send.aws.s3({
+  aws: { arn: 'arn:aws:s3:::substation' },
+  file_path: { time_format: '2006/01/02/15', uuid: true, suffix: '.json' },
+}),
+```
+
+**NOTE: This change also applies to every configuration that relies on an AWS resource.**
+
+#### `meta.*` Transforms
+
+The `transform` field was removed from all transforms and replaced with the `transforms` field.
+
+v1.x.x:
+
+```jsonnet
+sub.tf.meta.switch({ cases: [
+  {
+    condition: sub.cnd.all([
+      sub.cnd.str.eq({ obj: { source_key: 'field' }, value: 'FOO' }),
+    ]),
+    transform: sub.tf.obj.insert({ object: { target_key: 'field' }, value: 'BAR' }),
+  },
+]})
+```
+
+v2.x.x:
+
+```jsonnet
+sub.tf.meta.switch({ cases: [
+  {
+    condition: sub.cnd.str.eq({ obj: { source_key: 'field' }, value: 'FOO' }),
+    transforms: [
+      sub.tf.obj.insert({ object: { target_key: 'field' }, value: 'BAR' })
+    ],
+  },
+]})
+```
+
+#### `meta.retry` Transform
+
+Retry settings were removed from all transforms and replaced by the `meta.retry` transform. It is recommended to create a reusable pattern for common retry scenarios.
+
+v1.x.x:
+
+```jsonnet
+sub.tf.send.aws.sqs({
+  arn: 'arn:aws:sqs:us-east-1:123456789012:substation',
+  retry: { count: 3 },
+})
+```
+
+v2.x.x:
+
+```jsonnet
+sub.tf.meta.retry({
+  retry: { count: 3, delay: '1s' },
+  transforms: [
+    sub.tf.send.aws.sqs({
+      aws: { arn: 'arn:aws:sqs:us-east-1:123456789012:substation' },
+    }),
+  ],
+})
+```
+
+**NOTE: For AWS services, retries for the client can be configured in Terraform by using the `AWS_MAX_ATTEMPTS` environment variable. This is used _in addition to_ the `meta.retry` transform.**
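+
+For example, a minimal sketch of pairing the two in the Lambda module's `env` block (the module layout follows the earlier examples; the retry count is illustrative):
+
+```hcl
+env = {
+  "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node"
+  "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM"
+  # AWS SDK client retries, applied in addition to any meta.retry transform.
+  "AWS_MAX_ATTEMPTS" : 3
+}
+```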
+
+#### `meta.pipeline` Transform
+
+This is removed and was not replaced. Remove any references to this transform; an ordered list of transforms in the `transforms` field, as used by the other meta transforms, provides the same behavior.
+
+#### `send.aws.dynamodb` Transform
+
+The `send.aws.dynamodb` transform was renamed to `send.aws.dynamodb.put`.
+
+v1.x.x:
+
+```jsonnet
+sub.tf.send.aws.dynamodb({
+  table_name: 'substation',
+}),
+```
+
+v2.x.x:
+
+```jsonnet
+sub.tf.send.aws.dynamodb.put({
+  aws: { arn: 'arn:aws:dynamodb:us-east-1:123456789012:table/substation' },
+}),
+```
+
+#### `enrich.aws.dynamodb` Transform
+
+The `enrich.aws.dynamodb` transform was renamed to `enrich.aws.dynamodb.query` and had these additional changes:
+- `PartitionKey` and `SortKey` now reference the column names in the DynamoDB table and are nested under the `Attributes` field.
+- By default, the value retrieved from `Object.SourceKey` is used as the `PartitionKey` value. If the `SortKey` is provided and the value from `Object.SourceKey` is an array, then the first element is used as the `PartitionKey` value and the second element is used as the `SortKey` value.
+- The `KeyConditionExpression` field was removed because it is now a derived value.
+
+v1.x.x:
+
+```jsonnet
+// In v1.x.x, the DynamoDB column names must always be 'PK' and/or 'SK'.
+sub.tf.obj.cp({ object: { src: 'id', trg: 'meta ddb.PK' } }),
+sub.transform.enrich.aws.dynamodb({
+  object: { source_key: 'meta ddb', target_key: 'user' },
+  table_name: 'substation',
+  partition_key: 'PK',
+  key_condition_expression: 'PK = :PK',
+}),
+```
+
+v2.x.x:
+
+```jsonnet
+sub.transform.enrich.aws.dynamodb.query({
+  object: { source_key: 'id', target_key: 'user' },
+  aws: { arn: 'arn:aws:dynamodb:us-east-1:123456789012:table/substation' },
+  attributes: {
+    partition_key: 'PK',
+  },
+}),
+```
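+
+A sketch of the array behavior described above (this assumes the sort key column is configured with an `attributes.sort_key` field, which is not confirmed elsewhere in this guide):
+
+```jsonnet
+// Hypothetical: 'meta ddb' holds ['pk-value', 'sk-value'], so the first
+// element becomes the PartitionKey and the second becomes the SortKey.
+sub.transform.enrich.aws.dynamodb.query({
+  object: { source_key: 'meta ddb', target_key: 'user' },
+  aws: { arn: 'arn:aws:dynamodb:us-east-1:123456789012:table/substation' },
+  attributes: {
+    partition_key: 'PK',
+    sort_key: 'SK',
+  },
+}),
+```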
+
+#### `send.aws.kinesis_data_firehose` Transform
+
+The `send.aws.kinesis_data_firehose` transform was renamed to `send.aws.data_firehose`.
+
+v1.x.x:
+
+```jsonnet
+sub.tf.send.aws.kinesis_data_firehose({
+  stream_name: 'substation',
+}),
+```
+
+v2.x.x:
+
+```jsonnet
+sub.tf.send.aws.data_firehose({
+  aws: { arn: 'arn:aws:firehose:us-east-1:123456789012:deliverystream/substation' },
+}),
+```
diff --git a/README.md b/README.md
index cab9dc61..b6802053 100644
--- a/README.md
+++ b/README.md
@@ -2,11 +2,17 @@ ![Substation Banner](.github/media/substation_banner.png)
 
-<p align="center">
-Substation is a toolkit for routing, normalizing, and enriching security event and audit logs.
-</p>
+<p align="center">
+Substation is a toolkit for routing, normalizing, and enriching security event and audit logs.
+</p>
-[Releases][releases]   |   [Docs][docs]   |   [Quickstart][quickstart]   |   [Announcement (2022)][announcement]   |   [v1.0 Release (2024)][v1_release]
+[Releases][releases]   |   [Documentation][docs]   |   [Quickstart][quickstart]   |   [Adopters][adopters]   |   [Announcement (2022)][announcement]   |   [v1.0 Release (2024)][v1_release]
+
+![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/brexhq/substation/code.yml?style=for-the-badge)
+![GitHub Release](https://img.shields.io/github/v/release/brexhq/substation?sort=semver&style=for-the-badge&link=https%3A%2F%2Fgithub.com%2Fbrexhq%2Fsubstation%2Freleases%2Flatest)
+![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/brexhq/substation?style=for-the-badge)
+![GitHub Created At](https://img.shields.io/github/created-at/brexhq/substation?style=for-the-badge&label=created)
+![GitHub License](https://img.shields.io/github/license/brexhq/substation?style=for-the-badge)
@@ -149,7 +155,7 @@ Substation excels at formatting, normalizing, and enriching event logs. For exam Substation can route data to several destinations from a single process and, unlike most other data pipeline systems, data transformation and routing are functionally equivalent -- this means that data can be transformed or routed in any order. -In this configuration, data is: +In this configuration, data is: - Written to AWS S3 - Printed to stdout @@ -171,8 +177,8 @@ local is_false = sub.cnd.str.eq({ object: { source_key: 'field3' }, value: 'fals { transforms: [ // Pre-transformed data is written to an object in AWS S3 for long-term storage. - sub.tf.send.aws.s3({ bucket_name: 'example-bucket-name' }), - // The JSON array is split into individual events that go through + sub.tf.send.aws.s3({ aws: { arn: 'arn:aws:s3:::example-bucket-name' } }), + // The JSON array is split into individual events that go through // the remaining transforms. Each event is printed to stdout. sub.tf.agg.from.array(), sub.tf.send.stdout(), @@ -195,11 +201,15 @@ local sub = import 'substation.libsonnet'; // the event is written to an object in AWS S3. sub.tf.meta.switch({ cases: [ { - condition: sub.cnd.any(sub.cnd.str.eq({ object: { source_key: 'field3' }, value: 'false' })), - transform: sub.tf.send.http.post({ url: 'https://example-http-endpoint.com' }), + condition: sub.cnd.str.eq({ object: { source_key: 'field3' }, value: 'false' }), + transforms: [ + sub.tf.send.http.post({ url: 'https://example-http-endpoint.com' }), + ], }, { - transform: sub.tf.send.aws.s3({ bucket_name: 'example-bucket-name' }), + transforms: [ + sub.tf.send.aws.s3({ aws: { arn: 'arn:aws:s3:::example-bucket-name' } }), + ], }, ] }), // The event is always available to any remaining transforms. @@ -409,7 +419,7 @@ module "node" { env = { "SUBSTATION_CONFIG" : "https://localhost:2772/applications/substation/environments/example/configurations/node" "SUBSTATION_DEBUG" : true - # This Substation node will ingest data from API Gateway. More nodes can be + # This Substation node will ingest data from API Gateway. More nodes can be # deployed to ingest data from other sources, such as Kinesis or SQS. "SUBSTATION_LAMBDA_HANDLER" : "AWS_API_GATEWAY" } @@ -447,29 +457,15 @@ docker build -t substation-dev .devcontainer/ && \ docker run -v $(pwd):/workspaces/substation/ -w /workspaces/substation -v /var/run/docker.sock:/var/run/docker.sock -it substation-dev ``` -To try the system locally, run this from the [examples](examples) directory: -```sh -sh .devcontainer/post_start.sh && \ -cd examples && \ -make -s quickstart -``` +To test the system locally, run this from the project root: -To try the system in the cloud, choose an [AWS example](examples/terraform/aws) to deploy: -```sh -sh .devcontainer/post_start.sh && \ -cd examples && \ -aws configure && \ -make -s check && \ -make -s build && \ -make -s deploy EXAMPLE=terraform/aws/dynamodb/cdc -``` - -After testing is complete, the cloud deployment should be destroyed: -```sh -make -s destroy EXAMPLE=terraform/aws/dynamodb/cdc +```bash +sh build/scripts/config/compile.sh && \ +go build -o ./examples/substation-file ./cmd/development/substation-file/ && \ +./examples/substation-file -config ./examples/transform/aggregate/summarize/config.json -file ./examples/transform/aggregate/summarize/data.jsonl ``` -**We do not recommend managing cloud deployments from a local machine using the examples Makefile. 
Production deployments should use a CI/CD pipeline with a remote state backend, such as Terraform, to manage infrastructure.** +The [Terraform documentation](build/terraform/aws/) includes guidance for deploying Substation to AWS. ## Licensing @@ -479,5 +475,6 @@ Substation and its associated code is released under the terms of the [MIT Licen [releases]:https://github.com/brexhq/substation/releases "Substation Releases" [docs]:https://substation.readme.io/docs "Substation Documentation" [quickstart]:https://substation.readme.io/recipes/1-minute-quickstart "Substation Quickstart" +[adopters]:https://github.com/brexhq/substation/blob/main/ADOPTERS.md "Substation Adopters" [announcement]:https://medium.com/brexeng/announcing-substation-188d049d979b "Substation Announcement Post" [v1_release]:https://medium.com/brexeng/releasing-substation-v1-0-4d0314cbc45b "Substation v1.0 Release Post" diff --git a/VERSIONING.md b/VERSIONING.md index 64c8997b..492ad33a 100644 --- a/VERSIONING.md +++ b/VERSIONING.md @@ -8,6 +8,7 @@ Substation uses [Semantic Versioning 2.0](https://semver.org/). Versions are man - message/* - transform/* - substation.go +- substation.libsonnet - go.mod Some features may be labeled as "experimental" in the documentation. These features are not subject to the same versioning guarantees as the rest of the project and may be changed or removed at any time. diff --git a/build/config/README.md b/build/config/README.md deleted file mode 100644 index 21d7a485..00000000 --- a/build/config/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# config - -Contains importable [Jsonnet](https://jsonnet.org/) functions and patterns for building configurations. diff --git a/build/config/substation_test.jsonnet b/build/config/substation_test.jsonnet deleted file mode 100644 index 8e9be890..00000000 --- a/build/config/substation_test.jsonnet +++ /dev/null @@ -1,74 +0,0 @@ -local sub = import 'substation.libsonnet'; - -local src = 'a'; -local trg = 'b'; - -local transform = sub.transform.object.copy(settings={ obj: { src: src, trg: trg } }); -local inspector = sub.condition.format.json(); - -{ - condition: { - number: { - equal_to: sub.condition.number.equal_to({ obj: { src: src }, value: 1 }), - less_than: sub.condition.number.less_than({ obj: { src: src }, value: 1 }), - greater_than: sub.condition.number.greater_than({ obj: { src: src }, value: 1 }), - }, - }, - transform: { - send: { - aws: { - s3: sub.transform.send.aws.s3({ bucket: 'my-bucket' }), - }, - http: { - post: sub.transform.send.http.post({ - url: 'http://localhost:8080', - hdr: [{ key: 'Content-Type', value: 'application/json' }], - }), - }, - }, - string: { - repl: sub.transform.string.repl({ - obj: { src: src, trg: trg }, - pattern: 'a', - repl: 'b', - }), - replace: sub.transform.string.replace({ - object: { source_key: src, target_key: trg }, - pattern: 'a', - replacement: 'b', - }), - split: sub.transform.string.split({ - object: { source_key: src, target_key: trg }, - sep: '.', - }), - }, - }, - helpers: { - make_array: sub.helpers.make_array(src), - key: { - append: sub.helpers.object.append(src, trg), - append_array: sub.helpers.object.append_array(src), - get_element: sub.helpers.object.get_element(src, 1), - }, - }, - pattern: { - condition: { - obj: sub.pattern.condition.obj(src), - negate: sub.pattern.condition.negate(inspector), - network: { - ip: { - internal: sub.pattern.condition.network.ip.internal(src), - }, - }, - logic: { - len: { - eq_zero: sub.pattern.condition.number.length.eq_zero(src), - gt_zero: 
sub.pattern.condition.number.length.gt_zero(src), - }, - }, - }, - transform: { - conditional: sub.pattern.transform.conditional(inspector, transform), - }, - }, -} diff --git a/build/container/README.md b/build/container/README.md index 33d12567..9201bcd5 100644 --- a/build/container/README.md +++ b/build/container/README.md @@ -4,8 +4,22 @@ This directory contains Docker build files for applications. Containers should b ## aws -Images stored in AWS ECR should be built using environment variables: `docker build -f build/container/aws/lambda/substation/Dockerfile -t $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/substation:latest .` +We recommend building images from within the development container by running these commands: -We recommend tagging images with the Semantic Version of each release: `docker tag foo $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/substation:$(git describe --abbrev=0 --tags)` +```bash -To specify the instruction set architecture, use the ARCH build arg: `docker build --build-arg ARCH=$ARCH -f build/container/aws/lambda/substation/Dockerfile -t $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/substation:latest-$ARCH .` +# Set environment variables. +export SUBSTATION_VERSION=$(git describe --abbrev=0 --tags) # Uses the release as the image tag. +export AWS_ARCHITECTURE=arm64 # Either "arm64" or "x86_64". +export AWS_ACCOUNT_ID=012345678901 +export AWS_REGION=us-east-1 + +# Build for the appropriate architecture and AWS region. We recommend using arm64 for AWS Lambda. +bash build/scripts/aws/lambda/get_appconfig_extension.sh +docker buildx build --platform linux/arm64 --build-arg ARCH=$AWS_ARCHITECTURE -f build/container/aws/lambda/substation/Dockerfile -t substation:latest-arm64 . +docker tag substation:latest-arm64 $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/substation:$SUBSTATION_VERSION + +# Push the image to ECR. +aws ecr get-login-password | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com +docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/substation:$SUBSTATION_VERSION +``` diff --git a/build/container/aws/lambda/autoscale/Dockerfile b/build/container/aws/lambda/autoscale/Dockerfile index b46e95f6..8f5e95a0 100644 --- a/build/container/aws/lambda/autoscale/Dockerfile +++ b/build/container/aws/lambda/autoscale/Dockerfile @@ -1,6 +1,7 @@ ARG ARCH=x86_64 +ARG SOURCE="." # default to local source -FROM golang:1.22 as build +FROM golang:1.23 as build ENV GO111MODULE=on ENV CGO_ENABLED=0 @@ -12,17 +13,17 @@ RUN apt-get update && \ # install deps WORKDIR /usr/local/go/src/substation/ -COPY ./go.mod . -COPY ./go.sum . +COPY ${SOURCE}/go.mod . +COPY ${SOURCE}/go.sum . RUN go mod download # copy src, unit test, build app -COPY . /usr/local/go/src/substation/ +COPY ${SOURCE} /usr/local/go/src/substation/ RUN go test -timeout 30s -v ./... WORKDIR /usr/local/go/src/substation/cmd/aws/lambda/autoscale RUN go build -ldflags "-w -s" -o /var/task/main -FROM public.ecr.aws/lambda/provided:al2-$ARCH +FROM public.ecr.aws/lambda/provided:al2023-$ARCH COPY --from=build /opt /opt COPY --from=build /var/task/main /var/task/main ENTRYPOINT ["/var/task/main"] diff --git a/build/container/aws/lambda/substation/Dockerfile b/build/container/aws/lambda/substation/Dockerfile index 8adc8fa1..30c490d1 100644 --- a/build/container/aws/lambda/substation/Dockerfile +++ b/build/container/aws/lambda/substation/Dockerfile @@ -1,6 +1,7 @@ ARG ARCH=x86_64 +ARG SOURCE="." 
# default to local source -FROM golang:1.22 as build +FROM golang:1.23 as build ENV GO111MODULE=on ENV CGO_ENABLED=0 @@ -12,17 +13,17 @@ RUN apt-get update && \ # install deps WORKDIR /usr/local/go/src/substation/ -COPY ./go.mod . -COPY ./go.sum . +COPY ${SOURCE}/go.mod . +COPY ${SOURCE}/go.sum . RUN go mod download # copy src, unit test, build app -COPY . /usr/local/go/src/substation/ +COPY ${SOURCE} /usr/local/go/src/substation/ RUN go test -timeout 30s -v ./... WORKDIR /usr/local/go/src/substation/cmd/aws/lambda/substation RUN go build -ldflags "-w -s" -o /var/task/main -FROM public.ecr.aws/lambda/provided:al2-$ARCH +FROM public.ecr.aws/lambda/provided:al2023-$ARCH COPY --from=build /opt /opt COPY --from=build /var/task/main /var/task/main ENTRYPOINT ["/var/task/main"] diff --git a/build/container/aws/lambda/validate/Dockerfile b/build/container/aws/lambda/validate/Dockerfile index 4df86cdb..f3787a42 100644 --- a/build/container/aws/lambda/validate/Dockerfile +++ b/build/container/aws/lambda/validate/Dockerfile @@ -1,22 +1,23 @@ ARG ARCH=x86_64 +ARG SOURCE="." # default to local source -FROM golang:1.22 as build +FROM golang:1.23 as build ENV GO111MODULE=on ENV CGO_ENABLED=0 # install deps WORKDIR /usr/local/go/src/substation/ -COPY ./go.mod . -COPY ./go.sum . +COPY ${SOURCE}/go.mod . +COPY ${SOURCE}/go.sum . RUN go mod download # copy src, unit test, build app -COPY . /usr/local/go/src/substation/ +COPY ${SOURCE} /usr/local/go/src/substation/ RUN go test -timeout 30s -v ./... WORKDIR /usr/local/go/src/substation/cmd/aws/lambda/validate RUN go build -ldflags "-w -s" -o /var/task/main -FROM public.ecr.aws/lambda/provided:al2-$ARCH +FROM public.ecr.aws/lambda/provided:al2023-$ARCH COPY --from=build /opt /opt COPY --from=build /var/task/main /var/task/main ENTRYPOINT ["/var/task/main"] diff --git a/build/scripts/aws/README.md b/build/scripts/aws/README.md index 7f88b57a..ac38d7da 100644 --- a/build/scripts/aws/README.md +++ b/build/scripts/aws/README.md @@ -2,37 +2,75 @@ This directory contains scripts that are used to manage Substation deployments in AWS. -## ecr_login.sh +## appconfig -Bash script that is used to log into the AWS Elastic Container Registry (ECR). After logging in, containers can be pushed to ECR using `docker push [image]` . +Contains scripts for uploading, deleting, and validating Substation configurations in AWS AppConfig. These are best used in a CI / CD pipeline, such as GitHub Actions: -## appconfig/appconfig_delete.py +```yaml -Python script that deletes application profiles, including all hosted configurations, in AWS AppConfig. This script has some dependencies: + deploy_substation: + runs-on: ubuntu-latest + permissions: + id-token: write + contents: write + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@master + with: + aws-region: us-east-1 + role-to-assume: arn:aws:iam::012345678901:role/substation_cicd + role-session-name: substation_cicd -* boto3 must be installed -* AWS credentials for reading from and deleting in AppConfig + - name: Checkout Code + uses: actions/checkout@v2 + with: + fetch-depth: 2 -This script can be used after deleting any AWS Lambda that have application profiles. + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: 1.22 -## appconfig/appconfig_upload.py + - name: Install Jsonnet + run: | + go install github.com/google/go-jsonnet/cmd/jsonnet@latest -Python script that manages the upload and deployment of compiled Substation JSON configuration files in AWS AppConfig. 
This script has some dependencies: + - uses: actions/setup-python@v2 + - name: Deploy Configs + run: | + pip3 install -r requirements.txt + bash compile.sh + python3 appconfig_upload.py + env: + AWS_DEFAULT_REGION: "us-east-1" + # These are required by the appconfig_upload.py script. + AWS_APPCONFIG_DEPLOYMENT_STRATEGY: "Instant" + AWS_APPCONFIG_ENVIRONMENT: "example" + AWS_APPCONFIG_APPLICATION_NAME: "substation" + SUBSTATION_CONFIG_DIRECTORY: "path/to/configs" +``` -* boto3 must be installed -* AWS credentials for reading from and writing to AppConfig -* AppConfig infrastructure must be ready to use (see [examples/aws/terraform/bootstrap.tf](/examples/aws/terraform/bootstrap.tf) for an example) +## dynamodb -This script is intended to be deployed to a CI / CD pipeline (e.g., GitHub Actions, Circle CI, Jenkins, etc.), but can be run locally if needed. See [examples/aws/](/examples/aws/) for example usage. +### bulk_delete_items.py -## s3/s3_rehydration.py +Python script that deletes items from a DynamoDB table based on a JSON Lines file. -Python script that rehydrates data from an S3 bucket into an SNS topic by simulating S3 -object creation events. This script has some dependencies: +## kinesis + +### put_records.py -* boto3 must be installed -* AWS credentials for reading from S3 and writing to SNS +Python script that puts records into a Kinesis stream by reading a text file. Each line in the text file is sent as a record to the Kinesis stream. -## lambda/get_appconfig_extension.sh +## lambda + +### get_appconfig_extension.sh Bash script that is used to download the [AWS AppConfig Lambda extension](https://docs.aws.amazon.com/appconfig/latest/userguide/appconfig-integration-lambda-extensions.html) for any AWS region. This extension is required for deploying Substation to AWS Lambda. + +## s3 + +### s3_rehydration.py + +Python script that rehydrates data from an S3 bucket into an SNS topic by simulating S3 +object creation events. diff --git a/build/scripts/aws/ecr_login.sh b/build/scripts/aws/ecr_login.sh deleted file mode 100644 index 70befa83..00000000 --- a/build/scripts/aws/ecr_login.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -set -euo pipefail - -if [ -z $AWS_REGION ]; then - >&2 echo "Error: AWS_REGION not set." - exit 1 -fi - -if [ -z $AWS_ACCOUNT_ID ]; then - >&2 echo "Error: AWS_ACCOUNT_ID not set." - exit 1 -fi - -if ! [ -x "$(command -v aws)" ]; then - >&2 echo "Error: AWS CLI is not installed." - exit 1 -fi - -if ! [ -x "$(command -v docker)" ]; then - >&2 echo "Error: Docker is not installed." - exit 1 -fi - -REGISTRY="$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com" -aws ecr get-login-password | \ - docker login --username AWS --password-stdin "$REGISTRY" diff --git a/build/scripts/config/compile.sh b/build/scripts/config/compile.sh index a9c1e607..e09a9232 100644 --- a/build/scripts/config/compile.sh +++ b/build/scripts/config/compile.sh @@ -1,5 +1,5 @@ #!/bin/sh -files=$(find . -name *.jsonnet) +files=$(find . -name "*.jsonnet") for file in $files do diff --git a/build/terraform/aws/README.md b/build/terraform/aws/README.md index e20f806a..2d01ac30 100644 --- a/build/terraform/aws/README.md +++ b/build/terraform/aws/README.md @@ -25,4 +25,132 @@ Due to the potentially endless number of deployment configurations, Substation i * SQS * VPC Networking -Refer to each module's README for more information. Several examples of how to use these modules are available [here](/examples/terraform/aws/). +Refer to each module's README for more information. 
+ +## Guides + +### Bootstrapping Deployments + +Many resources can be shared across a single deployment, such as AppConfig configurations, ECR repositories, and S3 buckets: + +```hcl + +module "appconfig" { + source = "build/terraform/aws/appconfig" + + config = { + name = "substation" + environments = [{ name = "example" }] + } +} + +module "ecr" { + source = "build/terraform/aws/ecr" + + config = { + name = "substation" + force_delete = true + } +} + +module "s3" { + source = "build/terraform/aws/s3" + + config = { + # Bucket names must be globally unique. + name = "substation-xxxxxx" + } + + # Access is granted by providing the role name of a + # resource. This access applies least privilege and + # grants access to dependent resources, such as KMS. + access = [ + module.node.role.name, + ] +} +``` + +### Deploying Nodes + +Substation [nodes](https://substation.readme.io/docs/nodes) are deployed as Lambda functions: + +```hcl + +module "node" { + source = "build/terraform/aws/lambda" + appconfig = module.appconfig + + config = { + name = "node" + description = "Substation node that is invoked by an API Gateway." + image_uri = "${module.ecr.url}:v2.0.0" + image_arm = true + + env = { + "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" + "SUBSTATION_LAMBDA_HANDLER" : "AWS_API_GATEWAY" + "SUBSTATION_DEBUG" : true + } + } +} + +module "node_gateway" { + source = "build/terraform/aws/api_gateway/lambda" + lambda = module.node + + config = { + name = "node_gateway" + } + + depends_on = [ + module.node + ] +} +``` + +### Connecting Nodes + +Nodes are connected by AWS services, such as an SQS queue: + +```hcl + +module "sqs" { + source = "build/terraform/aws/sqs" + + config = { + name = "substation" + } + + access = [ + # Writes to the queue. + module.node.role.name, + # Reads from the queue. + module.consumer.role.name, + ] +} + +module "consumer" { + source = "build/terraform/aws/lambda" + appconfig = module.appconfig + + config = { + name = "consumer" + description = "Substation consumer that is invoked by an SQS queue." + image_uri = "${module.ecr.url}:v2.0.0" + image_arm = true + + env = { + "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/consumer" + "SUBSTATION_LAMBDA_HANDLER" : "AWS_SQS" + "SUBSTATION_DEBUG" : true + } + } +} + +resource "aws_lambda_event_source_mapping" "consumer" { + event_source_arn = module.sqs.arn + function_name = module.consumer.arn + maximum_batching_window_in_seconds = 10 + batch_size = 100 +} +``` diff --git a/build/terraform/aws/kinesis_data_stream/main.tf b/build/terraform/aws/kinesis_data_stream/main.tf index 000f5382..457ee00c 100644 --- a/build/terraform/aws/kinesis_data_stream/main.tf +++ b/build/terraform/aws/kinesis_data_stream/main.tf @@ -93,7 +93,6 @@ resource "aws_cloudwatch_metric_alarm" "metric_alarm_downscale" { lifecycle { # These are managed by the Autoscale application. 
- # https://github.com/brexhq/substation/blob/main/internal/aws/cloudwatch/cloudwatch.go ignore_changes = [metric_query, datapoints_to_alarm, evaluation_periods, threshold] } diff --git a/cmd/aws/lambda/autoscale/main.go b/cmd/aws/lambda/autoscale/main.go index 9a83b998..85c32bfa 100644 --- a/cmd/aws/lambda/autoscale/main.go +++ b/cmd/aws/lambda/autoscale/main.go @@ -4,28 +4,87 @@ import ( "context" "fmt" "math" + "os" "strconv" "strings" "time" "github.com/aws/aws-lambda-go/events" "github.com/aws/aws-lambda-go/lambda" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/cloudwatch" - "github.com/brexhq/substation/internal/aws/kinesis" - "github.com/brexhq/substation/internal/log" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + ctypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + ktypes "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/tidwall/gjson" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/log" +) + +const ( + // This is the period in seconds that the AWS Kinesis CloudWatch alarms + // will evaluate the metrics over. + kinesisMetricsPeriod = int32(60) ) var ( - cloudwatchAPI cloudwatch.API - kinesisAPI kinesis.API + cloudwatchC *cloudwatch.Client + kinesisC *kinesis.Client + // By default, AWS Kinesis streams must be below the lower threshold for + // 100% of the evaluation period (60 minutes) to scale down. This value can + // be overridden by the environment variable AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS. + kinesisDownscaleDatapoints = int32(60) + // By default, AWS Kinesis streams must be above the upper threshold for + // 100% of the evaluation period (5 minutes) to scale up. This value can + // be overridden by the environment variable AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS. + kinesisUpscaleDatapoints = int32(5) + // By default, AWS Kinesis streams will scale up if the incoming records and bytes + // are above 70% of the threshold. This value can be overridden by the environment + // variable AUTOSCALE_KINESIS_THRESHOLD, but it cannot be less than 40% or greater + // than 90%. + kinesisThreshold = 0.7 ) func init() { - // These must run in the same AWS account and region as the Lambda function. 
- cloudwatchAPI.Setup(aws.Config{}) - kinesisAPI.Setup(aws.Config{}) + ctx := context.Background() + + awsCfg, err := iconfig.NewAWS(ctx, iconfig.AWS{}) + if err != nil { + panic(fmt.Errorf("init: %v", err)) + } + + cloudwatchC = cloudwatch.NewFromConfig(awsCfg) + kinesisC = kinesis.NewFromConfig(awsCfg) + + if v, found := os.LookupEnv("AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS"); found { + dps, err := strconv.Atoi(v) + if err != nil { + panic(err) + } + + kinesisDownscaleDatapoints = int32(dps) + } + + if v, found := os.LookupEnv("AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS"); found { + dps, err := strconv.Atoi(v) + if err != nil { + panic(err) + } + + kinesisUpscaleDatapoints = int32(dps) + } + + if v, found := os.LookupEnv("AUTOSCALE_KINESIS_THRESHOLD"); found { + threshold, err := strconv.ParseFloat(v, 64) + if err != nil { + panic(err) + } + + if threshold >= 0.4 && threshold <= 0.9 { + kinesisThreshold = threshold + } + } } func main() { @@ -52,14 +111,15 @@ func handler(ctx context.Context, snsEvent events.SNSEvent) error { } log.WithField("alarm", alarmName).WithField("stream", stream).Debug("Parsed Kinesis stream.") - shards, err := kinesisAPI.ActiveShards(ctx, stream) + shards, err := listShards(ctx, stream) if err != nil { return fmt.Errorf("handler: %v", err) } + log.WithField("alarm", alarmName).WithField("stream", stream).WithField("count", shards). Info("Retrieved active shard count.") - var newShards int64 + var newShards int32 if strings.Contains(alarmName, "upscale") { newShards = upscale(float64(shards)) } @@ -69,15 +129,15 @@ func handler(ctx context.Context, snsEvent events.SNSEvent) error { log.WithField("alarm", alarmName).WithField("stream", stream).WithField("count", newShards).Info("Calculated new shard count.") - tags, err := kinesisAPI.GetTags(ctx, stream) + tags, err := listTags(ctx, stream) if err != nil { return fmt.Errorf("handler: %v", err) } - var minShard, maxShard int64 + var minShard, maxShard int32 for _, tag := range tags { if *tag.Key == "MinimumShards" { - minShard, err = strconv.ParseInt(*tag.Value, 10, 64) + minShard, err := strconv.ParseInt(*tag.Value, 10, 64) if err != nil { return fmt.Errorf("handler: %v", err) } @@ -86,7 +146,7 @@ func handler(ctx context.Context, snsEvent events.SNSEvent) error { } if *tag.Key == "MaximumShards" { - maxShard, err = strconv.ParseInt(*tag.Value, 10, 64) + maxShard, err := strconv.ParseInt(*tag.Value, 10, 64) if err != nil { return fmt.Errorf("handler: %v", err) } @@ -107,7 +167,11 @@ func handler(ctx context.Context, snsEvent events.SNSEvent) error { (time.Since(lastScalingEvent) < 30*time.Minute && strings.Contains(alarmName, "downscale")) { log.WithField("stream", stream).WithField("time", lastScalingEvent).Info("Last scaling event is too recent.") - if err := cloudwatchAPI.UpdateKinesisAlarmState(ctx, alarmName, "Last scaling event is too recent"); err != nil { + if _, err := cloudwatchC.SetAlarmState(ctx, &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String(alarmName), + StateValue: ctypes.StateValueInsufficientData, + StateReason: aws.String("Last scaling event is too recent"), + }); err != nil { return fmt.Errorf("handler: %v", err) } @@ -133,51 +197,281 @@ func handler(ctx context.Context, snsEvent events.SNSEvent) error { return nil } - if err := kinesisAPI.UpdateShards(ctx, stream, newShards); err != nil { - return fmt.Errorf("handler: %v", err) - } - - if err := kinesisAPI.UpdateTag(ctx, stream, "LastScalingEvent", time.Now().Format(time.RFC3339)); err != nil { + if err := updateStream(ctx, stream, 
newShards); err != nil { return fmt.Errorf("handler: %v", err) } log.WithField("alarm", alarmName).WithField("stream", stream).WithField("count", newShards).Info("Updated shard count.") - if err := cloudwatchAPI.UpdateKinesisDownscaleAlarm(ctx, stream+"_downscale", stream, topicArn, newShards); err != nil { + metrics := []ctypes.MetricDataQuery{ + { + Id: aws.String("m1"), + MetricStat: &ctypes.MetricStat{ + Metric: &ctypes.Metric{ + Namespace: aws.String("AWS/Kinesis"), + MetricName: aws.String("IncomingRecords"), + Dimensions: []ctypes.Dimension{ + { + Name: aws.String("StreamName"), + Value: aws.String(stream), + }, + }, + }, + Period: aws.Int32(kinesisMetricsPeriod), + Stat: aws.String("Sum"), + }, + Label: aws.String("IncomingRecords"), + ReturnData: aws.Bool(false), + }, + { + Id: aws.String("m2"), + MetricStat: &ctypes.MetricStat{ + Metric: &ctypes.Metric{ + Namespace: aws.String("AWS/Kinesis"), + MetricName: aws.String("IncomingBytes"), + Dimensions: []ctypes.Dimension{ + { + Name: aws.String("StreamName"), + Value: aws.String(stream), + }, + }, + }, + Period: aws.Int32(kinesisMetricsPeriod), + Stat: aws.String("Sum"), + }, + Label: aws.String("IncomingBytes"), + ReturnData: aws.Bool(false), + }, + { + Id: aws.String("e1"), + Expression: aws.String("FILL(m1,REPEAT)"), + Label: aws.String("FillMissingDataPointsForIncomingRecords"), + ReturnData: aws.Bool(false), + }, + { + Id: aws.String("e2"), + Expression: aws.String("FILL(m2,REPEAT)"), + Label: aws.String("FillMissingDataPointsForIncomingBytes"), + ReturnData: aws.Bool(false), + }, + { + Id: aws.String("e3"), + Expression: aws.String( + fmt.Sprintf("e1/(1000*%d*%d)", newShards, kinesisMetricsPeriod), + ), + Label: aws.String("IncomingRecordsPercent"), + ReturnData: aws.Bool(false), + }, + { + Id: aws.String("e4"), + Expression: aws.String( + fmt.Sprintf("e2/(1048576*%d*%d)", newShards, kinesisMetricsPeriod), + ), + Label: aws.String("IncomingBytesPercent"), + ReturnData: aws.Bool(false), + }, + { + Id: aws.String("e5"), + Expression: aws.String("MAX([e3,e4])"), + Label: aws.String("IncomingMax"), + ReturnData: aws.Bool(true), + }, + } + + downscaleThreshold := kinesisThreshold - 0.35 + if err := updateDownscaleAlarm(ctx, stream, topicArn, downscaleThreshold, metrics); err != nil { return fmt.Errorf("handler: %v", err) } + log.WithField("alarm", stream+"_downscale").WithField("stream", stream).WithField("count", newShards).Debug("Reset CloudWatch alarm.") - if err := cloudwatchAPI.UpdateKinesisUpscaleAlarm(ctx, stream+"_upscale", stream, topicArn, newShards); err != nil { + upscaleThreshold := kinesisThreshold + if err := updateUpscaleAlarm(ctx, stream, topicArn, upscaleThreshold, metrics); err != nil { return fmt.Errorf("handler: %v", err) } + log.WithField("alarm", stream+"_upscale").WithField("stream", stream).WithField("count", newShards).Debug("Reset CloudWatch alarm.") return nil } -func downscale(shards float64) int64 { +func downscale(shards float64) int32 { switch { case shards < 5: - return int64(math.Ceil(shards / 2)) + return int32(math.Ceil(shards / 2)) case shards < 13: - return int64(math.Ceil(shards / 1.75)) + return int32(math.Ceil(shards / 1.75)) case shards < 33: - return int64(math.Ceil(shards / 1.5)) + return int32(math.Ceil(shards / 1.5)) default: - return int64(math.Ceil(shards / 1.25)) + return int32(math.Ceil(shards / 1.25)) } } -func upscale(shards float64) int64 { +func upscale(shards float64) int32 { switch { case shards < 5: - return int64(math.Floor(shards * 2)) + return int32(math.Floor(shards * 2)) 
case shards < 13: - return int64(math.Floor(shards * 1.75)) + return int32(math.Floor(shards * 1.75)) case shards < 33: - return int64(math.Floor(shards * 1.5)) + return int32(math.Floor(shards * 1.5)) default: - return int64(math.Floor(shards * 1.25)) + return int32(math.Floor(shards * 1.25)) + } +} + +func listShards(ctx context.Context, stream string) (int32, error) { + var shards int32 + + input := kinesis.ListShardsInput{ + StreamName: aws.String(stream), + } + +LOOP: + for { + resp, err := kinesisC.ListShards(ctx, &input) + if err != nil { + return 0, err + } + + for _, s := range resp.Shards { + if end := s.SequenceNumberRange.EndingSequenceNumber; end == nil { + shards++ + } + } + + if resp.NextToken != nil { + input = kinesis.ListShardsInput{ + NextToken: resp.NextToken, + } + } else { + break LOOP + } } + + return shards, nil +} + +func listTags(ctx context.Context, stream string) ([]ktypes.Tag, error) { + var tags []ktypes.Tag + var lastTag string + + for { + input := kinesis.ListTagsForStreamInput{ + StreamName: aws.String(stream), + } + + if lastTag != "" { + input.ExclusiveStartTagKey = aws.String(lastTag) + } + + resp, err := kinesisC.ListTagsForStream(ctx, &input) + if err != nil { + return nil, err + } + + if len(resp.Tags) == 0 { + break + } + + tags = append(tags, resp.Tags...) + lastTag = *resp.Tags[len(resp.Tags)-1].Key + + if !*resp.HasMoreTags { + break + } + } + + return tags, nil +} + +func updateStream(ctx context.Context, stream string, shards int32) error { + _, err := kinesisC.UpdateShardCount(ctx, &kinesis.UpdateShardCountInput{ + StreamName: aws.String(stream), + TargetShardCount: aws.Int32(shards), + ScalingType: ktypes.ScalingTypeUniformScaling, + }) + if err != nil { + return err + } + + for { + resp, err := kinesisC.DescribeStreamSummary(ctx, &kinesis.DescribeStreamSummaryInput{ + StreamName: aws.String(stream), + }) + if err != nil { + return err + } + + if resp.StreamDescriptionSummary.StreamStatus != ktypes.StreamStatusUpdating { + break + } + time.Sleep(1 * time.Second) + } + + if _, err := kinesisC.AddTagsToStream(ctx, &kinesis.AddTagsToStreamInput{ + StreamName: aws.String(stream), + Tags: map[string]string{ + "LastScalingEvent": time.Now().Format(time.RFC3339), + }, + }); err != nil { + return err + } + + return nil +} + +func updateDownscaleAlarm(ctx context.Context, stream, topic string, threshold float64, metrics []ctypes.MetricDataQuery) error { + if _, err := cloudwatchC.PutMetricAlarm(ctx, &cloudwatch.PutMetricAlarmInput{ + AlarmName: aws.String(stream + "_downscale"), + AlarmDescription: aws.String(stream), + ActionsEnabled: aws.Bool(true), + AlarmActions: []string{topic}, + EvaluationPeriods: aws.Int32(kinesisDownscaleDatapoints), + DatapointsToAlarm: aws.Int32(kinesisDownscaleDatapoints), + Threshold: aws.Float64(threshold), + ComparisonOperator: ctypes.ComparisonOperatorLessThanOrEqualToThreshold, + TreatMissingData: aws.String("ignore"), + Metrics: metrics, + }); err != nil { + return err + } + + if _, err := cloudwatchC.SetAlarmState(ctx, &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String(stream + "_downscale"), + StateValue: ctypes.StateValueInsufficientData, + StateReason: aws.String("Threshold updated"), + }); err != nil { + return err + } + + return nil +} + +func updateUpscaleAlarm(ctx context.Context, stream, topic string, threshold float64, metrics []ctypes.MetricDataQuery) error { + if _, err := cloudwatchC.PutMetricAlarm(ctx, &cloudwatch.PutMetricAlarmInput{ + AlarmName: aws.String(stream + "_upscale"), + AlarmDescription: 
aws.String(stream), + ActionsEnabled: aws.Bool(true), + AlarmActions: []string{topic}, + EvaluationPeriods: aws.Int32(kinesisUpscaleDatapoints), + DatapointsToAlarm: aws.Int32(kinesisUpscaleDatapoints), + Threshold: aws.Float64(threshold), + ComparisonOperator: ctypes.ComparisonOperatorGreaterThanOrEqualToThreshold, + TreatMissingData: aws.String("ignore"), + Metrics: metrics, + }); err != nil { + return err + } + + if _, err := cloudwatchC.SetAlarmState(ctx, &cloudwatch.SetAlarmStateInput{ + AlarmName: aws.String(stream + "_upscale"), + StateValue: ctypes.StateValueInsufficientData, + StateReason: aws.String("Threshold updated"), + }); err != nil { + return err + } + + return nil } diff --git a/cmd/aws/lambda/substation/api_gateway.go b/cmd/aws/lambda/substation/api_gateway.go index 672c3e6a..0c30717f 100644 --- a/cmd/aws/lambda/substation/api_gateway.go +++ b/cmd/aws/lambda/substation/api_gateway.go @@ -5,8 +5,9 @@ import ( "encoding/json" "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/message" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" ) var gateway500Response = events.APIGatewayProxyResponse{StatusCode: 500} diff --git a/cmd/aws/lambda/substation/kinesis_firehose.go b/cmd/aws/lambda/substation/data_firehose.go similarity index 95% rename from cmd/aws/lambda/substation/kinesis_firehose.go rename to cmd/aws/lambda/substation/data_firehose.go index fab949b4..4bc8ff41 100644 --- a/cmd/aws/lambda/substation/kinesis_firehose.go +++ b/cmd/aws/lambda/substation/data_firehose.go @@ -6,8 +6,9 @@ import ( "time" "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/message" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" ) type firehoseMetadata struct { diff --git a/cmd/aws/lambda/substation/dynamodb.go b/cmd/aws/lambda/substation/dynamodb.go index 47763ce0..97be54fc 100644 --- a/cmd/aws/lambda/substation/dynamodb.go +++ b/cmd/aws/lambda/substation/dynamodb.go @@ -7,12 +7,14 @@ import ( "time" "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws/dynamodb" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "golang.org/x/sync/errgroup" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/channel" ) type dynamodbMetadata struct { @@ -184,8 +186,8 @@ func dynamodbHandler(ctx context.Context, event events.DynamoDBEvent) error { } } else { var before map[string]interface{} - if err = dynamodbattribute.UnmarshalMap( - dynamodb.ConvertEventsAttributeValueMap(record.Change.OldImage), + if err = attributevalue.UnmarshalMap( + convertEventsAttributeValueMap(record.Change.OldImage), &before, ); err != nil { return err @@ -202,8 +204,8 @@ func dynamodbHandler(ctx context.Context, event events.DynamoDBEvent) error { } } else { var after map[string]interface{} - if err = dynamodbattribute.UnmarshalMap( - dynamodb.ConvertEventsAttributeValueMap(record.Change.NewImage), + if err = attributevalue.UnmarshalMap( + convertEventsAttributeValueMap(record.Change.NewImage), &after, ); err != nil { return err @@ -228,3 +230,52 @@ func dynamodbHandler(ctx context.Context, event 
events.DynamoDBEvent) error { return nil } + +// convertEventsAttributeValue converts events.DynamoDBAttributeValue to types.AttributeValue. +func convertEventsAttributeValue(v events.DynamoDBAttributeValue) types.AttributeValue { + switch v.DataType() { + case events.DataTypeNull: + return &types.AttributeValueMemberNULL{} + case events.DataTypeBoolean: + return &types.AttributeValueMemberBOOL{Value: v.Boolean()} + case events.DataTypeString: + return &types.AttributeValueMemberS{Value: v.String()} + case events.DataTypeNumber: + return &types.AttributeValueMemberN{Value: v.Number()} + case events.DataTypeBinary: + return &types.AttributeValueMemberB{Value: v.Binary()} + case events.DataTypeStringSet: + return &types.AttributeValueMemberSS{Value: v.StringSet()} + case events.DataTypeNumberSet: + return &types.AttributeValueMemberNS{Value: v.NumberSet()} + case events.DataTypeBinarySet: + return &types.AttributeValueMemberBS{Value: v.BinarySet()} + case events.DataTypeList: + var l []types.AttributeValue + for _, e := range v.List() { + l = append(l, convertEventsAttributeValue(e)) + } + + return &types.AttributeValueMemberL{Value: l} + case events.DataTypeMap: + m := make(map[string]types.AttributeValue) + for k, e := range v.Map() { + m[k] = convertEventsAttributeValue(e) + } + + return &types.AttributeValueMemberM{Value: m} + default: + return nil + } +} + +// convertEventsAttributeValueMap converts a map of events.DynamoDBAttributeValue to a map of dynamodb.AttributeValue. +func convertEventsAttributeValueMap(m map[string]events.DynamoDBAttributeValue) map[string]types.AttributeValue { + av := make(map[string]types.AttributeValue) + + for k, v := range m { + av[k] = convertEventsAttributeValue(v) + } + + return av +} diff --git a/cmd/aws/lambda/substation/kinesis_stream.go b/cmd/aws/lambda/substation/kinesis_data_stream.go similarity index 74% rename from cmd/aws/lambda/substation/kinesis_stream.go rename to cmd/aws/lambda/substation/kinesis_data_stream.go index db58beda..00e86315 100644 --- a/cmd/aws/lambda/substation/kinesis_stream.go +++ b/cmd/aws/lambda/substation/kinesis_data_stream.go @@ -6,12 +6,14 @@ import ( "time" "github.com/aws/aws-lambda-go/events" - "github.com/awslabs/kinesis-aggregation/go/deaggregator" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws/kinesis" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + "github.com/awslabs/kinesis-aggregation/go/v2/deaggregator" "golang.org/x/sync/errgroup" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/channel" ) type kinesisStreamMetadata struct { @@ -85,7 +87,7 @@ func kinesisStreamHandler(ctx context.Context, event events.KinesisEvent) error defer ch.Close() eventSourceArn := event.Records[len(event.Records)-1].EventSourceArn - converted := kinesis.ConvertEventsRecords(event.Records) + converted := convertEventsRecords(event.Records) deaggregated, err := deaggregator.DeaggregateRecords(converted) if err != nil { return err @@ -126,3 +128,21 @@ func kinesisStreamHandler(ctx context.Context, event events.KinesisEvent) error return nil } + +func convertEventsRecords(records []events.KinesisEventRecord) []types.Record { + output := make([]types.Record, 0) + + for _, r := range records { + // ApproximateArrivalTimestamp is events.SecondsEpochTime which serializes time.Time + ts := r.Kinesis.ApproximateArrivalTimestamp.UTC() + 
output = append(output, types.Record{ + ApproximateArrivalTimestamp: &ts, + Data: r.Kinesis.Data, + EncryptionType: types.EncryptionType(r.Kinesis.EncryptionType), + PartitionKey: &r.Kinesis.PartitionKey, + SequenceNumber: &r.Kinesis.SequenceNumber, + }) + } + + return output +} diff --git a/cmd/aws/lambda/substation/lambda.go b/cmd/aws/lambda/substation/lambda.go index 0cfa28d4..bb4bfe67 100644 --- a/cmd/aws/lambda/substation/lambda.go +++ b/cmd/aws/lambda/substation/lambda.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" ) func lambdaHandler(ctx context.Context, event json.RawMessage) ([]json.RawMessage, error) { diff --git a/cmd/aws/lambda/substation/main.go b/cmd/aws/lambda/substation/main.go index 720677ef..44941798 100644 --- a/cmd/aws/lambda/substation/main.go +++ b/cmd/aws/lambda/substation/main.go @@ -8,8 +8,9 @@ import ( "os" "github.com/aws/aws-lambda-go/lambda" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/file" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/internal/file" ) var ( @@ -64,11 +65,11 @@ func main() { switch h := handler; h { case "AWS_API_GATEWAY": lambda.Start(gatewayHandler) - case "AWS_DYNAMODB_STREAM", "AWS_DYNAMODB": // AWS_DYNAMODB is deprecated + case "AWS_DYNAMODB_STREAM": lambda.Start(dynamodbHandler) - case "AWS_KINESIS_DATA_FIREHOSE": + case "AWS_DATA_FIREHOSE": lambda.Start(firehoseHandler) - case "AWS_KINESIS_DATA_STREAM", "AWS_KINESIS": // AWS_KINESIS is deprecated + case "AWS_KINESIS_DATA_STREAM": lambda.Start(kinesisStreamHandler) case "AWS_LAMBDA": lambda.Start(lambdaHandler) diff --git a/cmd/aws/lambda/substation/s3.go b/cmd/aws/lambda/substation/s3.go index fa2c14e3..75afd686 100644 --- a/cmd/aws/lambda/substation/s3.go +++ b/cmd/aws/lambda/substation/s3.go @@ -10,14 +10,17 @@ import ( "time" "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/s3manager" - "github.com/brexhq/substation/internal/bufio" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/internal/media" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" "golang.org/x/sync/errgroup" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/bufio" + "github.com/brexhq/substation/v2/internal/channel" + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/media" ) type s3Metadata struct { @@ -92,8 +95,13 @@ func s3Handler(ctx context.Context, event events.S3Event) error { group.Go(func() error { defer ch.Close() - client := s3manager.DownloaderAPI{} - client.Setup(aws.Config{}) + awsCfg, err := iconfig.NewAWS(ctx, iconfig.AWS{}) + if err != nil { + return err + } + + c := s3.NewFromConfig(awsCfg) + client := manager.NewDownloader(c) for _, record := range event.Records { // The S3 object key is URL encoded. 
@@ -124,7 +132,10 @@ func s3Handler(ctx context.Context, event events.S3Event) error { defer os.Remove(dst.Name()) defer dst.Close() - if _, err := client.Download(ctx, record.S3.Bucket.Name, objectKey, dst); err != nil { + if _, err := client.Download(ctx, dst, &s3.GetObjectInput{ + Bucket: &record.S3.Bucket.Name, + Key: &objectKey, + }); err != nil { return err } @@ -255,8 +266,13 @@ func s3SnsHandler(ctx context.Context, event events.SNSEvent) error { group.Go(func() error { defer ch.Close() - client := s3manager.DownloaderAPI{} - client.Setup(aws.Config{}) + awsCfg, err := iconfig.NewAWS(ctx, iconfig.AWS{}) + if err != nil { + return err + } + + c := s3.NewFromConfig(awsCfg) + client := manager.NewDownloader(c) for _, record := range event.Records { var s3Event events.S3Event @@ -293,7 +309,10 @@ func s3SnsHandler(ctx context.Context, event events.SNSEvent) error { defer os.Remove(dst.Name()) defer dst.Close() - if _, err := client.Download(ctx, record.S3.Bucket.Name, objectKey, dst); err != nil { + if _, err := client.Download(ctx, dst, &s3.GetObjectInput{ + Bucket: &record.S3.Bucket.Name, + Key: &objectKey, + }); err != nil { return err } diff --git a/cmd/aws/lambda/substation/sns.go b/cmd/aws/lambda/substation/sns.go index 5995f24b..2a0cf7cd 100644 --- a/cmd/aws/lambda/substation/sns.go +++ b/cmd/aws/lambda/substation/sns.go @@ -7,10 +7,12 @@ import ( "time" "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" "golang.org/x/sync/errgroup" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/channel" ) type snsMetadata struct { diff --git a/cmd/aws/lambda/substation/sqs.go b/cmd/aws/lambda/substation/sqs.go index 3c557385..96c003ef 100644 --- a/cmd/aws/lambda/substation/sqs.go +++ b/cmd/aws/lambda/substation/sqs.go @@ -6,10 +6,12 @@ import ( "fmt" "github.com/aws/aws-lambda-go/events" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/message" "golang.org/x/sync/errgroup" + + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/channel" ) type sqsMetadata struct { diff --git a/cmd/aws/lambda/validate/main.go b/cmd/aws/lambda/validate/main.go index 760aeefc..c9106462 100644 --- a/cmd/aws/lambda/validate/main.go +++ b/cmd/aws/lambda/validate/main.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-lambda-go/lambda" - "github.com/brexhq/substation" + "github.com/brexhq/substation/v2" ) func main() { diff --git a/cmd/aws/lambda/validate/main_test.go b/cmd/aws/lambda/validate/main_test.go index 85ac3590..8befa72b 100644 --- a/cmd/aws/lambda/validate/main_test.go +++ b/cmd/aws/lambda/validate/main_test.go @@ -6,7 +6,7 @@ import ( "encoding/json" "testing" - "github.com/brexhq/substation/internal/errors" + iconfig "github.com/brexhq/substation/v2/internal/config" ) var testCfgs = []struct { @@ -25,7 +25,7 @@ var testCfgs = []struct { ] } `), - errors.ErrInvalidFactoryInput, + iconfig.ErrInvalidFactoryInput, }, { "invalid processor settings", @@ -38,7 +38,7 @@ var testCfgs = []struct { ] } `), - errors.ErrInvalidOption, + iconfig.ErrInvalidOption, }, { "valid config", diff --git a/cmd/development/kinesis-tap/substation/config.jsonnet b/cmd/development/kinesis-tap/substation/config.jsonnet deleted file mode 100644 index 368392a0..00000000 --- 
a/cmd/development/kinesis-tap/substation/config.jsonnet +++ /dev/null @@ -1,7 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/cmd/development/benchmark/substation/main.go b/cmd/development/substation-bench/main.go similarity index 95% rename from cmd/development/benchmark/substation/main.go rename to cmd/development/substation-bench/main.go index a1e58594..6b91d32b 100644 --- a/cmd/development/benchmark/substation/main.go +++ b/cmd/development/substation-bench/main.go @@ -14,11 +14,12 @@ import ( "golang.org/x/sync/errgroup" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/bufio" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/bufio" + "github.com/brexhq/substation/v2/internal/channel" + "github.com/brexhq/substation/v2/internal/file" ) type options struct { diff --git a/examples/cmd/client/file/substation/main.go b/cmd/development/substation-file/main.go similarity index 91% rename from examples/cmd/client/file/substation/main.go rename to cmd/development/substation-file/main.go index 7e3777d3..0aa5f069 100644 --- a/examples/cmd/client/file/substation/main.go +++ b/cmd/development/substation-file/main.go @@ -8,18 +8,17 @@ import ( "fmt" "io" "os" - "runtime" "slices" "time" "golang.org/x/sync/errgroup" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/bufio" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/internal/media" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/internal/bufio" + "github.com/brexhq/substation/v2/internal/channel" + "github.com/brexhq/substation/v2/internal/file" + "github.com/brexhq/substation/v2/internal/media" + "github.com/brexhq/substation/v2/message" ) type options struct { @@ -87,7 +86,7 @@ func run(ctx context.Context, opts options) error { group.Go(func() error { tfGroup, tfCtx := errgroup.WithContext(ctx) - tfGroup.SetLimit(runtime.NumCPU()) + tfGroup.SetLimit(1) for message := range ch.Recv() { select { diff --git a/cmd/development/kinesis-tap/substation/README.md b/cmd/development/substation-kinesis-tap/README.md similarity index 73% rename from cmd/development/kinesis-tap/substation/README.md rename to cmd/development/substation-kinesis-tap/README.md index bd33d8c8..858592bc 100644 --- a/cmd/development/kinesis-tap/substation/README.md +++ b/cmd/development/substation-kinesis-tap/README.md @@ -1,6 +1,6 @@ -# kinesis-tap/substation +# substation-kinesis-tap -`kinesis-tap` is a tool for tapping into and transforming data from an AWS Kinesis Data Stream in real-time with Substation. +`substation-kinesis-tap` is a tool for tapping into and transforming data from an AWS Kinesis Data Stream in real-time with Substation. 
This is intended as a Substation development aid, but it has other uses as well, such as: - Previewing live data in a stream by printing it to the console (default behavior) @@ -12,7 +12,7 @@ Warning: This is a development tool intended to provide temporary access to live ## Usage ``` -% ./substation -h -Usage of ./substation: +% ./substation-kinesis-tap -h +Usage of ./substation-kinesis-tap: -config string The Substation configuration file used to transform records (default "./config.json") @@ -24,7 +24,7 @@ Usage of ./substation: Use the `SUBSTATION_DEBUG=1` environment variable to enable debug logging: ``` -% SUBSTATION_DEBUG=1 ./substation -stream-name my-stream +% SUBSTATION_DEBUG=1 ./substation-kinesis-tap -stream-name my-stream DEBU[0000] Retrieved active shards from Kinesis stream. count=2 stream=my-stream DEBU[0001] Retrieved records from Kinesis shard. count=981 shard=0x140004a6f80 stream=my-stream DEBU[0002] Retrieved records from Kinesis shard. count=1055 shard=0x140004a6fe0 stream=my-stream @@ -32,24 +32,24 @@ DEBU[0003] Retrieved records from Kinesis shard. count=2333 shard=0x1400 DEBU[0003] Retrieved records from Kinesis shard. count=1110 shard=0x140004a6fe0 stream=my-stream DEBU[0004] Retrieved records from Kinesis shard. count=2109 shard=0x140004a6f80 stream=my-stream DEBU[0004] Retrieved records from Kinesis shard. count=1094 shard=0x140004a6fe0 stream=my-stream -^CDEBU[0004] Closed connections to the Kinesis stream. -DEBU[0004] Closed Substation pipeline. +^CDEBU[0004] Closed connections to the Kinesis stream. +DEBU[0004] Closed Substation pipeline. DEBU[0004] Flushed Substation pipeline. ``` ## Build -Download, configure, and build the `kinesis-tap` binary with these commands: +Download, configure, and build the `substation-kinesis-tap` binary with these commands: ``` git clone https://github.com/brexhq/substation.git && \ -cd substation/cmd/development/kinesis-tap/substation && \ +cd substation/cmd/development/substation-kinesis-tap && \ jsonnet config.jsonnet > config.json && \ go build . ``` ## Authentication -`kinesis-tap` uses the AWS SDK for Go to authenticate with AWS. The SDK uses the same authentication methods as the AWS CLI, so you can use the same environment variables or configuration files to authenticate. +`substation-kinesis-tap` uses the AWS SDK for Go to authenticate with AWS. The SDK uses the same authentication methods as the AWS CLI, so you can use the same environment variables or configuration files to authenticate. For more information, see the [AWS CLI documentation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html).
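For example, credentials can be supplied the same way as with the AWS CLI; the profile, region, and stream names below are placeholders:

```
% AWS_PROFILE=dev AWS_REGION=us-east-1 ./substation-kinesis-tap -stream-name my-stream
```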
diff --git a/cmd/development/substation-kinesis-tap/config.jsonnet b/cmd/development/substation-kinesis-tap/config.jsonnet new file mode 100644 index 00000000..94c97c51 --- /dev/null +++ b/cmd/development/substation-kinesis-tap/config.jsonnet @@ -0,0 +1,7 @@ +local sub = import '../../../substation.libsonnet'; + +{ + transforms: [ + sub.tf.send.stdout(), + ], +} diff --git a/cmd/development/kinesis-tap/substation/main.go b/cmd/development/substation-kinesis-tap/main.go similarity index 80% rename from cmd/development/kinesis-tap/substation/main.go rename to cmd/development/substation-kinesis-tap/main.go index 5425c403..93e5cbec 100644 --- a/cmd/development/kinesis-tap/substation/main.go +++ b/cmd/development/substation-kinesis-tap/main.go @@ -14,17 +14,19 @@ import ( "syscall" "time" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/awslabs/kinesis-aggregation/go/v2/deaggregator" "golang.org/x/sync/errgroup" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/awslabs/kinesis-aggregation/go/deaggregator" - "github.com/brexhq/substation" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/kinesis" - "github.com/brexhq/substation/internal/channel" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/internal/log" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/channel" + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/file" + "github.com/brexhq/substation/v2/internal/log" ) type options struct { @@ -141,15 +143,21 @@ func run(ctx context.Context, opts options) error { // The AWS client is configured using environment variables // or the default credentials file. - client := kinesis.API{} - client.Setup(aws.Config{}) + awsCfg, err := iconfig.NewAWS(ctx, iconfig.AWS{}) + if err != nil { + return err + } - res, err := client.ListShards(ctx, opts.StreamName) + client := kinesis.NewFromConfig(awsCfg) + + resp, err := client.ListShards(ctx, &kinesis.ListShardsInput{ + StreamName: &opts.StreamName, + }) if err != nil { return err } - log.WithField("stream", opts.StreamName).WithField("count", len(res.Shards)).Debug("Retrieved active shards from Kinesis stream.") + log.WithField("stream", opts.StreamName).WithField("count", len(resp.Shards)).Debug("Retrieved active shards from Kinesis stream.") var iType string switch opts.StreamOffset { @@ -177,8 +185,12 @@ func run(ctx context.Context, opts options) error { // // Each shard is paginated until the end of the shard is // reached or the context is cancelled. - for _, shard := range res.Shards { - iterator, err := client.GetShardIterator(ctx, opts.StreamName, *shard.ShardId, iType) + for _, shard := range resp.Shards { + iterator, err := client.GetShardIterator(ctx, &kinesis.GetShardIteratorInput{ + StreamName: &opts.StreamName, + ShardId: shard.ShardId, + ShardIteratorType: types.ShardIteratorType(iType), + }) if err != nil { return err } @@ -197,25 +209,27 @@ func run(ctx context.Context, opts options) error { // per shard, so this loop is designed to not overload // the API in case other consumers are reading from the // same shard. 
- res, err := client.GetRecords(recvCtx, shardIterator) + resp, err := client.GetRecords(recvCtx, &kinesis.GetRecordsInput{ + ShardIterator: &shardIterator, + }) if err != nil { return err } - if res.NextShardIterator == nil { + if resp.NextShardIterator == nil { log.WithField("stream", opts.StreamName).WithField("shard", shard.ShardId).Debug("Reached end of Kinesis shard.") break } - shardIterator = *res.NextShardIterator + shardIterator = *resp.NextShardIterator - if len(res.Records) == 0 { + if len(resp.Records) == 0 { time.Sleep(500 * time.Millisecond) continue } - deagg, err := deaggregator.DeaggregateRecords(res.Records) + deagg, err := deaggregator.DeaggregateRecords(resp.Records) if err != nil { return err } diff --git a/condition/condition.go b/condition/condition.go index d629758a..d3826c36 100644 --- a/condition/condition.go +++ b/condition/condition.go @@ -1,43 +1,33 @@ -// Package condition provides functions for evaluating data. package condition import ( "context" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// errOperatorMissingInspectors is returned when an Operator that requires -// inspectors is created with no inspectors. -var errOperatorMissingInspectors = fmt.Errorf("missing inspectors") + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" -type Config struct { - Operator string `json:"operator"` - Inspectors []config.Config `json:"inspectors"` -} + iconfig "github.com/brexhq/substation/v2/internal/config" +) -type inspector interface { - Inspect(context.Context, *message.Message) (bool, error) +type Conditioner interface { + Condition(context.Context, *message.Message) (bool, error) } -// newInspector returns a configured Inspector from an Inspector configuration. -func newInspector(ctx context.Context, cfg config.Config) (inspector, error) { //nolint: cyclop, gocyclo // ignore cyclomatic complexity +func New(ctx context.Context, cfg config.Config) (Conditioner, error) { //nolint: cyclop, gocyclo // ignore cyclomatic complexity switch cfg.Type { + // Meta inspectors. + case "all", "meta_all": + return newMetaAll(ctx, cfg) + case "any", "meta_any": + return newMetaAny(ctx, cfg) + case "none", "meta_none": + return newMetaNone(ctx, cfg) // Format inspectors. case "format_mime": return newFormatMIME(ctx, cfg) case "format_json": return newFormatJSON(ctx, cfg) - // Meta inspectors. - case "meta_condition": - return newMetaCondition(ctx, cfg) - case "meta_for_each": - return newMetaForEach(ctx, cfg) - case "meta_negate": - return newMetaNegate(ctx, cfg) // Network inspectors. case "network_ip_global_unicast": return newNetworkIPGlobalUnicast(ctx, cfg) @@ -97,138 +87,6 @@ func newInspector(ctx context.Context, cfg config.Config) (inspector, error) { / case "utility_random": return newUtilityRandom(ctx, cfg) default: - return nil, fmt.Errorf("condition: new_inspector: type %q settings %+v: %v", cfg.Type, cfg.Settings, errors.ErrInvalidFactoryInput) - } -} - -func newInspectors(ctx context.Context, conf ...config.Config) ([]inspector, error) { - var inspectors []inspector - for _, c := range conf { - insp, err := newInspector(ctx, c) - if err != nil { - return nil, err - } - inspectors = append(inspectors, insp) - } - return inspectors, nil -} - -type Operator interface { - Operate(context.Context, *message.Message) (bool, error) -} - -// New returns a configured Operator from an Operator configuration. 
-func New(ctx context.Context, cfg Config) (Operator, error) { - inspectors, err := newInspectors(ctx, cfg.Inspectors...) - if err != nil { - return nil, err - } - - switch cfg.Operator { - case "all": - return &opAll{inspectors}, nil - case "any": - return &opAny{inspectors}, nil - case "none": - return &opNone{inspectors}, nil - default: - return &opEmpty{}, nil - } -} - -type opAll struct { - Inspectors []inspector `json:"inspectors"` -} - -// Operate returns true if all inspectors return true, otherwise it returns false. -func (o *opAll) Operate(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if len(o.Inspectors) == 0 { - return false, fmt.Errorf("condition: operate: inspectors %+v: %v", o, errOperatorMissingInspectors) - } - - for _, i := range o.Inspectors { - ok, err := i.Inspect(ctx, msg) - if err != nil { - return false, err - } - - // return false if any check fails - if !ok { - return false, nil - } + return nil, fmt.Errorf("condition %s: %w", cfg.Type, iconfig.ErrInvalidFactoryInput) } - - // return tue if all checks pass - return true, nil -} - -type opAny struct { - Inspectors []inspector `json:"inspectors"` -} - -// Operate returns true if any inspectors return true, otherwise it returns false. -func (o *opAny) Operate(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if len(o.Inspectors) == 0 { - return false, fmt.Errorf("condition: operate: inspectors %+v: %v", o, errOperatorMissingInspectors) - } - - for _, i := range o.Inspectors { - ok, err := i.Inspect(ctx, msg) - if err != nil { - return false, err - } - - // return true if any check passes - if ok { - return true, nil - } - } - - // return false if all checks fail - return false, nil -} - -type opNone struct { - Inspectors []inspector `json:"inspectors"` -} - -// Operate returns true if all inspectors return false, otherwise it returns true. -func (o *opNone) Operate(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - if len(o.Inspectors) == 0 { - return false, fmt.Errorf("condition: operate: inspectors %+v: %v", o, errOperatorMissingInspectors) - } - - for _, i := range o.Inspectors { - ok, err := i.Inspect(ctx, msg) - if err != nil { - return false, err - } - - // return false if any check passes - if ok { - return false, nil - } - } - - // return true if all checks fail - return true, nil -} - -type opEmpty struct{} - -// Operate always returns true. -func (o *opEmpty) Operate(ctx context.Context, msg *message.Message) (bool, error) { - return true, nil } diff --git a/condition/condition_example_test.go b/condition/condition_example_test.go deleted file mode 100644 index ac89581f..00000000 --- a/condition/condition_example_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package condition_test - -import ( - "context" - "fmt" - - "github.com/brexhq/substation/condition" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -func ExampleOperator() { - ctx := context.TODO() - - // Multiple inspectors can be chained together with an operator. - // This example uses the "all" operator, which requires all inspectors to - // return true for the operator to return true. 
- cfg := condition.Config{ - Operator: "all", - Inspectors: []config.Config{ - { - Type: "number_length_less_than", - Settings: map[string]interface{}{ - "value": 10, - }, - }, - { - Type: "string_contains", - Settings: map[string]interface{}{ - "value": "f", - }, - }, - }, - } - - // Operators are retrieved from the factory and - // applied to a message. - op, err := condition.New(ctx, cfg) - if err != nil { - // handle err - panic(err) - } - - msg := message.New().SetData([]byte("fizzy")) - if err != nil { - // handle err - panic(err) - } - - ok, err := op.Operate(ctx, msg) - if err != nil { - // handle err - panic(err) - } - - // Output: true - fmt.Println(ok) -} diff --git a/condition/condition_test.go b/condition/condition_test.go deleted file mode 100644 index cfe88b36..00000000 --- a/condition/condition_test.go +++ /dev/null @@ -1,505 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var allTests = []struct { - name string - conf []config.Config - test []byte - expected bool -}{ - { - "format_mime", - []config.Config{ - { - Type: "format_mime", - Settings: map[string]interface{}{ - "type": "application/x-gzip", - }, - }, - }, - []byte{80, 75, 3, 4}, - false, - }, - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - }, - []byte("foo"), - true, - }, - { - "pattern", - []config.Config{ - { - Type: "string_match", - Settings: map[string]interface{}{ - "pattern": "^foo$", - }, - }, - }, - []byte("foo"), - true, - }, - { - "content", - []config.Config{ - { - Type: "format_mime", - Settings: map[string]interface{}{ - "type": "application/x-gzip", - }, - }, - }, - []byte{80, 75, 3, 4}, - false, - }, - { - "length", - []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - []byte("foo"), - true, - }, - { - "string length", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - []byte("foo"), - true, - }, - // this test joins multiple ANY operators with an ALL operator, implementing the following logic: - // if ( "foo" starts with "f" OR "foo" ends with "b" ) AND ( len("foo") == 3 ) then return true - { - "condition", - []config.Config{ - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "any", - "inspectors": []config.Config{ - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "f", - }, - }, - { - Type: "string_ends_with", - Settings: map[string]interface{}{ - "value": "b", - }, - }, - }, - }, - }, - }, - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "all", - "inspectors": []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - }, - }, - }, - }, - []byte("foo"), - true, - }, -} - -func TestAll(t *testing.T) { - ctx := context.TODO() - - for _, test := range allTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - cfg := Config{ - Operator: "all", - Inspectors: test.conf, - } - - op, err := New(ctx, cfg) - if err != nil { - t.Error(err) - } - - ok, err := op.Operate(ctx, message) - if err != nil { - t.Error(err) - } - - 
if ok != test.expected { - t.Errorf("expected %v, got %v", test.expected, ok) - } - }) - } -} - -func benchmarkAll(b *testing.B, conf []config.Config, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - inspectors, _ := newInspectors(ctx, conf...) - op := opAll{inspectors} - _, _ = op.Operate(ctx, message) - } -} - -func BenchmarkAll(b *testing.B) { - for _, test := range allTests { - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkAll(b, test.conf, message) - }, - ) - } -} - -var anyTests = []struct { - name string - conf []config.Config - test []byte - expected bool -}{ - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "baz", - }, - }, - }, - []byte("foo"), - true, - }, - { - "length", - []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 4, - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 5, - }, - }, - }, - []byte("foo"), - true, - }, - { - "string length", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 4, - }, - }, - }, - []byte("foo"), - true, - }, - // this test joins multiple ALL operators with an ANY operator, implementing the following logic: - // if ( len("foo") == 4 AND "foo" starts with "f" ) OR ( len("foo") == 3 ) then return true - { - "condition", - []config.Config{ - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "all", - "inspectors": []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 4, - }, - }, - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "f", - }, - }, - }, - }, - }, - }, - { - Type: "meta_condition", - Settings: map[string]interface{}{ - "condition": map[string]interface{}{ - "operator": "all", - "inspectors": []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "value": 3, - }, - }, - }, - }, - }, - }, - }, - []byte("foo"), - true, - }, -} - -func TestAny(t *testing.T) { - ctx := context.TODO() - - for _, test := range anyTests { - message := message.New().SetData(test.test) - - cfg := Config{ - Operator: "any", - Inspectors: test.conf, - } - - op, err := New(ctx, cfg) - if err != nil { - t.Error(err) - } - - ok, err := op.Operate(ctx, message) - if err != nil { - t.Error(err) - } - - if ok != test.expected { - t.Errorf("expected %v, got %v", test.expected, ok) - } - } -} - -func benchmarkAny(b *testing.B, conf []config.Config, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - inspectors, _ := newInspectors(ctx, conf...) 
- op := opAny{inspectors} - _, _ = op.Operate(ctx, message) - } -} - -func BenchmarkAny(b *testing.B) { - for _, test := range anyTests { - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkAny(b, test.conf, message) - }, - ) - } -} - -var noneTests = []struct { - name string - conf []config.Config - test []byte - expected bool -}{ - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "baz", - }, - }, - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "b", - }, - }, - }, - []byte("foo"), - true, - }, - { - "string", - []config.Config{ - { - Type: "string_equal_to", - Settings: map[string]interface{}{ - "value": "foo", - }, - }, - { - Type: "string_starts_with", - Settings: map[string]interface{}{ - "value": "b", - }, - }, - }, - []byte("foo"), - false, - }, - { - "length", - []config.Config{ - { - Type: "number_length_equal_to", - Settings: map[string]interface{}{ - "type": "equals", - "value": 0, - }, - }, - { - Type: "meta_negate", - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "value": "f", - }, - }, - }, - }, - }, - []byte("foo"), - true, - }, -} - -func TestNone(t *testing.T) { - ctx := context.TODO() - - for _, test := range noneTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.test) - cfg := Config{ - Operator: "none", - Inspectors: test.conf, - } - - op, err := New(ctx, cfg) - if err != nil { - t.Error(err) - } - - ok, err := op.Operate(ctx, message) - if err != nil { - t.Error(err) - } - - if ok != test.expected { - t.Errorf("expected %v, got %v", test.expected, ok) - } - }) - } -} - -func benchmarkNone(b *testing.B, conf []config.Config, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - inspectors, _ := newInspectors(ctx, conf...) 
- op := opNone{inspectors} - _, _ = op.Operate(ctx, message) - } -} - -func BenchmarkNone(b *testing.B) { - for _, test := range noneTests { - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkNone(b, test.conf, message) - }, - ) - } -} - -func TestNewInspector(t *testing.T) { - for _, test := range allTests { - _, err := newInspector(context.TODO(), test.conf[0]) - if err != nil { - t.Error(err) - } - } -} - -func benchmarknewInspector(b *testing.B, conf config.Config) { - for i := 0; i < b.N; i++ { - _, _ = newInspector(context.TODO(), conf) - } -} - -func BenchmarkNewInspector(b *testing.B) { - for _, test := range allTests { - b.Run(test.name, - func(b *testing.B) { - benchmarknewInspector(b, test.conf[0]) - }, - ) - } -} diff --git a/condition/format_json.go b/condition/format_json.go index 7105a604..83b6bb18 100644 --- a/condition/format_json.go +++ b/condition/format_json.go @@ -4,9 +4,10 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + iconfig "github.com/brexhq/substation/v2/internal/config" - "github.com/brexhq/substation/message" ) type formatJSONConfig struct{} @@ -32,7 +33,7 @@ type formatJSON struct { conf formatJSONConfig } -func (c *formatJSON) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (c *formatJSON) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/format_json_test.go b/condition/format_json_test.go index cd7603e6..28c8f314 100644 --- a/condition/format_json_test.go +++ b/condition/format_json_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &formatJSON{} +var _ Conditioner = &formatJSON{} var jsonValidTests = []struct { name string @@ -53,7 +53,7 @@ func TestFormatJSON(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -68,7 +68,7 @@ func TestFormatJSON(t *testing.T) { func benchmarkFormatJSONByte(b *testing.B, insp *formatJSON, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/format_mime.go b/condition/format_mime.go index f5889a41..06f38cd1 100644 --- a/condition/format_mime.go +++ b/condition/format_mime.go @@ -5,11 +5,11 @@ import ( "context" "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/media" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/media" ) type formatMIMEConfig struct { @@ -25,7 +25,7 @@ func (c *formatMIMEConfig) Decode(in interface{}) error { return iconfig.Decode(in, c) } func (c *formatMIMEConfig) Validate() error { if c.Type == "" { - return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("type: %v", iconfig.ErrMissingRequiredOption) } return nil @@ -52,7 +52,7 @@ type formatMIME struct { conf formatMIMEConfig } -func (c *formatMIME)
Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (c *formatMIME) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/format_mime_test.go b/condition/format_mime_test.go index a33dfd7f..104fd7ca 100644 --- a/condition/format_mime_test.go +++ b/condition/format_mime_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &formatMIME{} +var _ Conditioner = &formatMIME{} var formatMIMETests = []struct { name string @@ -87,7 +87,7 @@ func TestFormatMIME(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -102,7 +102,7 @@ func TestFormatMIME(t *testing.T) { func benchmarkFormatMIME(b *testing.B, insp *formatMIME, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/meta.go b/condition/meta.go new file mode 100644 index 00000000..05791f4a --- /dev/null +++ b/condition/meta.go @@ -0,0 +1,26 @@ +package condition + +import ( + "fmt" + + "github.com/brexhq/substation/v2/config" + iconfig "github.com/brexhq/substation/v2/internal/config" +) + +type metaConfig struct { + Conditions []config.Config `json:"conditions"` + + Object iconfig.Object `json:"object"` +} + +func (c *metaConfig) Decode(in interface{}) error { + return iconfig.Decode(in, c) +} + +func (c *metaConfig) Validate() error { + if len(c.Conditions) == 0 { + return fmt.Errorf("conditions: %v", iconfig.ErrMissingRequiredOption) + } + + return nil +} diff --git a/condition/meta_all.go b/condition/meta_all.go new file mode 100644 index 00000000..29d02ac3 --- /dev/null +++ b/condition/meta_all.go @@ -0,0 +1,93 @@ +package condition + +import ( + "context" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" +) + +func newMetaAll(ctx context.Context, cfg config.Config) (*metaAll, error) { + conf := metaConfig{} + if err := conf.Decode(cfg.Settings); err != nil { + return nil, err + } + + cnd := metaAll{ + conf: conf, + } + + cnd.cnds = make([]Conditioner, len(conf.Conditions)) + for i, c := range conf.Conditions { + cond, err := New(ctx, c) + if err != nil { + return nil, err + } + + cnd.cnds[i] = cond + } + + return &cnd, nil +} + +type metaAll struct { + conf metaConfig + + cnds []Conditioner +} + +func (c *metaAll) Condition(ctx context.Context, msg *message.Message) (bool, error) { + if c.conf.Object.SourceKey == "" { + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, msg) + if err != nil { + return false, err + } + + if !ok { + return false, nil + } + } + + return true, nil + } + + value := msg.GetValue(c.conf.Object.SourceKey) + if !value.Exists() { + return false, nil + } + + if !value.IsArray() { + m := message.New().SetData(value.Bytes()).SetMetadata(msg.Metadata()) + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, m) + if err != nil { + return false, err + } + + if !ok { + return false, nil + } + } + + return true, nil + } + + for _, v := range value.Array() { + m := message.New().SetData(v.Bytes()).SetMetadata(msg.Metadata()) + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, m) + if err != nil { + return false, err + } + + // If any of the values in 
the array do not match, then this returns false. + if !ok { + return false, nil + } + } + } + + // At this point every value in the array matched, so this returns true. + return true, nil +} diff --git a/condition/meta_all_test.go b/condition/meta_all_test.go new file mode 100644 index 00000000..d636cb6f --- /dev/null +++ b/condition/meta_all_test.go @@ -0,0 +1,179 @@ +package condition + +import ( + "context" + "testing" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" +) + +var _ Conditioner = &metaAll{} + +var metaAllTests = []struct { + name string + cfg config.Config + data []byte + expected bool +}{ + { + "data", + config.Config{ + Settings: map[string]interface{}{ + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "a", + }, + }, + }, + }, + }, + []byte("a"), + true, + }, + { + "object", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "z", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "a", + }, + }, + }, + }, + }, + []byte(`{"z":"a"}`), + true, + }, + // In this test the data is interpreted as a JSON array, as specified + // by the source_key. This test fails because not every element in the + // array contains "a". + { + "array", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "@this", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "a", + }, + }, + }, + }, + }, + []byte(`["a","a","b"]`), + false, + }, + { + "array", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "@this", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "a", + }, + }, + }, + }, + }, + []byte(`["a","a","a"]`), + true, + }, + { + "object_array", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "z", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "a", + }, + }, + }, + }, + }, + []byte(`{"z":["a","a","a"]}`), + true, + }, + // This test passes because both inspectors match the input. + { + "object_mixed", + config.Config{ + Settings: map[string]interface{}{ + "conditions": []config.Config{ + // This inspector passes because the elements in the array contains "a". + { + Type: "all", + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "z", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "pattern": "a", + }, + }, + }, + }, + }, + // This inspector passes because the data matches the pattern "^{.*}$". 
+ { + Type: "string_match", + Settings: map[string]interface{}{ + "pattern": "^{.*}$", + }, + }, + }, + }, + }, + []byte(`{"z":["a","a","a"]}`), + true, + }, +} + +func TestAllCondition(t *testing.T) { + ctx := context.TODO() + + for _, test := range metaAllTests { + t.Run(test.name, func(t *testing.T) { + message := message.New().SetData(test.data) + + insp, err := newMetaAll(ctx, test.cfg) + if err != nil { + t.Fatal(err) + } + + check, err := insp.Condition(ctx, message) + if err != nil { + t.Error(err) + } + + if test.expected != check { + t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) + } + }) + } +} diff --git a/condition/meta_any.go b/condition/meta_any.go new file mode 100644 index 00000000..422bc3c6 --- /dev/null +++ b/condition/meta_any.go @@ -0,0 +1,92 @@ +package condition + +import ( + "context" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" +) + +func newMetaAny(ctx context.Context, cfg config.Config) (*metaAny, error) { + conf := metaConfig{} + if err := conf.Decode(cfg.Settings); err != nil { + return nil, err + } + + cnd := metaAny{ + conf: conf, + } + + cnd.cnds = make([]Conditioner, len(conf.Conditions)) + for i, c := range conf.Conditions { + cond, err := New(ctx, c) + if err != nil { + return nil, err + } + + cnd.cnds[i] = cond + } + + return &cnd, nil +} + +type metaAny struct { + conf metaConfig + + cnds []Conditioner +} + +func (c *metaAny) Condition(ctx context.Context, msg *message.Message) (bool, error) { + if c.conf.Object.SourceKey == "" { + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, msg) + if err != nil { + return false, err + } + + if ok { + return true, nil + } + } + + return false, nil + } + + value := msg.GetValue(c.conf.Object.SourceKey) + if !value.Exists() { + return false, nil + } + + if !value.IsArray() { + m := message.New().SetData(value.Bytes()).SetMetadata(msg.Metadata()) + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, m) + if err != nil { + return false, err + } + + if ok { + return true, nil + } + } + + return false, nil + } + + for _, v := range value.Array() { + m := message.New().SetData(v.Bytes()).SetMetadata(msg.Metadata()) + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, m) + if err != nil { + return false, err + } + + // If any of the values in the array match, then this returns true. 
+ if ok { + return true, nil + } + } + } + + return false, nil +} diff --git a/condition/meta_any_test.go b/condition/meta_any_test.go new file mode 100644 index 00000000..9ed2f46b --- /dev/null +++ b/condition/meta_any_test.go @@ -0,0 +1,179 @@ +package condition + +import ( + "context" + "testing" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" +) + +var _ Conditioner = &metaAny{} + +var metaAnyTests = []struct { + name string + cfg config.Config + data []byte + expected bool +}{ + { + "data", + config.Config{ + Settings: map[string]interface{}{ + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "c", + }, + }, + }, + }, + }, + []byte("abc"), + true, + }, + { + "object", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "z", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "c", + }, + }, + }, + }, + }, + []byte(`{"z":"abc"}`), + true, + }, + // In this test the data is interpreted as a JSON array, as specified + // by the source_key. This test passes because at least one element in + // the array contains "c". + { + "array", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "@this", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "c", + }, + }, + }, + }, + }, + []byte(`["a","b","c"]`), + true, + }, + { + "array", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "@this", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "d", + }, + }, + }, + }, + }, + []byte(`["a","b","c"]`), + false, + }, + { + "object_array", + config.Config{ + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "z", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "value": "c", + }, + }, + }, + }, + }, + []byte(`{"z":["a","b","c"]}`), + true, + }, + // This test passes because at least one inspector matches the input. + { + "object_mixed", + config.Config{ + Settings: map[string]interface{}{ + "conditions": []config.Config{ + // This inspector fails because no element in the array contains "d". + { + Type: "any", + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "source_key": "z", + }, + "conditions": []config.Config{ + { + Type: "string_contains", + Settings: map[string]interface{}{ + "pattern": "d", + }, + }, + }, + }, + }, + // This inspector passes because the data matches the pattern "^{.*}$". 
+ { + Type: "string_match", + Settings: map[string]interface{}{ + "pattern": "^{.*}$", + }, + }, + }, + }, + }, + []byte(`{"z":["a","b","c"]}`), + true, + }, +} + +func TestAnyCondition(t *testing.T) { + ctx := context.TODO() + + for _, test := range metaAnyTests { + t.Run(test.name, func(t *testing.T) { + message := message.New().SetData(test.data) + + insp, err := newMetaAny(ctx, test.cfg) + if err != nil { + t.Fatal(err) + } + + check, err := insp.Condition(ctx, message) + if err != nil { + t.Error(err) + } + + if test.expected != check { + t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) + } + }) + } +} diff --git a/condition/meta_condition.go b/condition/meta_condition.go deleted file mode 100644 index d657d2ee..00000000 --- a/condition/meta_condition.go +++ /dev/null @@ -1,79 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type metaConditionConfig struct { - // Condition used to inspect the message. - Condition Config `json:"condition"` -} - -func (c *metaConditionConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaConditionConfig) Validate() error { - if c.Condition.Operator == "" { - return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaCondition(ctx context.Context, cfg config.Config) (*metaCondition, error) { - conf := metaConditionConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - op, err := New(ctx, conf.Condition) - if err != nil { - return nil, err - } - - meta := metaCondition{ - conf: conf, - op: op, - } - - return &meta, nil -} - -type metaCondition struct { - conf metaConditionConfig - - op Operator -} - -func (c *metaCondition) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { - if msg.IsControl() { - return false, nil - } - - // This inspector does not directly interpret data, instead the - // message is passed through and each configured inspector - // applies its own data interpretation. 
- match, err := c.op.Operate(ctx, msg) - if err != nil { - return false, err - } - - return match, nil -} - -func (c *metaCondition) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/condition/meta_condition_test.go b/condition/meta_condition_test.go deleted file mode 100644 index e9d249b1..00000000 --- a/condition/meta_condition_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaCondition{} - -var metaConditionTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "condition": Config{ - Operator: "all", - Inspectors: []config.Config{ - { - Type: "string_contains", - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "string": "bcd", - }, - }, - }, - }, - }, - }, - []byte(`{"a":"bcd"}`), - true, - }, - { - "data", - config.Config{ - Settings: map[string]interface{}{ - "condition": Config{ - Operator: "all", - Inspectors: []config.Config{ - { - Type: "string_contains", - Settings: map[string]interface{}{ - "string": "bcd", - }, - }, - }, - }, - }, - }, - []byte("bcd"), - true, - }, -} - -func TestMetaCondition(t *testing.T) { - ctx := context.TODO() - - for _, test := range metaConditionTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newMetaCondition(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) - } - }) - } -} - -func benchmarkMetaCondition(b *testing.B, inspector *metaCondition, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = inspector.Inspect(ctx, message) - } -} - -func BenchmarkMetaCondition(b *testing.B) { - for _, test := range metaConditionTests { - insp, err := newMetaCondition(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkMetaCondition(b, insp, message) - }, - ) - } -} diff --git a/condition/meta_err.go b/condition/meta_err.go deleted file mode 100644 index 7a753603..00000000 --- a/condition/meta_err.go +++ /dev/null @@ -1,107 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type metaErrConfig struct { - // Inspector used to inspect the message. If the inspector - // throws an error, this inspector will return false. - Inspector config.Config `json:"inspector"` - // ErrorMessages are regular expressions that match error messages and determine - // if the error should be caught. - // - // This is optional and defaults to an empty list (all errors are caught). 
- ErrorMessages []string `json:"error_messages"` -} - -func (c *metaErrConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaErrConfig) Validate() error { - if c.Inspector.Type == "" { - return fmt.Errorf("inspector: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaErr(ctx context.Context, cfg config.Config) (*metaErr, error) { - conf := metaErrConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - i, err := newInspector(ctx, conf.Inspector) - if err != nil { - return nil, fmt.Errorf("condition: meta_err: %v", err) - } - - meta := metaErr{ - conf: conf, - insp: i, - } - - meta.errorMessages = make([]*regexp.Regexp, len(conf.ErrorMessages)) - for i, em := range conf.ErrorMessages { - re, err := regexp.Compile(em) - if err != nil { - return nil, fmt.Errorf("condition: meta_err: %v", err) - } - - meta.errorMessages[i] = re - } - - return &meta, nil -} - -type metaErr struct { - conf metaErrConfig - - insp inspector - errorMessages []*regexp.Regexp -} - -func (c *metaErr) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - match, err := c.insp.Inspect(ctx, msg) - if err != nil { - // Catch all errors. - if len(c.errorMessages) == 0 { - return false, nil - } - - // Catch specific errors. - for _, re := range c.errorMessages { - if re.MatchString(err.Error()) { - return false, nil - } - } - - return false, fmt.Errorf("condition: meta_err: %v", err) - } - - return match, nil -} - -func (c *metaErr) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/condition/meta_err_test.go b/condition/meta_err_test.go deleted file mode 100644 index d5ccc699..00000000 --- a/condition/meta_err_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaErr{} - -var metaErrTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "catch_all", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "c", - }, - }, - "type": "any", - }, - "type": "meta_for_each", - }, - }, - }, - []byte(`{"a":"bcd"}`), - false, - }, - { - "catch_one", - config.Config{ - Settings: map[string]interface{}{ - "error_messages": []string{"input must be an array"}, - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "c", - }, - }, - "type": "any", - }, - "type": "meta_for_each", - }, - }, - }, - []byte(`{"a":"bcd"}`), - false, - }, - { - "no_error", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "c", - }, - }, - "type": "any", - }, - "type": "meta_for_each", - }, - }, - }, - 
[]byte(`{"a":["bcd"]}`), - true, - }, -} - -func TestMetaErr(t *testing.T) { - ctx := context.TODO() - - for _, test := range metaErrTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newMetaErr(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) - } - }) - } -} - -func benchmarkMetaErr(b *testing.B, insp *metaErr, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkMetaErr(b *testing.B) { - for _, test := range metaErrTests { - insp, err := newMetaErr(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkMetaErr(b, insp, message) - }, - ) - } -} diff --git a/condition/meta_for_each.go b/condition/meta_for_each.go deleted file mode 100644 index 433102b0..00000000 --- a/condition/meta_for_each.go +++ /dev/null @@ -1,141 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "golang.org/x/exp/slices" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type metaForEachConfig struct { - // Type determines the method of combining results from the inspector. - // - // Must be one of: - // - none: none of the elements match the inspector - // - any: at least one of the elements match the inspector - // - all: all of the elements match the inspector - Type string `json:"type"` - // Inspector applied to each element. - Inspector config.Config `json:"inspector"` - - Object iconfig.Object `json:"object"` -} - -func (c *metaForEachConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaForEachConfig) Validate() error { - if c.Type == "" { - return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption) - } - - if !slices.Contains( - []string{ - "none", - "any", - "all", - }, - c.Type) { - return fmt.Errorf("type %q: %v", c.Type, errors.ErrInvalidOption) - } - - if c.Inspector.Type == "" { - return fmt.Errorf("inspector: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaForEach(ctx context.Context, cfg config.Config) (*metaForEach, error) { - conf := metaForEachConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - i, err := newInspector(ctx, conf.Inspector) - if err != nil { - return nil, fmt.Errorf("condition: meta_for_each: %v", err) - } - - meta := metaForEach{ - conf: conf, - insp: i, - } - - return &meta, nil -} - -type metaForEach struct { - conf metaForEachConfig - - insp inspector -} - -func (c *metaForEach) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - // This is required to support JSON arrays and objects. 
- var value message.Value - if c.conf.Object.SourceKey == "" { - value = msg.GetValue("@this") - } else { - value = msg.GetValue(c.conf.Object.SourceKey) - } - - if !value.Exists() { - return false, nil - } - - if !value.IsArray() { - return false, fmt.Errorf("condition: meta_for_each: %v", "input must be an array") - } - - var results []bool - for _, res := range value.Array() { - data := []byte(res.String()) - msg := message.New().SetData(data) - - inspected, err := c.insp.Inspect(ctx, msg) - if err != nil { - return false, fmt.Errorf("condition: meta_for_each: %v", err) - } - results = append(results, inspected) - } - - total := len(results) - matched := 0 - for _, v := range results { - if v { - matched++ - } - } - - switch c.conf.Type { - case "any": - return matched > 0, nil - case "all": - return total == matched, nil - case "none": - return matched == 0, nil - } - - return false, nil -} - -func (c *metaForEach) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/condition/meta_for_each_test.go b/condition/meta_for_each_test.go deleted file mode 100644 index 509e4d88..00000000 --- a/condition/meta_for_each_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaForEach{} - -var metaForEachTests = []struct { - name string - cfg config.Config - test []byte - expected bool - err error -}{ - { - "string starts_with all", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "all", - "inspector": map[string]interface{}{ - "type": "string_starts_with", - "settings": map[string]interface{}{ - "string": "f", - }, - }, - }, - }, - []byte(`{"input":["foo","fizz","flop"]}`), - true, - nil, - }, - { - "ip private all", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "all", - "inspector": map[string]interface{}{ - "type": "network_ip_private", - }, - }, - }, - []byte(`{"input":["192.168.1.2","10.0.42.1","172.16.4.2"]}`), - true, - nil, - }, - { - "string_match", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "any", - "inspector": map[string]interface{}{ - "type": "string_match", - "settings": map[string]interface{}{ - "pattern": "^fizz$", - }, - }, - }, - }, - []byte(`{"input":["foo","fizz","flop"]}`), - true, - nil, - }, - { - "string greater_than", - config.Config{ - Settings: map[string]interface{}{ - "type": "any", - "inspector": map[string]interface{}{ - "type": "string_greater_than", - "settings": map[string]interface{}{ - "string": "0", - }, - }, - }, - }, - []byte(`[0,1,2]`), - true, - nil, - }, - { - "length none", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "none", - "inspector": map[string]interface{}{ - "type": "number_length_greater_than", - "settings": map[string]interface{}{ - "value": 7, - }, - }, - }, - }, - []byte(`{"input":["fooo","fizz","flop"]}`), - true, - nil, - }, - { - "length all", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "input", - }, - "type": "all", - "inspector": map[string]interface{}{ - "type": "number_length_equal_to", - "settings": map[string]interface{}{ - "value": 4, - }, - }, - }, - }, - 
[]byte(`{"input":["fooo","fizz","flop"]}`), - true, - nil, - }, -} - -func TestMetaForEach(t *testing.T) { - ctx := context.TODO() - - for _, tt := range metaForEachTests { - t.Run(tt.name, func(t *testing.T) { - message := message.New().SetData(tt.test) - - insp, err := newMetaForEach(ctx, tt.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if tt.expected != check { - t.Errorf("expected %v, got %v, %v", tt.expected, check, string(tt.test)) - } - }) - } -} - -func benchmarkMetaForEach(b *testing.B, insp *metaForEach, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkMetaForEach(b *testing.B) { - for _, test := range metaForEachTests { - insp, err := newMetaForEach(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.test) - benchmarkMetaForEach(b, insp, message) - }, - ) - } -} diff --git a/condition/meta_negate.go b/condition/meta_negate.go deleted file mode 100644 index c61ec60b..00000000 --- a/condition/meta_negate.go +++ /dev/null @@ -1,77 +0,0 @@ -package condition - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" -) - -type metaNegateConfig struct { - // Inspector used to inspect the message. - Inspector config.Config `json:"inspector"` -} - -func (c *metaNegateConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *metaNegateConfig) Validate() error { - if c.Inspector.Type == "" { - return fmt.Errorf("inspector: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newMetaNegate(ctx context.Context, cfg config.Config) (*metaNegate, error) { - conf := metaNegateConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, err - } - - if err := conf.Validate(); err != nil { - return nil, err - } - - i, err := newInspector(ctx, conf.Inspector) - if err != nil { - return nil, fmt.Errorf("condition: meta_negate: %v", err) - } - - meta := metaNegate{ - conf: conf, - insp: i, - } - - return &meta, nil -} - -type metaNegate struct { - conf metaNegateConfig - - insp inspector -} - -func (c *metaNegate) Inspect(ctx context.Context, msg *message.Message) (bool, error) { - if msg.IsControl() { - return false, nil - } - - match, err := c.insp.Inspect(ctx, msg) - if err != nil { - return false, err - } - - return !match, nil -} - -func (c *metaNegate) String() string { - b, _ := json.Marshal(c.conf) - return string(b) -} diff --git a/condition/meta_negate_test.go b/condition/meta_negate_test.go deleted file mode 100644 index 9c8451c9..00000000 --- a/condition/meta_negate_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package condition - -import ( - "context" - "testing" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" -) - -var _ inspector = &metaNegate{} - -var metaNegateTests = []struct { - name string - cfg config.Config - data []byte - expected bool -}{ - { - "object", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "settings": map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - }, - "value": "bcd", - }, - "type": "string_equal_to", - }, - }, - }, - []byte(`{"a":"bcd"}`), - false, - }, - { 
- "data", - config.Config{ - Settings: map[string]interface{}{ - "inspector": map[string]interface{}{ - "type": "string_equal_to", - "settings": map[string]interface{}{ - "value": "bcd", - }, - }, - }, - }, - []byte(`bcd`), - false, - }, -} - -func TestMetaNegate(t *testing.T) { - ctx := context.TODO() - - for _, test := range metaNegateTests { - t.Run(test.name, func(t *testing.T) { - message := message.New().SetData(test.data) - - insp, err := newMetaNegate(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - - check, err := insp.Inspect(ctx, message) - if err != nil { - t.Error(err) - } - - if test.expected != check { - t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) - } - }) - } -} - -func benchmarkMetaNegate(b *testing.B, insp *metaNegate, message *message.Message) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) - } -} - -func BenchmarkMetaNegate(b *testing.B) { - for _, test := range metaNegateTests { - insp, err := newMetaNegate(context.TODO(), test.cfg) - if err != nil { - b.Fatal(err) - } - - b.Run(test.name, - func(b *testing.B) { - message := message.New().SetData(test.data) - benchmarkMetaNegate(b, insp, message) - }, - ) - } -} diff --git a/condition/meta_none.go b/condition/meta_none.go new file mode 100644 index 00000000..c6020c1c --- /dev/null +++ b/condition/meta_none.go @@ -0,0 +1,97 @@ +package condition + +import ( + "context" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" +) + +func newMetaNone(ctx context.Context, cfg config.Config) (*metaNone, error) { + conf := metaConfig{} + if err := conf.Decode(cfg.Settings); err != nil { + return nil, err + } + + if err := conf.Validate(); err != nil { + return nil, err + } + + cnd := metaNone{ + conf: conf, + } + + cnd.cnds = make([]Conditioner, len(conf.Conditions)) + for i, c := range conf.Conditions { + cond, err := New(ctx, c) + if err != nil { + return nil, err + } + + cnd.cnds[i] = cond + } + + return &cnd, nil +} + +type metaNone struct { + conf metaConfig + + cnds []Conditioner +} + +func (c *metaNone) Condition(ctx context.Context, msg *message.Message) (bool, error) { + if c.conf.Object.SourceKey == "" { + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, msg) + if err != nil { + return false, err + } + + if ok { + return false, nil + } + } + + return true, nil + } + + value := msg.GetValue(c.conf.Object.SourceKey) + if !value.Exists() { + return false, nil + } + + if !value.IsArray() { + m := message.New().SetData(msg.Data()).SetMetadata(msg.Metadata()) + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, m) + if err != nil { + return false, err + } + + if ok { + return false, nil + } + } + + return true, nil + } + + for _, v := range value.Array() { + m := message.New().SetData(v.Bytes()).SetMetadata(msg.Metadata()) + for _, cnd := range c.cnds { + ok, err := cnd.Condition(ctx, m) + if err != nil { + return false, err + } + + // If any of the values in the array match, then this returns false. + if ok { + return false, nil + } + } + } + + // At this point every value in the array did not match, so this returns true. 
+	return true, nil
+}
diff --git a/condition/meta_none_test.go b/condition/meta_none_test.go
new file mode 100644
index 00000000..653431ec
--- /dev/null
+++ b/condition/meta_none_test.go
@@ -0,0 +1,179 @@
+package condition
+
+import (
+	"context"
+	"testing"
+
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+)
+
+var _ Conditioner = &metaNone{}
+
+var metaNoneTests = []struct {
+	name     string
+	cfg      config.Config
+	data     []byte
+	expected bool
+}{
+	{
+		"data",
+		config.Config{
+			Settings: map[string]interface{}{
+				"conditions": []config.Config{
+					{
+						Type: "string_contains",
+						Settings: map[string]interface{}{
+							"value": "a",
+						},
+					},
+				},
+			},
+		},
+		[]byte("bcd"),
+		true,
+	},
+	{
+		"object",
+		config.Config{
+			Settings: map[string]interface{}{
+				"object": map[string]interface{}{
+					"source_key": "z",
+				},
+				"conditions": []config.Config{
+					{
+						Type: "string_contains",
+						Settings: map[string]interface{}{
+							"value": "a",
+						},
+					},
+				},
+			},
+		},
+		[]byte(`{"z":"bcd"}`),
+		true,
+	},
+	// In this test the data is interpreted as a JSON array, as specified
+	// by the source_key. This test passes because no elements in the array
+	// contain "d".
+	{
+		"array",
+		config.Config{
+			Settings: map[string]interface{}{
+				"object": map[string]interface{}{
+					"source_key": "@this",
+				},
+				"conditions": []config.Config{
+					{
+						Type: "string_contains",
+						Settings: map[string]interface{}{
+							"value": "d",
+						},
+					},
+				},
+			},
+		},
+		[]byte(`["a","b","c"]`),
+		true,
+	},
+	// This test returns false because one element in the array contains "a".
+	{
+		"array_match",
+		config.Config{
+			Settings: map[string]interface{}{
+				"object": map[string]interface{}{
+					"source_key": "@this",
+				},
+				"conditions": []config.Config{
+					{
+						Type: "string_contains",
+						Settings: map[string]interface{}{
+							"value": "a",
+						},
+					},
+				},
+			},
+		},
+		[]byte(`["a","b","c"]`),
+		false,
+	},
+	{
+		"object_array",
+		config.Config{
+			Settings: map[string]interface{}{
+				"object": map[string]interface{}{
+					"source_key": "z",
+				},
+				"conditions": []config.Config{
+					{
+						Type: "string_contains",
+						Settings: map[string]interface{}{
+							"value": "a",
+						},
+					},
+				},
+			},
+		},
+		[]byte(`{"z":["b","c","d"]}`),
+		true,
+	},
+	// This test passes because neither condition matches the input.
+	{
+		"object_mixed",
+		config.Config{
+			Settings: map[string]interface{}{
+				"conditions": []config.Config{
+					// This condition does not match because an element of "z" contains "a".
+					{
+						Type: "none",
+						Settings: map[string]interface{}{
+							"object": map[string]interface{}{
+								"source_key": "z",
+							},
+							"conditions": []config.Config{
+								{
+									Type: "string_contains",
+									Settings: map[string]interface{}{
+										"value": "a",
+									},
+								},
+							},
+						},
+					},
+					// This condition does not match because the data does not match the pattern "^\\[.*\\]$".
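+					// Because no source_key is set here, the pattern is applied to the entire message data.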
+ { + Type: "string_match", + Settings: map[string]interface{}{ + "pattern": "^\\[.*\\]$", + }, + }, + }, + }, + }, + []byte(`{"z":["a","b","c"]}`), + true, + }, +} + +func TestNoneCondition(t *testing.T) { + ctx := context.TODO() + + for _, test := range metaNoneTests { + t.Run(test.name, func(t *testing.T) { + message := message.New().SetData(test.data) + + insp, err := newMetaNone(ctx, test.cfg) + if err != nil { + t.Fatal(err) + } + + check, err := insp.Condition(ctx, message) + if err != nil { + t.Error(err) + } + + if test.expected != check { + t.Errorf("expected %v, got %v, %v", test.expected, check, string(test.data)) + } + }) + } +} diff --git a/condition/network_ip_global_unicast.go b/condition/network_ip_global_unicast.go index 54a46e7a..018d75a9 100644 --- a/condition/network_ip_global_unicast.go +++ b/condition/network_ip_global_unicast.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPGlobalUnicast(_ context.Context, cfg config.Config) (*networkIPGlobalUnicast, error) { @@ -26,7 +26,7 @@ type networkIPGlobalUnicast struct { conf networkIPConfig } -func (insp *networkIPGlobalUnicast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPGlobalUnicast) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_global_unicast_test.go b/condition/network_ip_global_unicast_test.go index 896ac873..71df8db8 100644 --- a/condition/network_ip_global_unicast_test.go +++ b/condition/network_ip_global_unicast_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPGlobalUnicast{} +var _ Conditioner = &networkIPGlobalUnicast{} var networkIPGlobalUnicastTests = []struct { name string @@ -35,7 +35,7 @@ func TestNetworkIPGlobalUnicast(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -50,7 +50,7 @@ func TestNetworkIPGlobalUnicast(t *testing.T) { func benchmarkNetworkIPGlobalUnicastByte(b *testing.B, insp *networkIPGlobalUnicast, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/network_ip_link_local_multicast.go b/condition/network_ip_link_local_multicast.go index 6085cc84..c5a8267d 100644 --- a/condition/network_ip_link_local_multicast.go +++ b/condition/network_ip_link_local_multicast.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPLinkLocalMulticast(_ context.Context, cfg config.Config) (*networkIPLinkLocalMulticast, error) { @@ -26,7 +26,7 @@ type networkIPLinkLocalMulticast struct { conf networkIPConfig } -func (insp *networkIPLinkLocalMulticast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPLinkLocalMulticast) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git 
a/condition/network_ip_link_local_multicast_test.go b/condition/network_ip_link_local_multicast_test.go index 65bff208..a4de5109 100644 --- a/condition/network_ip_link_local_multicast_test.go +++ b/condition/network_ip_link_local_multicast_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPLinkLocalMulticast{} +var _ Conditioner = &networkIPLinkLocalMulticast{} var networkIPLinkLocalMulticastTests = []struct { name string @@ -35,7 +35,7 @@ func TestNetworkIPLinkLocalMulticast(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -50,7 +50,7 @@ func TestNetworkIPLinkLocalMulticast(t *testing.T) { func benchmarkNetworkIPLinkLocalMulticastByte(b *testing.B, insp *networkIPLinkLocalMulticast, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/network_ip_link_local_unicast.go b/condition/network_ip_link_local_unicast.go index 2ead95c8..3a72c9a4 100644 --- a/condition/network_ip_link_local_unicast.go +++ b/condition/network_ip_link_local_unicast.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPLinkLocalUnicast(_ context.Context, cfg config.Config) (*networkIPLinkLocalUnicast, error) { @@ -26,7 +26,7 @@ type networkIPLinkLocalUnicast struct { conf networkIPConfig } -func (insp *networkIPLinkLocalUnicast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPLinkLocalUnicast) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_link_local_unicast_test.go b/condition/network_ip_link_local_unicast_test.go index 46ee4ede..3b21ee4e 100644 --- a/condition/network_ip_link_local_unicast_test.go +++ b/condition/network_ip_link_local_unicast_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPLinkLocalUnicast{} +var _ Conditioner = &networkIPLinkLocalUnicast{} var networkIPLinkLocalUnicastTests = []struct { name string @@ -35,7 +35,7 @@ func TestNetworkIPLinkLocalUnicast(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -50,7 +50,7 @@ func TestNetworkIPLinkLocalUnicast(t *testing.T) { func benchmarkNetworkIPLinkLocalUnicastByte(b *testing.B, insp *networkIPLinkLocalUnicast, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/network_ip_loopback.go b/condition/network_ip_loopback.go index d2a9fde0..77d48f7c 100644 --- a/condition/network_ip_loopback.go +++ b/condition/network_ip_loopback.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + 
"github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPLoopback(_ context.Context, cfg config.Config) (*networkIPLoopback, error) { @@ -26,7 +26,7 @@ type networkIPLoopback struct { conf networkIPConfig } -func (insp *networkIPLoopback) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPLoopback) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_loopback_test.go b/condition/network_ip_loopback_test.go index 8be780d4..cd07e6bc 100644 --- a/condition/network_ip_loopback_test.go +++ b/condition/network_ip_loopback_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPLoopback{} +var _ Conditioner = &networkIPLoopback{} var networkIPLoopbackTests = []struct { name string @@ -41,7 +41,7 @@ func TestNetworkIPLoopback(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -56,7 +56,7 @@ func TestNetworkIPLoopback(t *testing.T) { func benchmarkNetworkIPLoopbackByte(b *testing.B, insp *networkIPLoopback, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/network_ip_multicast.go b/condition/network_ip_multicast.go index 12fb312c..dbc46c92 100644 --- a/condition/network_ip_multicast.go +++ b/condition/network_ip_multicast.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPMulticast(_ context.Context, cfg config.Config) (*networkIPMulticast, error) { @@ -26,7 +26,7 @@ type networkIPMulticast struct { conf networkIPConfig } -func (insp *networkIPMulticast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPMulticast) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_multicast_test.go b/condition/network_ip_multicast_test.go index a887f274..7c7c27fc 100644 --- a/condition/network_ip_multicast_test.go +++ b/condition/network_ip_multicast_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPMulticast{} +var _ Conditioner = &networkIPMulticast{} var networkIPMulticastTests = []struct { name string @@ -35,7 +35,7 @@ func TestNetworkIPMulticast(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -50,7 +50,7 @@ func TestNetworkIPMulticast(t *testing.T) { func benchmarkNetworkIPMulticastByte(b *testing.B, insp *networkIPMulticast, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/network_ip_private.go b/condition/network_ip_private.go index 774c69ad..9cbda2b2 100644 --- 
a/condition/network_ip_private.go +++ b/condition/network_ip_private.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPPrivate(_ context.Context, cfg config.Config) (*networkIPPrivate, error) { @@ -26,7 +26,7 @@ type networkIPPrivate struct { conf networkIPConfig } -func (insp *networkIPPrivate) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPPrivate) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_private_test.go b/condition/network_ip_private_test.go index 2553618b..5d2e270e 100644 --- a/condition/network_ip_private_test.go +++ b/condition/network_ip_private_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPPrivate{} +var _ Conditioner = &networkIPPrivate{} var networkIPPrivateTests = []struct { name string @@ -47,7 +47,7 @@ func TestNetworkIPPrivate(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -62,7 +62,7 @@ func TestNetworkIPPrivate(t *testing.T) { func benchmarkNetworkIPPrivateByte(b *testing.B, insp *networkIPPrivate, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/network_ip_unicast.go b/condition/network_ip_unicast.go index facd6aa0..9281846b 100644 --- a/condition/network_ip_unicast.go +++ b/condition/network_ip_unicast.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPUnicast(_ context.Context, cfg config.Config) (*networkIPUnicast, error) { @@ -26,7 +26,7 @@ type networkIPUnicast struct { conf networkIPConfig } -func (insp *networkIPUnicast) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPUnicast) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_unicast_test.go b/condition/network_ip_unicast_test.go index 73fe3c9d..5e35668b 100644 --- a/condition/network_ip_unicast_test.go +++ b/condition/network_ip_unicast_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPUnicast{} +var _ Conditioner = &networkIPUnicast{} var networkIPUnicastTests = []struct { name string @@ -35,7 +35,7 @@ func TestNetworkIPUnicast(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -50,7 +50,7 @@ func TestNetworkIPUnicast(t *testing.T) { func benchmarkNetworkIPUnicastByte(b *testing.B, insp *networkIPUnicast, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = 
insp.Condition(ctx, message) } } diff --git a/condition/network_ip_unspecified.go b/condition/network_ip_unspecified.go index 331ea969..4071de05 100644 --- a/condition/network_ip_unspecified.go +++ b/condition/network_ip_unspecified.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPUnspecified(_ context.Context, cfg config.Config) (*networkIPUnspecified, error) { @@ -26,7 +26,7 @@ type networkIPUnspecified struct { conf networkIPConfig } -func (insp *networkIPUnspecified) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPUnspecified) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_unspecified_test.go b/condition/network_ip_unspecified_test.go index 06d0aea6..74272cad 100644 --- a/condition/network_ip_unspecified_test.go +++ b/condition/network_ip_unspecified_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPUnspecified{} +var _ Conditioner = &networkIPUnspecified{} var networkIPUnspecifiedTests = []struct { name string @@ -35,7 +35,7 @@ func TestNetworkIPUnspecified(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -50,7 +50,7 @@ func TestNetworkIPUnspecified(t *testing.T) { func benchmarkNetworkIPUnspecifiedByte(b *testing.B, insp *networkIPUnspecified, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/network_ip_valid.go b/condition/network_ip_valid.go index 1c231869..02224eab 100644 --- a/condition/network_ip_valid.go +++ b/condition/network_ip_valid.go @@ -5,8 +5,8 @@ import ( "encoding/json" "net" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNetworkIPValid(_ context.Context, cfg config.Config) (*networkIPValid, error) { @@ -26,7 +26,7 @@ type networkIPValid struct { conf networkIPConfig } -func (insp *networkIPValid) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *networkIPValid) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/network_ip_valid_test.go b/condition/network_ip_valid_test.go index c063c813..378aa83e 100644 --- a/condition/network_ip_valid_test.go +++ b/condition/network_ip_valid_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &networkIPValid{} +var _ Conditioner = &networkIPValid{} var networkIPValidTests = []struct { name string @@ -35,7 +35,7 @@ func TestNetworkIPValid(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -50,7 +50,7 @@ func TestNetworkIPValid(t *testing.T) { func 
benchmarkNetworkIPValidByte(b *testing.B, insp *networkIPValid, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_bitwise_and.go b/condition/number_bitwise_and.go index 643e0008..03ef31ec 100644 --- a/condition/number_bitwise_and.go +++ b/condition/number_bitwise_and.go @@ -4,8 +4,8 @@ import ( "context" "strconv" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberBitwiseAND(_ context.Context, cfg config.Config) (*numberBitwiseAND, error) { @@ -25,7 +25,7 @@ type numberBitwiseAND struct { conf numberBitwiseConfig } -func (insp *numberBitwiseAND) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberBitwiseAND) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_bitwise_and_test.go b/condition/number_bitwise_and_test.go index e6f2b219..35f7008f 100644 --- a/condition/number_bitwise_and_test.go +++ b/condition/number_bitwise_and_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberBitwiseAND{} +var _ Conditioner = &numberBitwiseAND{} var numberBitwiseANDTests = []struct { name string @@ -49,7 +49,7 @@ func TestNumberBitwiseAND(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -66,7 +66,7 @@ func TestNumberBitwiseAND(t *testing.T) { func benchmarkNumberBitwiseAND(b *testing.B, insp *numberBitwiseAND, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_bitwise_not.go b/condition/number_bitwise_not.go index 9c5372d9..475898b8 100644 --- a/condition/number_bitwise_not.go +++ b/condition/number_bitwise_not.go @@ -4,8 +4,8 @@ import ( "context" "strconv" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberBitwiseNOT(_ context.Context, cfg config.Config) (*numberBitwiseNOT, error) { @@ -25,7 +25,7 @@ type numberBitwiseNOT struct { conf numberBitwiseConfig } -func (insp *numberBitwiseNOT) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberBitwiseNOT) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_bitwise_or.go b/condition/number_bitwise_or.go index 7abce320..2bd4e613 100644 --- a/condition/number_bitwise_or.go +++ b/condition/number_bitwise_or.go @@ -4,8 +4,8 @@ import ( "context" "strconv" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberBitwiseOR(_ context.Context, cfg config.Config) (*numberBitwiseOR, error) { @@ -25,7 +25,7 @@ type numberBitwiseOR struct { conf numberBitwiseConfig } -func (insp *numberBitwiseOR) Inspect(ctx context.Context, msg 
*message.Message) (output bool, err error) { +func (insp *numberBitwiseOR) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_bitwise_or_test.go b/condition/number_bitwise_or_test.go index f6335edd..0880c9aa 100644 --- a/condition/number_bitwise_or_test.go +++ b/condition/number_bitwise_or_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberBitwiseOR{} +var _ Conditioner = &numberBitwiseOR{} var numberBitwiseORTests = []struct { name string @@ -39,7 +39,7 @@ func TestNumberBitwiseOR(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -56,7 +56,7 @@ func TestNumberBitwiseOR(t *testing.T) { func benchmarkNumberBitwiseOR(b *testing.B, insp *numberBitwiseOR, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_bitwise_xor.go b/condition/number_bitwise_xor.go index 7b4b1466..caabd0cf 100644 --- a/condition/number_bitwise_xor.go +++ b/condition/number_bitwise_xor.go @@ -4,8 +4,8 @@ import ( "context" "strconv" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberBitwiseXOR(_ context.Context, cfg config.Config) (*numberBitwiseXOR, error) { @@ -25,7 +25,7 @@ type numberBitwiseXOR struct { conf numberBitwiseConfig } -func (insp *numberBitwiseXOR) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberBitwiseXOR) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_bitwise_xor_test.go b/condition/number_bitwise_xor_test.go index 07ad8693..8d2ebdfe 100644 --- a/condition/number_bitwise_xor_test.go +++ b/condition/number_bitwise_xor_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberBitwiseXOR{} +var _ Conditioner = &numberBitwiseXOR{} var numberBitwiseXORTests = []struct { name string @@ -39,7 +39,7 @@ func TestNumberBitwiseXOR(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -56,7 +56,7 @@ func TestNumberBitwiseXOR(t *testing.T) { func benchmarkNumberBitwiseXOR(b *testing.B, insp *numberBitwiseXOR, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_equal_to.go b/condition/number_equal_to.go index 340b40e0..d608969a 100644 --- a/condition/number_equal_to.go +++ b/condition/number_equal_to.go @@ -5,8 +5,8 @@ import ( "encoding/json" "strconv" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberEqualTo(_ context.Context, cfg config.Config) (*numberEqualTo, 
error) { @@ -24,7 +24,7 @@ type numberEqualTo struct { conf numberConfig } -func (insp *numberEqualTo) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberEqualTo) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_equal_to_test.go b/condition/number_equal_to_test.go index cafc1c92..3943a60c 100644 --- a/condition/number_equal_to_test.go +++ b/condition/number_equal_to_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberEqualTo{} +var _ Conditioner = &numberEqualTo{} var numberEqualToTests = []struct { name string @@ -147,7 +147,7 @@ func TestNumberEqualTo(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -164,7 +164,7 @@ func TestNumberEqualTo(t *testing.T) { func benchmarkNumberEqualTo(b *testing.B, insp *numberEqualTo, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_greater_than.go b/condition/number_greater_than.go index 27041c04..6c5405b7 100644 --- a/condition/number_greater_than.go +++ b/condition/number_greater_than.go @@ -5,8 +5,8 @@ import ( "encoding/json" "strconv" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberGreaterThan(_ context.Context, cfg config.Config) (*numberGreaterThan, error) { @@ -26,7 +26,7 @@ type numberGreaterThan struct { conf numberConfig } -func (insp *numberGreaterThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberGreaterThan) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_greater_than_test.go b/condition/number_greater_than_test.go index f12c27e5..dd40ba2a 100644 --- a/condition/number_greater_than_test.go +++ b/condition/number_greater_than_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberGreaterThan{} +var _ Conditioner = &numberGreaterThan{} var numberGreaterThanTests = []struct { name string @@ -147,7 +147,7 @@ func TestNumberGreaterThan(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -164,7 +164,7 @@ func TestNumberGreaterThan(t *testing.T) { func benchmarkNumberGreaterThan(b *testing.B, insp *numberGreaterThan, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_length_equal_to.go b/condition/number_length_equal_to.go index 63c9059c..381d4106 100644 --- a/condition/number_length_equal_to.go +++ b/condition/number_length_equal_to.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - 
"github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberLengthEqualTo(_ context.Context, cfg config.Config) (*numberLengthEqualTo, error) { @@ -25,7 +25,7 @@ type numberLengthEqualTo struct { conf numberLengthConfig } -func (insp *numberLengthEqualTo) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberLengthEqualTo) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_length_equal_to_test.go b/condition/number_length_equal_to_test.go index fc99b767..67fb1364 100644 --- a/condition/number_length_equal_to_test.go +++ b/condition/number_length_equal_to_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberLengthEqualTo{} +var _ Conditioner = &numberLengthEqualTo{} var numberLengthEqualToTests = []struct { name string @@ -75,7 +75,7 @@ func TestNumberLengthEqualTo(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -92,7 +92,7 @@ func TestNumberLengthEqualTo(t *testing.T) { func benchmarkNumberLengthEqualTo(b *testing.B, insp *numberLengthEqualTo, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_length_greater_than.go b/condition/number_length_greater_than.go index 89abccd5..c17102cc 100644 --- a/condition/number_length_greater_than.go +++ b/condition/number_length_greater_than.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberLengthGreaterThan(_ context.Context, cfg config.Config) (*numberLengthGreaterThan, error) { @@ -25,7 +25,7 @@ type numberLengthGreaterThan struct { conf numberLengthConfig } -func (insp *numberLengthGreaterThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberLengthGreaterThan) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_length_greater_than_test.go b/condition/number_length_greater_than_test.go index 82a79575..251ea9e5 100644 --- a/condition/number_length_greater_than_test.go +++ b/condition/number_length_greater_than_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberLengthGreaterThan{} +var _ Conditioner = &numberLengthGreaterThan{} var numberLengthGreaterThanTests = []struct { name string @@ -75,7 +75,7 @@ func TestNumberLengthGreaterThan(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -92,7 +92,7 @@ func TestNumberLengthGreaterThan(t *testing.T) { func benchmarkNumberLengthGreaterThan(b *testing.B, insp *numberLengthGreaterThan, message *message.Message) { ctx := context.TODO() 
for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_length_less_than.go b/condition/number_length_less_than.go index 110b0581..7e0feab1 100644 --- a/condition/number_length_less_than.go +++ b/condition/number_length_less_than.go @@ -4,8 +4,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberLengthLessThan(_ context.Context, cfg config.Config) (*numberLengthLessThan, error) { @@ -25,7 +25,7 @@ type numberLengthLessThan struct { conf numberLengthConfig } -func (insp *numberLengthLessThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberLengthLessThan) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_length_less_than_test.go b/condition/number_length_less_than_test.go index 06ccbec7..a287ca35 100644 --- a/condition/number_length_less_than_test.go +++ b/condition/number_length_less_than_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberLengthLessThan{} +var _ Conditioner = &numberLengthLessThan{} var numberLengthLessThanTests = []struct { name string @@ -75,7 +75,7 @@ func TestNumberLengthLessThan(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -92,7 +92,7 @@ func TestNumberLengthLessThan(t *testing.T) { func benchmarkNumberLengthLessThan(b *testing.B, insp *numberLengthLessThan, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/number_less_than.go b/condition/number_less_than.go index 527739af..6d1389e2 100644 --- a/condition/number_less_than.go +++ b/condition/number_less_than.go @@ -5,8 +5,8 @@ import ( "encoding/json" "strconv" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newNumberLessThan(_ context.Context, cfg config.Config) (*numberLessThan, error) { @@ -24,7 +24,7 @@ type numberLessThan struct { conf numberConfig } -func (insp *numberLessThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *numberLessThan) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/number_less_than_test.go b/condition/number_less_than_test.go index 860b7a9e..7bffe8d7 100644 --- a/condition/number_less_than_test.go +++ b/condition/number_less_than_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &numberLessThan{} +var _ Conditioner = &numberLessThan{} var numberLessThanTests = []struct { name string @@ -147,7 +147,7 @@ func TestNumberLessThan(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := 
insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -164,7 +164,7 @@ func TestNumberLessThan(t *testing.T) { func benchmarkNumberLessThan(b *testing.B, insp *numberLessThan, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/string.go b/condition/string.go index ee516bc7..ab389998 100644 --- a/condition/string.go +++ b/condition/string.go @@ -1,7 +1,7 @@ package condition import ( - iconfig "github.com/brexhq/substation/internal/config" + iconfig "github.com/brexhq/substation/v2/internal/config" ) type stringConfig struct { diff --git a/condition/string_contains.go b/condition/string_contains.go index 66d1f5fe..9950087f 100644 --- a/condition/string_contains.go +++ b/condition/string_contains.go @@ -5,8 +5,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringContains(_ context.Context, cfg config.Config) (*stringContains, error) { @@ -29,7 +29,7 @@ type stringContains struct { b []byte } -func (insp *stringContains) Inspect(ctx context.Context, msg *message.Message) (bool, error) { +func (insp *stringContains) Condition(ctx context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/condition/string_contains_test.go b/condition/string_contains_test.go index 0e1ad4f8..a3f78c32 100644 --- a/condition/string_contains_test.go +++ b/condition/string_contains_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &stringContains{} +var _ Conditioner = &stringContains{} var stringContainsTests = []struct { name string @@ -50,7 +50,7 @@ func TestStringContains(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -65,7 +65,7 @@ func TestStringContains(t *testing.T) { func benchmarkStringContains(b *testing.B, insp *stringContains, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/string_ends_with.go b/condition/string_ends_with.go index fa80dcc7..a8ec76ca 100644 --- a/condition/string_ends_with.go +++ b/condition/string_ends_with.go @@ -5,8 +5,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringEndsWith(_ context.Context, cfg config.Config) (*stringEndsWith, error) { @@ -29,7 +29,7 @@ type stringEndsWith struct { b []byte } -func (insp *stringEndsWith) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *stringEndsWith) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/string_ends_with_test.go b/condition/string_ends_with_test.go index 3bff1aa8..a345f57e 100644 --- a/condition/string_ends_with_test.go +++ b/condition/string_ends_with_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - 
"github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &stringEndsWith{} +var _ Conditioner = &stringEndsWith{} var stringEndsWithTests = []struct { name string @@ -63,7 +63,7 @@ func TestStringEndsWith(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -78,7 +78,7 @@ func TestStringEndsWith(t *testing.T) { func benchmarkStringEndsWith(b *testing.B, insp *stringEndsWith, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/string_equal_to.go b/condition/string_equal_to.go index c7f9760f..b80d4210 100644 --- a/condition/string_equal_to.go +++ b/condition/string_equal_to.go @@ -5,8 +5,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringEqualTo(_ context.Context, cfg config.Config) (*stringEqualTo, error) { @@ -29,7 +29,7 @@ type stringEqualTo struct { b []byte } -func (insp *stringEqualTo) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *stringEqualTo) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/string_equal_to_test.go b/condition/string_equal_to_test.go index 5e37956b..24e20dab 100644 --- a/condition/string_equal_to_test.go +++ b/condition/string_equal_to_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &stringEqualTo{} +var _ Conditioner = &stringEqualTo{} var stringEqualToTests = []struct { name string @@ -87,7 +87,7 @@ func TestStringEqualTo(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -102,7 +102,7 @@ func TestStringEqualTo(t *testing.T) { func benchmarkStringEqualTo(b *testing.B, insp *stringEqualTo, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/string_greater_than.go b/condition/string_greater_than.go index afbe9334..38d7f2f4 100644 --- a/condition/string_greater_than.go +++ b/condition/string_greater_than.go @@ -5,8 +5,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringGreaterThan(_ context.Context, cfg config.Config) (*stringGreaterThan, error) { @@ -29,7 +29,7 @@ type stringGreaterThan struct { b []byte } -func (insp *stringGreaterThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *stringGreaterThan) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/string_greater_than_test.go b/condition/string_greater_than_test.go index 52f4117c..49fbf0f5 100644 --- a/condition/string_greater_than_test.go +++ 
b/condition/string_greater_than_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &stringGreaterThan{} +var _ Conditioner = &stringGreaterThan{} var stringGreaterThanTests = []struct { name string @@ -77,7 +77,7 @@ func TestStringGreaterThan(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -92,7 +92,7 @@ func TestStringGreaterThan(t *testing.T) { func benchmarkStringGreaterThan(b *testing.B, insp *stringGreaterThan, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/string_less_than.go b/condition/string_less_than.go index 48d87b1e..2d4c5146 100644 --- a/condition/string_less_than.go +++ b/condition/string_less_than.go @@ -5,8 +5,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringLessThan(_ context.Context, cfg config.Config) (*stringLessThan, error) { @@ -29,7 +29,7 @@ type stringLessThan struct { b []byte } -func (insp *stringLessThan) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *stringLessThan) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/string_less_than_test.go b/condition/string_less_than_test.go index b77008ce..a41be617 100644 --- a/condition/string_less_than_test.go +++ b/condition/string_less_than_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &stringLessThan{} +var _ Conditioner = &stringLessThan{} var stringLessThanTests = []struct { name string @@ -77,7 +77,7 @@ func TestStringLessThan(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -92,7 +92,7 @@ func TestStringLessThan(t *testing.T) { func benchmarkStringLessThan(b *testing.B, insp *stringLessThan, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/string_match.go b/condition/string_match.go index 57510c84..2d17b00a 100644 --- a/condition/string_match.go +++ b/condition/string_match.go @@ -6,10 +6,10 @@ import ( "fmt" "regexp" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type stringMatchConfig struct { @@ -25,7 +25,7 @@ func (c *stringMatchConfig) Decode(in interface{}) error { func (c *stringMatchConfig) Validate() error { if c.Pattern == "" { - return fmt.Errorf("pattern: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("pattern: %v", iconfig.ErrMissingRequiredOption) } return 
nil @@ -60,7 +60,7 @@ type stringMatch struct { re *regexp.Regexp } -func (insp *stringMatch) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *stringMatch) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/string_match_test.go b/condition/string_match_test.go index b74615bd..13424977 100644 --- a/condition/string_match_test.go +++ b/condition/string_match_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &stringMatch{} +var _ Conditioner = &stringMatch{} var stringMatchTests = []struct { name string @@ -49,7 +49,7 @@ func TestStringMatch(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -64,7 +64,7 @@ func TestStringMatch(t *testing.T) { func benchmarkStringMatchByte(b *testing.B, insp *stringMatch, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/string_starts_with.go b/condition/string_starts_with.go index 4b164aa0..0083940c 100644 --- a/condition/string_starts_with.go +++ b/condition/string_starts_with.go @@ -5,8 +5,8 @@ import ( "context" "encoding/json" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringStartsWith(_ context.Context, cfg config.Config) (*stringStartsWith, error) { @@ -29,7 +29,7 @@ type stringStartsWith struct { b []byte } -func (insp *stringStartsWith) Inspect(ctx context.Context, msg *message.Message) (output bool, err error) { +func (insp *stringStartsWith) Condition(ctx context.Context, msg *message.Message) (output bool, err error) { if msg.IsControl() { return false, nil } diff --git a/condition/string_starts_with_test.go b/condition/string_starts_with_test.go index e3e36e05..ff13e75f 100644 --- a/condition/string_starts_with_test.go +++ b/condition/string_starts_with_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) -var _ inspector = &stringStartsWith{} +var _ Conditioner = &stringStartsWith{} var stringStartsWithTests = []struct { name string @@ -63,7 +63,7 @@ func TestStringStartsWith(t *testing.T) { t.Fatal(err) } - check, err := insp.Inspect(ctx, message) + check, err := insp.Condition(ctx, message) if err != nil { t.Error(err) } @@ -78,7 +78,7 @@ func TestStringStartsWith(t *testing.T) { func benchmarkStringStartsWith(b *testing.B, insp *stringStartsWith, message *message.Message) { ctx := context.TODO() for i := 0; i < b.N; i++ { - _, _ = insp.Inspect(ctx, message) + _, _ = insp.Condition(ctx, message) } } diff --git a/condition/utility_random.go b/condition/utility_random.go index 2bbd01bb..01d0df49 100644 --- a/condition/utility_random.go +++ b/condition/utility_random.go @@ -6,9 +6,10 @@ import ( "math/rand" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + 
"github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type utilityRandomConfig struct{} @@ -37,7 +38,7 @@ type utilityRandom struct { r *rand.Rand } -func (insp *utilityRandom) Inspect(_ context.Context, msg *message.Message) (bool, error) { +func (insp *utilityRandom) Condition(_ context.Context, msg *message.Message) (bool, error) { if msg.IsControl() { return false, nil } diff --git a/examples/Makefile b/examples/Makefile deleted file mode 100644 index 3bba9824..00000000 --- a/examples/Makefile +++ /dev/null @@ -1,133 +0,0 @@ -SUBSTATION_DIR ?= $(shell git rev-parse --show-toplevel 2> /dev/null) -SUBSTATION_VERSION ?= $(shell git describe --tags --always --abbrev=0 2> /dev/null) -AWS_ACCOUNT_ID ?= $(shell aws sts get-caller-identity --query 'Account' --output text 2> /dev/null) -AWS_REGION ?= $(shell aws configure get region 2> /dev/null) - -FONT_RED := $(shell tput setaf 1) -FONT_RESET := $(shell tput sgr0) - -check: - @printf "$(FONT_RED)>> Checking Substation variables...$(FONT_RESET)\n" - -ifeq ("${SUBSTATION_DIR}","") - @echo "SUBSTATION_DIR variable is missing!" -else - @echo "SUBSTATION_DIR: ${SUBSTATION_DIR}" -endif - -ifeq ("${SUBSTATION_VERSION}","") - @echo "SUBSTATION_VERSION variable is missing!" -else - @echo "SUBSTATION_VERSION: ${SUBSTATION_VERSION}" -endif - - @printf "$(FONT_RED)>> Checking AWS variables...$(FONT_RESET)\n" - -ifeq ("${AWS_ACCOUNT_ID}","") - @echo "AWS_ACCOUNT_ID variable is missing!" -else - @echo "AWS_ACCOUNT_ID: ${AWS_ACCOUNT_ID}" -endif - -ifeq ("${AWS_REGION}","") - @echo "AWS_REGION variable is missing!" -else - @echo "AWS_REGION: ${AWS_REGION}" -endif - -.PHONY: build -build: - @$(MAKE) build-go - @$(MAKE) build-config - @$(MAKE) build-images - -build-config: - @printf "$(FONT_RED)>> Building configuration files...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && bash build/scripts/config/compile.sh - -build-go: - @printf "$(FONT_RED)>> Building Go binaries...$(FONT_RESET)\n" - @for file in $(shell find $(SUBSTATION_DIR) -name main.go); do \ - cd $$(dirname $$file) && go build; \ - done - -build-images: - @printf "$(FONT_RED)>> Building AppConfig extension...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && AWS_ARCHITECTURE=arm64 AWS_REGION=$(AWS_REGION) bash build/scripts/aws/lambda/get_appconfig_extension.sh - - @printf "$(FONT_RED)>> Building Docker images...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && \ - docker buildx build --platform linux/arm64 --build-arg ARCH=arm64 -f build/container/aws/lambda/substation/Dockerfile -t substation:latest-arm64 . && \ - docker buildx build --platform linux/arm64 --build-arg ARCH=arm64 -f build/container/aws/lambda/autoscale/Dockerfile -t autoscale:latest-arm64 . && \ - docker buildx build --platform linux/arm64 --build-arg ARCH=arm64 -f build/container/aws/lambda/validate/Dockerfile -t validate:latest-arm64 . 
- -deploy: - @$(MAKE) deploy-aws-init - @$(MAKE) deploy-aws-images - @$(MAKE) deploy-aws-infra - @$(MAKE) deploy-aws-config - @$(MAKE) deploy-aws-post-script - -deploy-aws-init: - @printf "$(FONT_RED)>> Initializing cloud infrastructure in AWS with Terraform...$(FONT_RESET)\n" - - @cd $(EXAMPLE)/terraform && \ - terraform init && \ - terraform apply -auto-approve -compact-warnings \ - -target=module.kms \ - -target=module.ecr \ - -target=module.ecr_autoscale \ - -target=module.ecr_validate - -deploy-aws-images: - @printf "$(FONT_RED)>> Deploying images to AWS ECR with Docker...$(FONT_RESET)\n" - - @aws ecr get-login-password | docker login --username AWS --password-stdin $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com - -ifneq ("$(shell aws ecr describe-repositories --repository-names substation --output text 2> /dev/null)","") - @docker tag substation:latest-arm64 $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/substation:$(SUBSTATION_VERSION) - @docker push $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/substation:$(SUBSTATION_VERSION) -endif - -ifneq ("$(shell aws ecr describe-repositories --repository-names validate --output text 2> /dev/null)","") - @docker tag validate:latest-arm64 $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/validate:$(SUBSTATION_VERSION) - @docker push $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/validate:$(SUBSTATION_VERSION) -endif - -ifneq ("$(shell aws ecr describe-repositories --repository-names autoscale --output text 2> /dev/null)","") - @docker tag autoscale:latest-arm64 $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/autoscale:$(SUBSTATION_VERSION) - @docker push $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/autoscale:$(SUBSTATION_VERSION) -endif - -deploy-aws-infra: - @printf "$(FONT_RED)>> Deploying cloud infrastructure in AWS with Terraform...$(FONT_RESET)\n" - @cd $(EXAMPLE)/terraform && terraform apply -auto-approve -compact-warnings - -deploy-aws-config: - @printf "$(FONT_RED)>> Deploying configurations to AppConfig with Python...$(FONT_RESET)\n" - @cd $(SUBSTATION_DIR) && SUBSTATION_CONFIG_DIRECTORY=examples/$(EXAMPLE) AWS_DEFAULT_REGION=$(AWS_REGION) AWS_APPCONFIG_APPLICATION_NAME=substation AWS_APPCONFIG_ENVIRONMENT=example AWS_APPCONFIG_DEPLOYMENT_STRATEGY=Instant python3 build/scripts/aws/appconfig/appconfig_upload.py - -deploy-aws-post-script: -ifneq ("$(wildcard $(EXAMPLE)/post_deploy.sh)","") - @printf "$(FONT_RED)>> Running post-deploy script...$(FONT_RESET)\n" - @bash $(EXAMPLE)/post_deploy.sh -endif - -destroy: - @printf "$(FONT_RED)>> Destroying configurations in AppConfig with Python...$(FONT_RESET)\n" - @for file in $(shell find $(EXAMPLE) -name config.jsonnet); do \ - AWS_DEFAULT_REGION=$(AWS_REGION) AWS_APPCONFIG_APPLICATION_NAME=substation AWS_APPCONFIG_PROFILE_NAME=$$(basename $$(dirname $$file)) python3 $(SUBSTATION_DIR)/build/scripts/aws/appconfig/appconfig_delete.py; \ - done - - @printf "$(FONT_RED)>> Destroying cloud infrastructure in AWS with Terraform...$(FONT_RESET)\n" - @cd $(EXAMPLE)/terraform && terraform destroy -auto-approve -compact-warnings - -quickstart: - @$(MAKE) build-go - @$(MAKE) build-config - - @printf "$(FONT_RED)>> Printing data file...$(FONT_RESET)\n" - @cat cmd/client/file/substation/data.json - - @printf "$(FONT_RED)>> Running Substation...$(FONT_RESET)\n" - @cd cmd/client/file/substation && ./substation -config config.json -file data.json diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index b239e340..00000000 --- 
a/examples/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# examples - -This directory contains examples of how to use Substation. - -## config - -Contains examples for [configuring conditions and transforms](build/config). - -## cmd - -Contains examples for [creating new applications](build/cmd). - -## terraform - -Contains examples for [deploying to AWS using Terraform](build/terraform/aws). diff --git a/examples/cmd/client/file/substation/config.jsonnet b/examples/cmd/client/file/substation/config.jsonnet deleted file mode 100644 index 46a0f7a1..00000000 --- a/examples/cmd/client/file/substation/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local event = import 'event.libsonnet'; -local send = import 'send.libsonnet'; - -{ - transforms: - event.transforms - + send.transforms, -} diff --git a/examples/cmd/client/file/substation/data.json b/examples/cmd/client/file/substation/data.json deleted file mode 100644 index 2393cd01..00000000 --- a/examples/cmd/client/file/substation/data.json +++ /dev/null @@ -1 +0,0 @@ -{"foo":"bar"} diff --git a/examples/cmd/client/file/substation/event.libsonnet b/examples/cmd/client/file/substation/event.libsonnet deleted file mode 100644 index 5d11708d..00000000 --- a/examples/cmd/client/file/substation/event.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -local sub = import '../../../../../build/config/substation.libsonnet'; - -local match = sub.cnd.any( - sub.cnd.string.equal_to({ object: { source_key: 'foo' }, string: 'baz' }), -); - -local copy = sub.tf.object.copy({ object: { source_key: 'foo', target_key: 'bar' } },); - -{ - transforms: [ - sub.pattern.tf.conditional( - condition=match, transform=copy, - ), - sub.tf.object.insert({ object: { target_key: 'qux' }, value: 'quux' },), - ], -} diff --git a/examples/cmd/client/file/substation/send.libsonnet b/examples/cmd/client/file/substation/send.libsonnet deleted file mode 100644 index c80dfa72..00000000 --- a/examples/cmd/client/file/substation/send.libsonnet +++ /dev/null @@ -1,7 +0,0 @@ -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/examples/cmd/development/benchmark/config.jsonnet b/examples/cmd/development/benchmark/config.jsonnet deleted file mode 100644 index e8880cd0..00000000 --- a/examples/cmd/development/benchmark/config.jsonnet +++ /dev/null @@ -1,6 +0,0 @@ -local event = import 'event.libsonnet'; - -{ - transforms: - event.transforms, -} diff --git a/examples/cmd/development/benchmark/data_large.json b/examples/cmd/development/benchmark/data_large.json deleted file mode 100644 index 036769fc..00000000 --- a/examples/cmd/development/benchmark/data_large.json +++ /dev/null @@ -1 +0,0 @@ 
-{"eventVersion":"1.05","userIdentity":{"type":"IAMUser","principalId":"AIDAJDPLRKLG7UEXAMPLE","arn":"arn:aws:iam::123456789012:user/Alice","accountId":"123456789012","accessKeyId":"AKIAIOSFODNN7EXAMPLE","userName":"Alice","sessionContext":{"attributes":{"mfaAuthenticated":"false","creationDate":"2023-09-23T12:45:30Z"}}},"eventTime":"2023-09-23T12:45:30Z","eventSource":"ec2.amazonaws.com","eventName":"RunInstances","awsRegion":"us-west-2","sourceIPAddress":"192.0.2.1","userAgent":"console.ec2.amazonaws.com","requestParameters":{"instanceType":"t2.micro","imageId":"ami-0abcdef1234567890","keyName":"myKeyPair","subnetId":"subnet-0abcdef1234567890","minCount":1,"maxCount":1,"securityGroupIds":["sg-0abcdef1234567890"],"ebsOptimized":false,"monitoring":{"enabled":false},"disableApiTermination":false,"instanceInitiatedShutdownBehavior":"stop","blockDeviceMapping":[{"deviceName":"/dev/sda1","ebs":{"volumeSize":30,"deleteOnTermination":true,"volumeType":"gp2"}}]},"responseElements":{"instancesSet":{"items":[{"instanceId":"i-0abcdef1234567890","currentState":{"code":0,"name":"pending"},"previousState":{"code":80,"name":"stopped"},"privateDnsName":"ip-192-0-2-1.us-west-2.compute.internal","publicDnsName":"","stateTransitionReason":"","amiLaunchIndex":0,"productCodes":[],"instanceType":"t2.micro","launchTime":"2023-09-23T12:45:30Z","placement":{"availabilityZone":"us-west-2a","groupName":"","tenancy":"default"},"monitoring":{"state":"disabled"},"subnetId":"subnet-0abcdef1234567890","vpcId":"vpc-0abcdef1234567890","privateIpAddress":"192.0.2.1","sourceDestCheck":true,"groupSet":[{"groupId":"sg-0abcdef1234567890","groupName":"my-sg"}],"architecture":"x86_64","rootDeviceType":"ebs","rootDeviceName":"/dev/sda1","blockDeviceMapping":[{"deviceName":"/dev/sda1","ebs":{"volumeId":"vol-0abcdef1234567890","status":"attaching","attachTime":"2023-09-23T12:45:30Z","deleteOnTermination":true}}],"virtualizationType":"hvm","clientToken":"","tags":[],"securityGroups":[{"groupId":"sg-0abcdef1234567890","groupName":"my-sg"}],"sourceDestCheck":true,"hypervisor":"xen","networkInterfaces":[{"networkInterfaceId":"eni-0abcdef1234567890","subnetId":"subnet-0abcdef1234567890","vpcId":"vpc-0abcdef1234567890","description":"Primary network interface","ownerId":"123456789012","status":"in-use","macAddress":"12:34:56:78:9a:bc","privateIpAddress":"192.0.2.1","privateDnsName":"ip-192-0-2-1.us-west-2.compute.internal","sourceDestCheck":true,"groups":[{"groupId":"sg-0abcdef1234567890","groupName":"my-sg"}],"attachment":{"attachmentId":"eni-attach-0abcdef1234567890","deviceIndex":0,"status":"attaching","attachTime":"2023-09-23T12:45:30Z","deleteOnTermination":true},"privateIpAddresses":[{"privateIpAddress":"192.0.2.1","privateDnsName":"ip-192-0-2-1.us-west-2.compute.internal","primary":true}]}],"ebsOptimized":false}]},"ownerId":"123456789012","reservationId":"r-0abcdef1234567890"}} diff --git a/examples/cmd/development/benchmark/data_small.json b/examples/cmd/development/benchmark/data_small.json deleted file mode 100644 index 71df0ed1..00000000 --- a/examples/cmd/development/benchmark/data_small.json +++ /dev/null @@ -1 +0,0 @@ 
-{"eventVersion":"1.05","userIdentity":{"type":"IAMUser","principalId":"AIDAJDPLRKLG7UEXAMPLE","arn":"arn:aws:iam::123456789012:user/Alice","accountId":"123456789012","accessKeyId":"AKIAIOSFODNN7EXAMPLE","userName":"Alice"},"eventTime":"2023-09-23T12:45:30Z","eventSource":"ec2.amazonaws.com","eventName":"StopInstances","awsRegion":"us-west-2","sourceIPAddress":"192.0.2.1","userAgent":"console.ec2.amazonaws.com","requestParameters":{"instanceIds":["i-0abcdef1234567890"]},"responseElements":{"instancesSet":{"items":[{"instanceId":"i-0abcdef1234567890","currentState":{"code":80,"name":"stopped"},"previousState":{"code":16,"name":"running"}}]}}} diff --git a/examples/cmd/development/benchmark/event.libsonnet b/examples/cmd/development/benchmark/event.libsonnet deleted file mode 100644 index e5b7a210..00000000 --- a/examples/cmd/development/benchmark/event.libsonnet +++ /dev/null @@ -1,7 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - sub.transform.time.now({ object: { target_key: 'now' } }), - ], -} diff --git a/examples/config/condition/meta/if_all_else/config.jsonnet b/examples/condition/meta/config.jsonnet similarity index 54% rename from examples/config/condition/meta/if_all_else/config.jsonnet rename to examples/condition/meta/config.jsonnet index 2d6a5654..3716b042 100644 --- a/examples/config/condition/meta/if_all_else/config.jsonnet +++ b/examples/condition/meta/config.jsonnet @@ -1,13 +1,7 @@ // This example determines if all values in an array are email addresses // that have the DNS domain "brex.com". This technique can be used to // validate or summarize values in an array. -local sub = import '../../../../../build/config/substation.libsonnet'; - -local domain_match = sub.cnd.all( - // After running the example, try changing this to "any" or "none" and see - // what happens. - sub.cnd.meta.for_each(settings={ type: 'all', inspector: sub.cnd.str.ends_with(settings={ value: '@brex.com' }) }), -); +local sub = import '../../../substation.libsonnet'; { concurrency: 1, @@ -19,11 +13,18 @@ local domain_match = sub.cnd.all( sub.tf.meta.switch( settings={ cases: [ { - condition: domain_match, - transform: sub.tf.obj.insert({ object: { target_key: 'meta result' }, value: true }), + condition: sub.cnd.meta.any({ + object: { source_key: '@this' }, // Required to interpret the input as a JSON array. 
+ inspectors: [sub.cnd.str.ends_with(settings={ value: '@brex.com' })], + }), + transforms: [ + sub.tf.obj.insert({ object: { target_key: 'meta result' }, value: true }), + ], }, { - transform: sub.tf.obj.insert({ object: { target_key: 'meta result' }, value: false }), + transforms: [ + sub.tf.obj.insert({ object: { target_key: 'meta result' }, value: false }), + ], }, ] } ), diff --git a/examples/config/condition/meta/if_all_else/data.json b/examples/condition/meta/data.json similarity index 100% rename from examples/config/condition/meta/if_all_else/data.json rename to examples/condition/meta/data.json diff --git a/examples/condition/meta/stdout.txt b/examples/condition/meta/stdout.txt new file mode 100644 index 00000000..4b122616 --- /dev/null +++ b/examples/condition/meta/stdout.txt @@ -0,0 +1,2 @@ +["alice@brex.com","bob@brex.com"] +true diff --git a/examples/condition/number/config.jsonnet b/examples/condition/number/config.jsonnet new file mode 100644 index 00000000..ca5e369a --- /dev/null +++ b/examples/condition/number/config.jsonnet @@ -0,0 +1,26 @@ +// This example shows usage of the 'number.equal_to' and 'number.greater_than' conditions. +local sub = import '../../../substation.libsonnet'; + +{ + concurrency: 1, + transforms: [ + sub.tf.meta.switch({ cases: [ + { + condition: sub.cnd.num.eq({ object: { source_key: 'sourcePort' }, value: 22 }), + transforms: [ + sub.tf.obj.insert({ object: { target_key: 'service' }, value: 'SSH' }), + ], + }, + ] }), + sub.tf.meta.switch({ cases: [ + { + condition: sub.cnd.num.gt({ object: { source_key: 'bytes' }, value: 10000 }), + transforms: [ + sub.tf.obj.insert({ object: { target_key: 'severity' }, value: 'high' }), + ], + }, + ] }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), + sub.tf.send.stdout(), + ], +} diff --git a/examples/condition/number/data.json b/examples/condition/number/data.json new file mode 100644 index 00000000..edfed892 --- /dev/null +++ b/examples/condition/number/data.json @@ -0,0 +1 @@ +{"eventId":"123461","timestamp":"2024-07-29T10:00:00Z","sourceIP":"192.168.1.6","destinationIP":"172.16.0.7","sourcePort":"22","destinationPort":"22","protocol":"TCP","action":"ACCEPT","bytes":"20000"} diff --git a/examples/condition/number/stdout.txt b/examples/condition/number/stdout.txt new file mode 100644 index 00000000..96117951 --- /dev/null +++ b/examples/condition/number/stdout.txt @@ -0,0 +1,13 @@ +{ + "eventId": "123461", + "timestamp": "2024-07-29T10:00:00Z", + "sourceIP": "192.168.1.6", + "destinationIP": "172.16.0.7", + "sourcePort": "22", + "destinationPort": "22", + "protocol": "TCP", + "action": "ACCEPT", + "bytes": "20000", + "service": "SSH", + "severity": "high" +} diff --git a/examples/condition/string/config.jsonnet b/examples/condition/string/config.jsonnet new file mode 100644 index 00000000..e2b449ee --- /dev/null +++ b/examples/condition/string/config.jsonnet @@ -0,0 +1,28 @@ +// This example shows usage of the 'string.equal_to' and 'string.greater_than' conditions. +local sub = import '../../../substation.libsonnet'; + +{ + concurrency: 1, + transforms: [ + sub.tf.meta.switch({ cases: [ + { + condition: sub.cnd.str.eq({ obj: { src: 'action' }, value: 'ACCEPT' }), + transforms: [ + // This overwrites the value of the 'action' key. + sub.tf.obj.insert({ obj: { trg: 'action' }, value: 'Allow' }), + ], + }, + ] }), + sub.tf.meta.switch({ cases: [ + { + condition: sub.cnd.str.gt({ obj: { src: 'vpcId' }, value: 'vpc-1a2b3c4d' }), + transforms: [ + // This adds a new key-value pair to the object. 
+ sub.tf.obj.insert({ obj: { trg: 'priority' }, value: 'high' }), + ], + }, + ] }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), + sub.tf.send.stdout(), + ], +} diff --git a/examples/condition/string/data.json b/examples/condition/string/data.json new file mode 100644 index 00000000..3c5c76f3 --- /dev/null +++ b/examples/condition/string/data.json @@ -0,0 +1 @@ +{"eventId":"123461","timestamp":"2024-07-29T10:00:00Z","sourceIP":"192.168.1.6","destinationIP":"172.16.0.7","sourcePort":"80","destinationPort":"443","protocol":"TCP","action":"ACCEPT","vpcId":"vpc-2b3c4d5e"} diff --git a/examples/condition/string/stdout.txt b/examples/condition/string/stdout.txt new file mode 100644 index 00000000..9af356d8 --- /dev/null +++ b/examples/condition/string/stdout.txt @@ -0,0 +1,12 @@ +{ + "eventId": "123461", + "timestamp": "2024-07-29T10:00:00Z", + "sourceIP": "192.168.1.6", + "destinationIP": "172.16.0.7", + "sourcePort": "80", + "destinationPort": "443", + "protocol": "TCP", + "action": "Allow", + "vpcId": "vpc-2b3c4d5e", + "priority": "high" +} diff --git a/examples/config/condition/number/config.jsonnet b/examples/config/condition/number/config.jsonnet deleted file mode 100644 index ae2e0522..00000000 --- a/examples/config/condition/number/config.jsonnet +++ /dev/null @@ -1,25 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // This shows example usage of the 'number.equal_to' and 'number.greater_than' conditions. - sub.tf.meta.switch( - settings={ cases: [ - { - condition: sub.cnd.num.eq({obj: {src: 'sourcePort'}, value: 22}), - transform: sub.tf.obj.insert({ obj: {trg: 'protocol'}, value: 'SSH' }), - }, - ] - } ), - sub.tf.meta.switch( - settings={ cases: [ - { - condition: sub.cnd.num.gt({obj: {src: 'bytes'}, value: 10000}), - transform: sub.tf.obj.insert({ obj: {trg: 'severity'}, value: 'high' }), - }, - ] } - ), - sub.tf.send.stdout(), - ], -} diff --git a/examples/config/condition/number/data.json b/examples/config/condition/number/data.json deleted file mode 100644 index 1f4466dc..00000000 --- a/examples/config/condition/number/data.json +++ /dev/null @@ -1 +0,0 @@ -{ "eventId": "123461", "timestamp": "2024-07-29T10:00:00Z", "sourceIP": "192.168.1.6", "destinationIP": "172.16.0.7", "sourcePort": "22", "destinationPort": "22", "protocol": "TCP", "action": "ACCEPT", "bytes": "20000" } diff --git a/examples/config/condition/string/config.jsonnet b/examples/config/condition/string/config.jsonnet deleted file mode 100644 index 9898d1b5..00000000 --- a/examples/config/condition/string/config.jsonnet +++ /dev/null @@ -1,26 +0,0 @@ -local sub = import '../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // This shows example usage of the 'string.equal_to' and 'string.greater_than' conditions. - // The string greater than and less than conditions compare lexicographically with another static or target_key value.
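As the comment above notes, `string.greater_than` and `string.less_than` compare lexicographically. A self-contained plain-Go illustration of that ordering, using the values from the example data (this is an ordinary byte-wise string comparison, not Substation code):

```go
package main

import "fmt"

func main() {
	// Strings compare byte-by-byte from the left; the first differing
	// byte decides the ordering.
	fmt.Println("vpc-2b3c4d5e" > "vpc-1a2b3c4d") // true: '2' > '1', so 'priority' is set to 'high'
	fmt.Println("ACCEPT" < "Allow")              // true: 'C' (0x43) < 'l' (0x6C)
}
```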
- sub.tf.meta.switch( - settings={ cases: [ - { - condition: sub.cnd.str.eq({obj: {src: 'action'}, value: 'ACCEPT'}), - transform: sub.tf.obj.insert({ obj: {trg: 'action'}, value: 'Allow' }), - }, - ] } - ), - sub.tf.meta.switch( - settings={ cases: [ - { - condition: sub.cnd.str.gt({obj: {src: 'vpcId'}, value: 'vpc-1a2b3c4d'}), - transform: sub.tf.obj.insert({ obj: {trg: 'priority'}, value: 'high' }), - }, - ] } - ), - sub.tf.send.stdout(), - ], -} diff --git a/examples/config/condition/string/data.json b/examples/config/condition/string/data.json deleted file mode 100644 index 9ae38675..00000000 --- a/examples/config/condition/string/data.json +++ /dev/null @@ -1 +0,0 @@ -{ "eventId": "123461", "timestamp": "2024-07-29T10:00:00Z", "sourceIP": "192.168.1.6", "destinationIP": "172.16.0.7", "sourcePort": "80", "destinationPort": "443", "protocol": "TCP", "action": "ACCEPT", "vpcId": "vpc-2b3c4d5e" } diff --git a/examples/config/config.jsonnet b/examples/config/config.jsonnet deleted file mode 100644 index 783f2e58..00000000 --- a/examples/config/config.jsonnet +++ /dev/null @@ -1,28 +0,0 @@ -local sub = import '../../build/config/substation.libsonnet'; - -{ - // Substation application configs always contain an array named `transforms`. - transforms: [ - // Each transform function is defined in the `substation` library. - sub.transform.object.insert({ id: 'insert-z', object: { target_key: 'a' }, value: 'z' }), - // Transform functions can be conditionally applied using the - // `meta.switch` function. - sub.transform.meta.switch({ cases: [ - { - condition: sub.condition.any( - sub.condition.string.equal_to({ object: { source_key: 'a' }, value: 'z' }) - ), - transform: sub.transform.object.insert({ object: { target_key: 'c' }, value: 'd' }), - }, - ] }), - // This is identical to the previous example, but uses a pre-defined - // pattern and library abbreviations. - sub.pattern.tf.conditional( - condition=sub.cnd.str.eq({ obj: { src: 'a' }, value: 'z' }), - transform=sub.tf.obj.insert({ obj: { trg: 'c' }, value: 'd' }), - ), - // Applications usually rely on a `send` transform to send results - // to a destination. These can be defined anywhere in the config. 
- sub.tf.send.stdout(), - ], -} diff --git a/examples/config/transform/array/extend/stdout.json b/examples/config/transform/array/extend/stdout.json deleted file mode 100644 index 554b3847..00000000 --- a/examples/config/transform/array/extend/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,3,4],"z":[3,4]} diff --git a/examples/config/transform/array/flatten/stdout.json b/examples/config/transform/array/flatten/stdout.json deleted file mode 100644 index f6595873..00000000 --- a/examples/config/transform/array/flatten/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,3,4]} diff --git a/examples/config/transform/array/flatten_deep/stdout.json b/examples/config/transform/array/flatten_deep/stdout.json deleted file mode 100644 index 9cbb8774..00000000 --- a/examples/config/transform/array/flatten_deep/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[1,2,3,4,5,6]} diff --git a/examples/config/transform/enrich/kvstore_json/kv.json b/examples/config/transform/enrich/kvstore_json/kv.json deleted file mode 100644 index 7d22510e..00000000 --- a/examples/config/transform/enrich/kvstore_json/kv.json +++ /dev/null @@ -1 +0,0 @@ -{"churro":9.99} diff --git a/examples/config/transform/enrich/mmdb/config.jsonnet b/examples/config/transform/enrich/mmdb/config.jsonnet deleted file mode 100644 index f30ac056..00000000 --- a/examples/config/transform/enrich/mmdb/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -local sub = import '../../../../../build/config/substation.libsonnet'; - -local city = sub.kv_store.mmdb({ file: 'path/to/GeoLite2-City.mmdb' }); - -local asn = sub.kv_store.mmdb({ file: 'path/to/GeoLite2-ASN.mmdb' }); - -{ - transforms: [ - sub.tf.enrich.kv_store.iget({ object: { source_key: 'ip', target_key: 'city' }, kv_store: city }), - sub.tf.enrich.kv_store.iget({ object: { source_key: 'ip', target_key: 'asn' }, kv_store: asn }), - sub.tf.send.stdout(), - ], -} diff --git a/examples/config/transform/enrich/mmdb/stdout.jsonl b/examples/config/transform/enrich/mmdb/stdout.jsonl deleted file mode 100644 index e1cd24e3..00000000 --- a/examples/config/transform/enrich/mmdb/stdout.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"ip":"8.8.8.8","city":{"continent":{"code":"NA","geoname_id":6255149,"names":{"de":"Nordamerika","en":"North America","es":"Norteamérica","fr":"Amérique du Nord","ja":"北アメリカ","pt-BR":"América do Norte","ru":"Северная Америка","zh-CN":"北美洲"}},"country":{"geoname_id":6252001,"iso_code":"US","names":{"de":"Vereinigte Staaten","en":"United States","es":"Estados Unidos","fr":"États Unis","ja":"アメリカ","pt-BR":"EUA","ru":"США","zh-CN":"美国"}},"location":{"accuracy_radius":1000,"latitude":37.751,"longitude":-97.822,"time_zone":"America/Chicago"},"registered_country":{"geoname_id":6252001,"iso_code":"US","names":{"de":"Vereinigte Staaten","en":"United States","es":"Estados Unidos","fr":"États Unis","ja":"アメリカ","pt-BR":"EUA","ru":"США","zh-CN":"美国"}}},"asn":{"autonomous_system_number":15169,"autonomous_system_organization":"GOOGLE"}} -{"ip":"9.9.9.9","city":{"city":{"geoname_id":5327684,"names":{"de":"Berkeley","en":"Berkeley","es":"Berkeley","fr":"Berkeley","ja":"バークリー","pt-BR":"Berkeley","ru":"Беркли","zh-CN":"伯克利"}},"continent":{"code":"NA","geoname_id":6255149,"names":{"de":"Nordamerika","en":"North America","es":"Norteamérica","fr":"Amérique du Nord","ja":"北アメリカ","pt-BR":"América do Norte","ru":"Северная Америка","zh-CN":"北美洲"}},"country":{"geoname_id":6252001,"iso_code":"US","names":{"de":"Vereinigte Staaten","en":"United States","es":"Estados Unidos","fr":"États 
Unis","ja":"アメリカ","pt-BR":"EUA","ru":"США","zh-CN":"美国"}},"location":{"accuracy_radius":20,"latitude":37.8767,"longitude":-122.2676,"metro_code":807,"time_zone":"America/Los_Angeles"},"postal":{"code":"94709"},"registered_country":{"geoname_id":2658434,"iso_code":"CH","names":{"de":"Schweiz","en":"Switzerland","es":"Suiza","fr":"Suisse","ja":"スイス連邦","pt-BR":"Suíça","ru":"Швейцария","zh-CN":"瑞士"}},"subdivisions":[{"geoname_id":5332921,"iso_code":"CA","names":{"de":"Kalifornien","en":"California","es":"California","fr":"Californie","ja":"カリフォルニア州","pt-BR":"Califórnia","ru":"Калифорния","zh-CN":"加州"}}]},"asn":{"autonomous_system_number":19281,"autonomous_system_organization":"QUAD9-AS-1"}} -{"ip":"1.1.1.1","city":{"registered_country":{"geoname_id":2077456,"iso_code":"AU","names":{"de":"Australien","en":"Australia","es":"Australia","fr":"Australie","ja":"オーストラリア","pt-BR":"Austrália","ru":"Австралия","zh-CN":"澳大利亚"}}},"asn":{"autonomous_system_number":13335,"autonomous_system_organization":"CLOUDFLARENET"}} diff --git a/examples/config/transform/enrich/urlscan/config.jsonnet b/examples/config/transform/enrich/urlscan/config.jsonnet deleted file mode 100644 index 74a85f6d..00000000 --- a/examples/config/transform/enrich/urlscan/config.jsonnet +++ /dev/null @@ -1,49 +0,0 @@ -// This example shows how to make scan requests and retrieve -// results using the urlscan API (https://urlscan.io/docs/api/). -local sub = import '../../../../../build/config/substation.libsonnet'; - -local headers = { 'API-Key': '${SECRET:URLSCAN}', 'Content-Type': 'application/json' }; - -{ - transforms: [ - // Retrieve the urlscan API key from the secrets store. - // (Never put a secret directly into a configuration.) - sub.transform.utility.secret({ - // The API key is stored in an environment variable named - // `URLSCAN_API_KEY`. - secret: sub.secrets.environment_variable({ id: 'URLSCAN', name: 'URLSCAN_API_KEY' }), - }), - // Sends a scan request and waits for the result. This - // follows recommended practices from the urlscan API docs, - // and will try to fetch the result up to 3 times over 15s. - // If there are no results after retrying, then the unmodified - // message is sent to stdout. - sub.tf.enrich.http.post({ - object: { body_key: '@this', target_key: 'meta response' }, - url: 'https://urlscan.io/api/v1/scan/', - headers: headers, - }), - // - sub.tf.util.delay({ duration: '5s' }), - sub.tf.meta.err({ transforms: [ // Errors are caught in case the retry limit is reached. - sub.tf.meta.retry({ - // This condition runs on the result of the transforms. If - // it returns false, then the transforms are retried until - // it returns true or the retry settings are exhausted. - condition: sub.cnd.all([ - sub.cnd.num.len.gt({ object: { source_key: 'meta result.task.time' }, value: 0 }), - ]), - transforms: [ - sub.tf.enrich.http.get({ - object: { source_key: 'meta response.uuid', target_key: 'meta result' }, - url: 'https://urlscan.io/api/v1/result/${DATA}', // DATA is the value of the source_key. - headers: headers, - }), - ], - retry: { delay: '5s', count: 3 }, // Retry up to 3 times with a 5 second delay (5s, 5s, 5s). - }) - ]}), - sub.tf.obj.cp({ object: { source_key: 'meta result' } }), - sub.tf.send.stdout({ batch: { size: 1000 * 1000 * 5 } }), // 5MB (the results can be large). 
- ], -} diff --git a/examples/config/transform/meta/each_in_array/stdout.json b/examples/config/transform/meta/each_in_array/stdout.json deleted file mode 100644 index 7598b8b0..00000000 --- a/examples/config/transform/meta/each_in_array/stdout.json +++ /dev/null @@ -1 +0,0 @@ -{"a":[{"c":2,"z":true},{"c":4,"z":true}]} diff --git a/examples/config/transform/number/clamp/config.jsonnet b/examples/config/transform/number/clamp/config.jsonnet deleted file mode 100644 index 0733b599..00000000 --- a/examples/config/transform/number/clamp/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -// This example uses the `number.clamp` pattern to return a value that is -// constrained to a range, where the range is defined by two constants. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - // Use `null` for object keys to operate on the entire message. - transforms: sub.pattern.tf.num.clamp(null, null, 0, 100) + [ - sub.tf.send.stdout(), - ], -} diff --git a/examples/config/transform/send/aws_retryable_errors/config.jsonnet b/examples/config/transform/send/aws_retryable_errors/config.jsonnet deleted file mode 100644 index b5513ebf..00000000 --- a/examples/config/transform/send/aws_retryable_errors/config.jsonnet +++ /dev/null @@ -1,25 +0,0 @@ -// This example configures custom retryable errors for the Kinesis Data Stream -// destination transform. All AWS transforms support a custom retry strategy, -// which can be used to handle transient errors in a way that is specific to -// the AWS service being used or the specific use case. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.aws.kinesis_data_stream( - settings={ stream_name: 'substation', retry: { - // The maximum number of times to retry a request. - // - // The default is 3. - count: 3, - // A list of regular expressions that match error messages - // and cause the request to be retried. If there is no match, then - // the default AWS retry strategy is used. - // - // The default is an empty list (i.e. no custom retryable errors). - error_messages: ['connection reset by peer'], - } }, - ), - ], -} diff --git a/examples/config/transform/send/batch/config.jsonnet b/examples/config/transform/send/batch/config.jsonnet deleted file mode 100644 index fc73e6c1..00000000 --- a/examples/config/transform/send/batch/config.jsonnet +++ /dev/null @@ -1,19 +0,0 @@ -// This example configures send transforms with batch keys to organize -// data before it is sent externally. Every send transform supports batching -// and optionally grouping JSON objects by a value derived from the object. -local sub = import '../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout({ - // Each object is organized by the value retrieved from the `group_id` key. - object: { batch_key: 'group_id' }, - }), - sub.tf.send.file({ - // This also applies to file-based send transforms, and every other send - // transform as well. - object: { batch_key: 'group_id' }, - }), - ], -} diff --git a/examples/terraform/aws/README.md b/examples/terraform/aws/README.md deleted file mode 100644 index 0e844b44..00000000 --- a/examples/terraform/aws/README.md +++ /dev/null @@ -1,576 +0,0 @@ -# AWS - -These example deployments demonstrate different use cases for Substation on AWS. 
- -# CloudWatch Logs - -## Cross-Account / Cross-Region - -Deploys a data pipeline that collects data from CloudWatch log groups in any account or region into a Kinesis Data Stream. - -```mermaid - -flowchart LR - %% resources - cw1([CloudWatch Log Group]) - cw2([CloudWatch Log Group]) - cw3([CloudWatch Log Group]) - kds([Kinesis Data Stream]) - - consumerHandler[[Handler]] - consumerTransforms[Transforms] - - subgraph Account B / Region us-west-2 - cw2 - end - - subgraph Account A / Region us-west-2 - cw3 - end - - subgraph Account A / Region us-east-1 - cw1 --> kds - cw3 --> kds - cw2 --> kds - kds --> consumerHandler - - subgraph Substation Consumer Node - consumerHandler --> consumerTransforms - end - end -``` - -## To Lambda - -Deploys a data pipeline that sends data from a CloudWatch log group to a Lambda function. - -```mermaid - -flowchart LR - %% resources - cw([CloudWatch Log Group]) - - consumerHandler[[Handler]] - consumerTransforms[Transforms] - - cw --> consumerHandler - - subgraph Substation Consumer Node - consumerHandler --> consumerTransforms - end -``` - -# DynamoDB - -## Change Data Capture (CDC) - -Deploys a data pipeline that implements a [change data capture (CDC) pattern using DynamoDB Streams](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html). - -```mermaid - -flowchart LR - %% resources - ddb([DynamoDB Table]) - - cdcHandler[[Handler]] - cdcTransforms[Transforms] - - %% connections - ddb --> cdcHandler - subgraph Substation CDC Node - cdcHandler --> cdcTransforms - end -``` - -## Distributed Lock - -Deploys a data pipeline that implements a distributed lock pattern using DynamoDB. This pattern can be used to add "exactly-once" semantics to services that otherwise do not support it. For similar examples, see the "exactly once" configurations [here](/examples/config/transform/meta/). - -## Telephone - -Deploys a data pipeline that implements a "telephone" pattern by sharing data as context between multiple Lambda functions using a DynamoDB table. This pattern can be used to enrich events across unique data sources. - -```mermaid - -flowchart LR - %% resources - md_kinesis([Device Management - Kinesis Data Stream]) - edr_kinesis([EDR Kinesis Data Stream]) - idp_kinesis([IdP Kinesis Data Stream]) - dynamodb([DynamoDB Table]) - - edrEnrichmentHandler[[Handler]] - edrEnrichmentTransforms[Transforms] - - edrTransformHandler[[Handler]] - edrTransformTransforms[Transforms] - - idpEnrichmentHandler[[Handler]] - idpEnrichmentTransforms[Transforms] - - mdEnrichmentHandler[[Handler]] - mdEnrichmentTransforms[Transforms] - - %% connections - edr_kinesis --> edrEnrichmentHandler - subgraph Substation EDR Enrichment Node - edrEnrichmentHandler --> edrEnrichmentTransforms - end - - edr_kinesis --> edrTransformHandler - subgraph Substation EDR Transform Node - edrTransformHandler --> edrTransformTransforms - end - - idp_kinesis --> idpEnrichmentHandler - subgraph Substation IdP Enrichment Node - idpEnrichmentHandler --> idpEnrichmentTransforms - end - - md_kinesis --> mdEnrichmentHandler - subgraph Substation Dvc Mgmt Enrichment Node - mdEnrichmentHandler --> mdEnrichmentTransforms - end - - edrEnrichmentTransforms --- dynamodb - edrTransformTransforms --- dynamodb - idpEnrichmentTransforms --- dynamodb - mdEnrichmentTransforms --- dynamodb -``` - -# EventBridge - -## Lambda Bus - -Deploys a data pipeline that sends data from an EventBridge event bus to a Lambda function. 
- -```mermaid -flowchart LR - %% resources - ebb([EventBridge Bus]) - ebs([EventBridge Scheduler]) - - producerHandler[[Handler]] - producerTransforms[Transforms] - - consumerHandler[[Handler]] - consumerTransforms[Transforms] - - %% connections - ebs --> ebs - ebs --> producerHandler - subgraph Substation Producer Node - producerHandler --> producerTransforms - end - - producerTransforms --> ebb --> consumerHandler - - subgraph Substation Consumer Node - consumerHandler --> consumerTransforms - end -``` - -# Firehose - -## Data Transform - -Deploys a [Firehose](https://aws.amazon.com/firehose/) delivery stream with [data transformation](https://docs.aws.amazon.com/firehose/latest/dev/data-transformation.html) enabled. - -```mermaid - -flowchart LR - %% resources - data[/Data/] - firehose([Kinesis Data Firehose]) - s3([S3 Bucket]) - - nodeHandler[[Handler]] - nodeTransforms[Transforms] - - %% connections - data --> firehose --> nodeHandler - - subgraph Substation Node - nodeHandler --> nodeTransforms --> nodeHandler - end - - nodeHandler --> firehose - firehose --> s3 -``` - -# Kinesis - -## Autoscale - -Deploys a Kinesis Data Stream with autoscaling enabled. This can also be used without Substation to manage Kinesis Data Streams. - -```mermaid - -flowchart LR - kds[("Kinesis - Data Stream")] - sns("Autoscale SNS Topic") - cw_upscale("CloudWatch Upscale Alarm") - cw_downscale("CloudWatch Downscale Alarm") - autoscale("Autoscale Lambda") - - autoscale -- UpdateShardCount API --> kds - autoscale -- PutMetricAlarm API ---> cw_upscale - autoscale -- PutMetricAlarm API ---> cw_downscale - - cw_downscale -. notifies .- sns - cw_upscale -. notifies .- sns - - sns -- notifies ---> autoscale - cw_downscale -. monitors .- kds - cw_upscale -. monitors .- kds -``` - -## Multi-Stream - -Deploys a data pipeline that implements a multi-phase streaming data pattern using Kinesis Data Streams. - -```mermaid - -flowchart LR - %% resources - gateway([API Gateway]) - kds1([Kinesis Data Stream]) - kds2([Kinesis Data Stream]) - - publisherHandler[[Handler]] - publisherTransforms[Transforms] - - subscriberHandler[[Handler]] - subscriberTransforms[Transforms] - - %% connections - gateway --> kds1 --> publisherHandler - subgraph Substation Publisher Node - publisherHandler --> publisherTransforms - end - - publisherTransforms --> kds2 --> subscriberHandler - - subgraph Substation Subscriber Node - subscriberHandler --> subscriberTransforms - end -``` - -## nXDR - -Deploys a data pipeline that implements an nXDR pattern by applying threat / risk enrichment metadata to events and sending the enriched data to multiple destinations. 
This pattern is useful for: -- Generating risk-based detection rules -- Guiding analysts during incident investigations and incident response -- Aiding unstructured threat hunts -- Prioritizing logs for retention and analysis - -```mermaid - -flowchart LR - %% resources - kinesis([Kinesis Data Stream]) - dynamodb([DynamoDB Table]) - ext([External System]) - - enrichmentHandler[[Handler]] - enrichmentTransforms[Transforms] - - transformHandler[[Handler]] - transformTransforms[Transforms] - - %% connections - kinesis --> enrichmentHandler - subgraph Substation Enrichment Node - enrichmentHandler --> enrichmentTransforms - end - - enrichmentTransforms --> dynamodb - - kinesis --> transformHandler - subgraph Substation Transform Node - transformHandler --> transformTransforms - end - - transformTransforms --> ext -``` - -## Time Travel - -Deploys a data pipeline that implements a "time travel" pattern by having a subscriber node read data more slowly than an enrichment node. The nodes share data observed across different events using a DynamoDB table. - -```mermaid - -flowchart LR - %% resources - gateway([API Gateway]) - kinesis([Kinesis Data Stream]) - dynamodb([DynamoDB Table]) - - enrichmentHandler[[Handler]] - enrichmentTransforms[Transforms] - - subscriberHandler[[Handler]] - subscriberTransforms[Transforms] - - gateway --> kinesis - %% connections - kinesis -- 5 seconds --> enrichmentHandler - subgraph Substation Enrichment Node - enrichmentHandler --> enrichmentTransforms - end - - enrichmentTransforms --> dynamodb - - kinesis -- 15 seconds --> subscriberHandler - subgraph Substation Subscriber Node - subscriberHandler --> subscriberTransforms - end - - dynamodb --- subscriberTransforms -``` - -# Lambda - -## AppConfig - -Deploys a data pipeline with an invalid config that triggers AppConfig's validator feature. When the AppConfig service receives the compiled Substation configuration and attempts to deploy, the deployment will fail and return an error. - -## Microservice - -Deploys a synchronous microservice that performs DNS resolution. The service can be invoked [synchronously](https://docs.aws.amazon.com/lambda/latest/dg/invocation-sync.html) or using a [Lambda URL](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html). - -```mermaid - -flowchart LR - %% resources - gateway[HTTPS Endpoint] - cli[AWS CLI] - - nodeHandler[[Handler]] - nodeTransforms[Transforms] - - %% connections - gateway <--> nodeHandler - cli <--> nodeHandler - - subgraph Substation Node - nodeHandler --> nodeTransforms --> nodeHandler - end -``` - -## VPC - -Deploys a synchronous microservice in a VPC that returns the public IP address of the Lambda function. The Lambda can be invoked [synchronously](https://docs.aws.amazon.com/lambda/latest/dg/invocation-sync.html) or using a [Lambda URL](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html). This example can be used to validate how Substation transforms behave inside a VPC. - -```mermaid - -flowchart LR - %% resources - gateway[HTTPS Endpoint] - cli[AWS CLI] - - nodeHandler[[Handler]] - nodeTransforms[Transforms] - - %% connections - gateway <--> nodeHandler - cli <--> nodeHandler - - subgraph VPC Network - subgraph Substation Node - nodeHandler --> nodeTransforms --> nodeHandler - end - end -``` - -# S3 - -## Data Lake - -Deploys a data pipeline that implements a [data lake pattern using S3](https://docs.aws.amazon.com/whitepapers/latest/building-data-lakes/amazon-s3-data-lake-storage-platform.html).
The S3 bucket contains two copies of the data (original and transformed). - -```mermaid - -flowchart LR - bucket([S3 Bucket]) - handler[[Handler]] - gateway([API Gateway]) - - sendS3x[Send to AWS S3] - sendS3y[Send to AWS S3] - mod[...] - - %% connections - gateway --> handler - - subgraph Substation Node - handler --> sendS3x - - subgraph Transforms - sendS3x --> mod --> sendS3y - end - - end - - sendS3x --> bucket - sendS3y --> bucket -``` - -## Retry on Failure - -Deploys a data pipeline that reads data from an S3 bucket and automatically retries failed events using an SQS queue as a [failure destination](https://aws.amazon.com/blogs/compute/introducing-aws-lambda-destinations/). This example will retry forever until the error is resolved. - -```mermaid - -flowchart LR - %% resources - bucket([S3 Bucket]) - queue([SQS Queue]) - %% connections - bucket --> handler - N -.-> queue - queue --> R - rTransforms --> handler - - subgraph N["Substation Node"] - handler[[Handler]] --> transforms[Transforms] - end - subgraph R["Substation Retrier"] - rHandler[[Handler]] --> rTransforms[Transforms] - end -``` - -## SNS - -Deploys a data pipeline that reads data from an S3 bucket via an SNS topic. - -```mermaid - -flowchart LR - %% resources - bucket([S3 Bucket]) - sns([SNS Topic]) - - handler[[Handler]] - transforms[Transforms] - - %% connections - bucket --> sns --> handler - subgraph Substation Node - handler --> transforms - end -``` - -## XDR - -Deploys a data pipeline that implements an XDR (extended detection and response) pattern by reading files from an S3 bucket, conditionally filtering and applying threat / risk enrichment metadata to events, and then writing the enriched events to an S3 bucket. The S3 bucket contains two copies of the data (original and transformed). - -```mermaid -flowchart LR - bucket([S3 Bucket]) - handler[[Handler]] - - threat[Threat Enrichments] - sendS3[Send to AWS S3] - - %% connections - bucket --> handler - - subgraph Substation Node - handler --> threat - - subgraph Transforms - threat --> sendS3 - end - - end - - sendS3 --> bucket -``` - - -# SNS - -## Pub/Sub - -Deploys a data pipeline that implements a [publish/subscribe (pub/sub) pattern](https://aws.amazon.com/what-is/pub-sub-messaging/). The `examples/cmd/client/file` application can be used to send data to the SNS topic. - -```mermaid - -flowchart LR - %% resources - file[(File)] - sns([SNS Topic]) - - cliHandler[[Handler]] - cliTransforms[Transforms] - sub1Handler[[Handler]] - sub1Transforms[Transforms] - sub2Handler[[Handler]] - sub2Transforms[Transforms] - sub3Handler[[Handler]] - sub3Transforms[Transforms] - - %% connections - cliHandler -.- file - subgraph Substation Client - cliHandler --> cliTransforms - end - - cliTransforms --> sns - sns --> sub1Handler - sns --> sub2Handler - sns --> sub3Handler - - subgraph Substation Subscriber Node - sub3Handler --> sub3Transforms - end - - subgraph Substation Subscriber Node - sub2Handler --> sub2Transforms - end - - subgraph Substation Subscriber Node - sub1Handler --> sub1Transforms - end -``` - -# SQS - -## Microservice - -Deploys an asynchronous microservice that performs DNS resolution. The service can be invoked [synchronously](https://docs.aws.amazon.com/lambda/latest/dg/invocation-sync.html) or using a [Lambda URL](https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html); requests to the service are assigned a UUID that can be used to retrieve the result from the DynamoDB table. 
- -```mermaid - -flowchart LR - %% resources - gateway[HTTPS Endpoint] - cli[AWS CLI] - sqs([SQS Queue]) - ddb([DynamoDB Table]) - - gatewayHandler[[Handler]] - gatewayTransforms[Transforms] - - microserviceHandler[[Handler]] - microserviceTransforms[Transforms] - - %% connections - gateway <--> gatewayHandler - cli <--> gatewayHandler - - subgraph Substation Frontend Node - gatewayHandler --> gatewayTransforms --> gatewayHandler - end - - gatewayTransforms --> sqs --> microserviceHandler - - subgraph Substation Microservice Node - microserviceHandler --> microserviceTransforms - end - - microserviceTransforms --> ddb -``` diff --git a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/config/consumer/config.jsonnet b/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/config/consumer/config.jsonnet deleted file mode 100644 index f545b4d6..00000000 --- a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/config/consumer/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // CloudWatch logs sent to Kinesis Data Streams are gzip compressed. - // These must be decompressed before other transforms are applied. - sub.tf.fmt.from.gzip(), - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_provider.tf b/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_provider.tf deleted file mode 100644 index e2cba33f..00000000 --- a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_provider.tf +++ /dev/null @@ -1,8 +0,0 @@ -provider "aws" { - region = "us-east-1" -} - -provider "aws" { - alias = "usw2" - region = "us-west-2" -} diff --git a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_resources.tf b/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_resources.tf deleted file mode 100644 index 0b0ceb2d..00000000 --- a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/_resources.tf +++ /dev/null @@ -1,102 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -# Kinesis Data Stream that is used as the destination for CloudWatch Logs. -module "kds" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Reads data from the stream. - module.lambda_consumer.role.name, - # Writes data to the stream. - module.cw_destination_use1.role.name, - module.cw_destination_usw2.role.name, - ] -} - -# CloudWatch Logs destination that sends logs to the Kinesis Data Stream from us-east-1. 
-module "cw_destination_use1" { - source = "../../../../../../build/terraform/aws/cloudwatch/destination" - - config = { - name = "substation" - destination_arn = module.kds.arn - - # By default, any CloudWatch log in the current AWS account can send logs to this destination. - # Add additional AWS account IDs to allow them to send logs to the destination. - account_ids = [] - } -} - -module "cw_subscription_use1" { - source = "../../../../../../build/terraform/aws/cloudwatch/subscription" - - config = { - name = "substation" - destination_arn = module.cw_destination_use1.arn - log_groups = [ - # This log group does not exist. Add other log groups for resources in the account and region. - # "/aws/lambda/test", - ] - } -} - -# CloudWatch Logs destination that sends logs to the Kinesis Data Stream from us-west-2. -# To add support for more regions, copy this module and change the provider. -module "cw_destination_usw2" { - source = "../../../../../../build/terraform/aws/cloudwatch/destination" - providers = { - aws = aws.usw2 - } - - config = { - name = "substation" - destination_arn = module.kds.arn - - # By default, any CloudWatch log in the current AWS account can send logs to this destination. - # Add additional AWS account IDs to allow them to send logs to the destination. - account_ids = [] - } -} diff --git a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/autoscaler.tf b/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/autoscaler.tf deleted file mode 100644 index e4fa889a..00000000 --- a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/autoscaler.tf +++ /dev/null @@ -1,40 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. 
- -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } - - depends_on = [ - module.appconfig.name, - module.ecr_autoscale.url, - ] -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/consumer.tf b/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/consumer.tf deleted file mode 100644 index 7506606f..00000000 --- a/examples/terraform/aws/cloudwatch_logs/cross_account_cross_region/terraform/consumer.tf +++ /dev/null @@ -1,31 +0,0 @@ -module "lambda_consumer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "consumer" - description = "Substation node that consumes from Kinesis" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/consumer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "lambda_consumer" { - event_source_arn = module.kds.arn - function_name = module.lambda_consumer.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - starting_position = "LATEST" -} diff --git a/examples/terraform/aws/cloudwatch_logs/to_lambda/config/consumer/config.jsonnet b/examples/terraform/aws/cloudwatch_logs/to_lambda/config/consumer/config.jsonnet deleted file mode 100644 index 99b93025..00000000 --- a/examples/terraform/aws/cloudwatch_logs/to_lambda/config/consumer/config.jsonnet +++ /dev/null @@ -1,15 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // CloudWatch logs sent to Lambda are base64 encoded and gzip - // compressed within the `awslogs.data` field of the event. - // These must be decoded and decompressed before other transforms are - // applied. 
- sub.tf.obj.cp({ object: { source_key: 'awslogs.data' } }), - sub.tf.fmt.from.base64(), - sub.tf.fmt.from.gzip(), - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/_resources.tf b/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/_resources.tf deleted file mode 100644 index 492b504b..00000000 --- a/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/_resources.tf +++ /dev/null @@ -1,21 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} diff --git a/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/consumer.tf b/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/consumer.tf deleted file mode 100644 index 363340c5..00000000 --- a/examples/terraform/aws/cloudwatch_logs/to_lambda/terraform/consumer.tf +++ /dev/null @@ -1,49 +0,0 @@ -data "aws_caller_identity" "current" {} -data "aws_region" "current" {} - -module "lambda_consumer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "consumer" - description = "Substation node that is invoked by CloudWatch" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/consumer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -# Allows any CloudWatch log group to send logs to the Lambda function in the current AWS account and region. -# Repeat this for each region that sends logs to the Lambda function. -resource "aws_lambda_permission" "consumer" { - statement_id = "AllowExecutionFromCloudWatch" - action = "lambda:InvokeFunction" - function_name = module.lambda_consumer.name - principal = "logs.amazonaws.com" - source_arn = "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:*" -} - -# CloudWatch Logs subscription filter that sends logs to the Lambda function. -module "cw_subscription" { - source = "../../../../../../build/terraform/aws/cloudwatch/subscription" - - config = { - name = "substation" - destination_arn = module.lambda_consumer.arn - log_groups = [ - # This log group does not exist. Add other log groups for resources in the account and region. 
- # "/aws/lambda/test", - ] - } -} diff --git a/examples/terraform/aws/dynamodb/cdc/config/node/config.jsonnet b/examples/terraform/aws/dynamodb/cdc/config/node/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/examples/terraform/aws/dynamodb/cdc/config/node/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/dynamodb/cdc/terraform/_resources.tf b/examples/terraform/aws/dynamodb/cdc/terraform/_resources.tf deleted file mode 100644 index 9fae34fa..00000000 --- a/examples/terraform/aws/dynamodb/cdc/terraform/_resources.tf +++ /dev/null @@ -1,40 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - attributes = [ - { - name = "PK" - type = "S" - } - ] - } - - access = [ - module.node.role.name, - ] -} diff --git a/examples/terraform/aws/dynamodb/cdc/terraform/node.tf b/examples/terraform/aws/dynamodb/cdc/terraform/node.tf deleted file mode 100644 index cd6b5869..00000000 --- a/examples/terraform/aws/dynamodb/cdc/terraform/node.tf +++ /dev/null @@ -1,27 +0,0 @@ -module "node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that receives CDC events" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_DYNAMODB_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "node" { - event_source_arn = module.dynamodb.stream_arn - function_name = module.node.arn - starting_position = "LATEST" -} diff --git a/examples/terraform/aws/dynamodb/distributed_lock/config/node/config.jsonnet b/examples/terraform/aws/dynamodb/distributed_lock/config/node/config.jsonnet deleted file mode 100644 index 7a034ea9..00000000 --- a/examples/terraform/aws/dynamodb/distributed_lock/config/node/config.jsonnet +++ /dev/null @@ -1,40 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -local kv = sub.kv_store.aws_dynamodb({ - table_name: 'substation', - attributes: { partition_key: 'PK', ttl: 'ttl' }, -}); - -{ - transforms: [ - // All messages are locked before they are sent through other - // transform functions, ensuring that the message is processed - // exactly once. - // - // An error in any sub-transform will cause all previously locked - // messages to be unlocked; this only applies to messages that have - // not yet been flushed by a control message. Use the `utility_control` - // transform to manage how often messages are flushed. 
- sub.tf.meta.kv_store.lock(settings={ - kv_store: kv, - prefix: 'distributed_lock', - ttl_offset: '1m', - transform: sub.tf.meta.pipeline({ transforms: [ - // Delaying and simulating an error makes it possible to - // test message unlocking in real-time (view changes using - // the DynamoDB console). Uncomment the lines below to see - // how it works. - // - // sub.tf.utility.delay({ duration: '10s' }), - // sub.pattern.transform.conditional( - // condition=sub.cnd.utility.random(), - // transform=sub.tf.utility.err({ message: 'simulating error to trigger unlock' }), - // ), - // - // Messages are printed to the console. After this, they are locked - // and will not be printed again until the lock expires. - sub.tf.send.stdout(), - ] }), - }), - ], -} diff --git a/examples/terraform/aws/dynamodb/distributed_lock/terraform/_resources.tf b/examples/terraform/aws/dynamodb/distributed_lock/terraform/_resources.tf deleted file mode 100644 index 7200b0f6..00000000 --- a/examples/terraform/aws/dynamodb/distributed_lock/terraform/_resources.tf +++ /dev/null @@ -1,40 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - ttl = "ttl" - - attributes = [ - { - name = "PK" - type = "S" - } - ] - } - - access = [ - module.node.role.name, - ] -} diff --git a/examples/terraform/aws/dynamodb/distributed_lock/terraform/node.tf b/examples/terraform/aws/dynamodb/distributed_lock/terraform/node.tf deleted file mode 100644 index 2f291fef..00000000 --- a/examples/terraform/aws/dynamodb/distributed_lock/terraform/node.tf +++ /dev/null @@ -1,26 +0,0 @@ -module "node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that transforms data exactly-once using a distributed lock" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_API_GATEWAY" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_function_url" "node" { - function_name = module.node.name - authorization_type = "NONE" -} diff --git a/examples/terraform/aws/dynamodb/telephone/config/const.libsonnet b/examples/terraform/aws/dynamodb/telephone/config/const.libsonnet deleted file mode 100644 index 2f3db52e..00000000 --- a/examples/terraform/aws/dynamodb/telephone/config/const.libsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../build/config/substation.libsonnet'; - -{ - kv_store: sub.kv_store.aws_dynamodb({ - table_name: 'substation', - attributes: { partition_key: 'PK', sort_key: 'SK', ttl: 'TTL', value: 'cache' }, - }), -} diff --git a/examples/terraform/aws/dynamodb/telephone/config/dvc_mgmt_enrichment/config.jsonnet b/examples/terraform/aws/dynamodb/telephone/config/dvc_mgmt_enrichment/config.jsonnet deleted file mode 100644 index 42b03fcf..00000000 --- a/examples/terraform/aws/dynamodb/telephone/config/dvc_mgmt_enrichment/config.jsonnet +++ /dev/null @@ 
-1,10 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Puts the user's metadata into the KV store indexed by the host name. - sub.tf.enrich.kv_store.iset({ obj: { src: 'host.name', trg: 'user' }, prefix: 'md_user', kv_store: const.kv_store }), - ], -} diff --git a/examples/terraform/aws/dynamodb/telephone/config/edr_enrichment/config.jsonnet b/examples/terraform/aws/dynamodb/telephone/config/edr_enrichment/config.jsonnet deleted file mode 100644 index 52633cfc..00000000 --- a/examples/terraform/aws/dynamodb/telephone/config/edr_enrichment/config.jsonnet +++ /dev/null @@ -1,18 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // If the host metadata contains the host name, then it's put into the KV store - // indexed by the host ID. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.all([ - sub.cnd.num.len.gt({ obj: { src: 'host.name' }, value: 0 }), - ]), - transform: sub.tf.enrich.kv_store.iset({ obj: { src: 'host.id', trg: 'host' }, prefix: 'edr_host', kv_store: const.kv_store }), - }, - ] }), - ], -} diff --git a/examples/terraform/aws/dynamodb/telephone/config/edr_transform/config.jsonnet b/examples/terraform/aws/dynamodb/telephone/config/edr_transform/config.jsonnet deleted file mode 100644 index 6f340fe5..00000000 --- a/examples/terraform/aws/dynamodb/telephone/config/edr_transform/config.jsonnet +++ /dev/null @@ -1,24 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -// cnd_copy is a helper function for copying values that are not null. -local cnd_copy(source, target) = sub.pattern.tf.conditional( - condition=sub.cnd.num.len.gt({ obj: { src: source }, value: 0 }), - transform=sub.tf.object.copy({ obj: { src: source, trg: target } }), -); - -{ - concurrency: 1, - transforms: [ - // The value from the KV store can be null, so the result is hidden in metadata and checked before - // copying it into the message data. Many of these values are supersets of each other, so values are - // overwritten if they exist. If any source key is missing, the transform is skipped. - sub.tf.enrich.kv_store.iget({ obj: { src: 'host.id', trg: 'meta edr_host' }, prefix: 'edr_host', kv_store: const.kv_store }), - cnd_copy(source='meta edr_host', target='host'), - sub.tf.enrich.kv_store.iget({ obj: { src: 'host.name', trg: 'meta md_user' }, prefix: 'md_user', kv_store: const.kv_store }), - cnd_copy(source='meta md_user', target='user'), - sub.tf.enrich.kv_store.iget({ obj: { src: 'user.email', trg: 'meta idp_user' }, prefix: 'idp_user', kv_store: const.kv_store }), - cnd_copy(source='meta idp_user', target='user'), - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/dynamodb/telephone/config/idp_enrichment/config.jsonnet b/examples/terraform/aws/dynamodb/telephone/config/idp_enrichment/config.jsonnet deleted file mode 100644 index eb05a4a8..00000000 --- a/examples/terraform/aws/dynamodb/telephone/config/idp_enrichment/config.jsonnet +++ /dev/null @@ -1,31 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // The user's status is determined to be inactive if there is a successful deletion event. 
- // Any other successful authentication event will set the user's status to active. - // - // In production deployments, additional filtering should be used to reduce the number of - // queries made to the KV store. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.all([ - sub.cnd.str.eq({ object: { source_key: 'event.category' }, value: 'authentication' }), - sub.cnd.str.eq({ object: { source_key: 'event.type' }, value: 'deletion' }), - sub.cnd.str.eq({ object: { source_key: 'event.outcome' }, value: 'success' }), - ]), - transform: sub.tf.object.insert({ object: { target_key: 'user.status.-1' }, value: 'idp_inactive' }), - }, - { - condition: sub.cnd.all([ - sub.cnd.str.eq({ object: { source_key: 'event.outcome' }, value: 'success' }), - ]), - transform: sub.tf.object.insert({ object: { target_key: 'user.status.-1' }, value: 'idp_active' }), - }, - ] }), - // Puts the user's metadata into the KV store indexed by their email address. - sub.tf.enrich.kv_store.iset({ obj: { src: 'user.email', trg: 'user' }, prefix: 'idp_user', kv_store: const.kv_store }), - ], -} diff --git a/examples/terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl b/examples/terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl deleted file mode 100644 index aca58aea..00000000 --- a/examples/terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"host":{"name":"Alice's MacBook Pro"},"user":{"email":"alice@brex.com"}} -{"host":{"name":"Bob's MacBook Pro"},"user":{"email":"bob@brex.com"}} diff --git a/examples/terraform/aws/dynamodb/telephone/edr_data.jsonl b/examples/terraform/aws/dynamodb/telephone/edr_data.jsonl deleted file mode 100644 index d42ddb2b..00000000 --- a/examples/terraform/aws/dynamodb/telephone/edr_data.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"host":{"id":"eb67b0b6a1d04086b75ee38d02018a10","name":"Alice's MacBook Pro"}} -{"event":{"category":"network","type":"connection"},"host":{"id":"eb67b0b6a1d04086b75ee38d02018a10"},"process":{"name":"Spotify","pid":"d3a6c0b9d3751559f206e12fb1b8f226"},"server":{"ip":"35.186.224.39","port":443}} diff --git a/examples/terraform/aws/dynamodb/telephone/idp_data.jsonl b/examples/terraform/aws/dynamodb/telephone/idp_data.jsonl deleted file mode 100644 index c82b3fcb..00000000 --- a/examples/terraform/aws/dynamodb/telephone/idp_data.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"event":{"category":"authentication","outcome":"success","type":"access"},"user":{"email":"alice@brex.com","roles":["Manager", "Security", "Engineering"]}} -{"event":{"category":"authentication","outcome":"success","type":"deletion"},"user":{"email":"bob@brex.com","roles":["Manager", "Security", "Engineering"]}} diff --git a/examples/terraform/aws/dynamodb/telephone/post_deploy.sh b/examples/terraform/aws/dynamodb/telephone/post_deploy.sh deleted file mode 100644 index 866cafdb..00000000 --- a/examples/terraform/aws/dynamodb/telephone/post_deploy.sh +++ /dev/null @@ -1,4 +0,0 @@ -sleep 5 -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation_edr terraform/aws/dynamodb/telephone/edr_data.jsonl --print-response -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation_idp terraform/aws/dynamodb/telephone/idp_data.jsonl --print-response -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation_md terraform/aws/dynamodb/telephone/dvc_mgmt_data.jsonl --print-response diff --git a/examples/terraform/aws/dynamodb/telephone/terraform/_resources.tf 
b/examples/terraform/aws/dynamodb/telephone/terraform/_resources.tf deleted file mode 100644 index 7cbbd781..00000000 --- a/examples/terraform/aws/dynamodb/telephone/terraform/_resources.tf +++ /dev/null @@ -1,57 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - range_key = "SK" - ttl = "TTL" - - attributes = [ - { - name = "PK" - type = "S" - }, - { - name = "SK" - type = "S" - }, - ] - } - - access = [ - module.edr_enrichment.role.name, - module.edr_transform.role.name, - module.idp_enrichment.role.name, - module.dvc_mgmt_enrichment.role.name, - ] -} diff --git a/examples/terraform/aws/dynamodb/telephone/terraform/autoscaler.tf b/examples/terraform/aws/dynamodb/telephone/terraform/autoscaler.tf deleted file mode 100644 index b0da7368..00000000 --- a/examples/terraform/aws/dynamodb/telephone/terraform/autoscaler.tf +++ /dev/null @@ -1,38 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/examples/terraform/aws/dynamodb/telephone/terraform/dvc_mgmt.tf b/examples/terraform/aws/dynamodb/telephone/terraform/dvc_mgmt.tf deleted file mode 100644 index 100f0e8d..00000000 --- a/examples/terraform/aws/dynamodb/telephone/terraform/dvc_mgmt.tf +++ /dev/null @@ -1,45 +0,0 @@ -# Kinesis Data Stream that stores data sent from pipeline sources. -module "dvc_mgmt_kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation_md" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.dvc_mgmt_enrichment.role.name, - ] -} - -module "dvc_mgmt_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "dvc_mgmt_enrichment" - description = "Substation node that enriches device management data." 
- image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/dvc_mgmt_enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "dvc_mgmt_enrichment" { - event_source_arn = module.dvc_mgmt_kinesis.arn - function_name = module.dvc_mgmt_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/examples/terraform/aws/dynamodb/telephone/terraform/edr.tf b/examples/terraform/aws/dynamodb/telephone/terraform/edr.tf deleted file mode 100644 index a5350205..00000000 --- a/examples/terraform/aws/dynamodb/telephone/terraform/edr.tf +++ /dev/null @@ -1,78 +0,0 @@ -# Kinesis Data Stream that stores data sent from pipeline sources. -module "edr_kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation_edr" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.edr_enrichment.role.name, - module.edr_transform.role.name, - ] -} - -module "edr_transform" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "edr_transform" - description = "Substation node that transforms EDR data." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/edr_transform" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "edr_transform" { - event_source_arn = module.edr_kinesis.arn - function_name = module.edr_transform.arn - # This is set to 30 seconds (compared to the other data sources - # 5 seconds) to simulate the asynchronous arrival of data in a - # real-world scenario. - maximum_batching_window_in_seconds = 30 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} - -module "edr_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "edr_enrichment" - description = "Substation node that enriches EDR data." 
- image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/edr_enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "edr_enrichment" { - event_source_arn = module.edr_kinesis.arn - function_name = module.edr_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/examples/terraform/aws/dynamodb/telephone/terraform/idp.tf b/examples/terraform/aws/dynamodb/telephone/terraform/idp.tf deleted file mode 100644 index c5bff745..00000000 --- a/examples/terraform/aws/dynamodb/telephone/terraform/idp.tf +++ /dev/null @@ -1,45 +0,0 @@ -# Kinesis Data Stream that stores data sent from pipeline sources. -module "idp_kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation_idp" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.idp_enrichment.role.name, - ] -} - -module "idp_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "idp_enrichment" - description = "Substation node that enriches IdP data." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/idp_enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "idp_enrichment" { - event_source_arn = module.idp_kinesis.arn - function_name = module.idp_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). 
- starting_position = "TRIM_HORIZON" -} diff --git a/examples/terraform/aws/eventbridge/lambda_bus/config/consumer/config.jsonnet b/examples/terraform/aws/eventbridge/lambda_bus/config/consumer/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/examples/terraform/aws/eventbridge/lambda_bus/config/consumer/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/eventbridge/lambda_bus/config/producer/config.jsonnet b/examples/terraform/aws/eventbridge/lambda_bus/config/producer/config.jsonnet deleted file mode 100644 index b2a29083..00000000 --- a/examples/terraform/aws/eventbridge/lambda_bus/config/producer/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.time.now({object: {target_key: 'ts'}}), - sub.tf.obj.insert({object: {target_key: 'message'}, value: 'Hello from the EventBridge scheduler!'}), - // This sends the event to the default bus. - sub.tf.send.aws.eventbridge(), - ], -} diff --git a/examples/terraform/aws/eventbridge/lambda_bus/terraform/_resources.tf b/examples/terraform/aws/eventbridge/lambda_bus/terraform/_resources.tf deleted file mode 100644 index 06e93869..00000000 --- a/examples/terraform/aws/eventbridge/lambda_bus/terraform/_resources.tf +++ /dev/null @@ -1,17 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} diff --git a/examples/terraform/aws/eventbridge/lambda_bus/terraform/consumer.tf b/examples/terraform/aws/eventbridge/lambda_bus/terraform/consumer.tf deleted file mode 100644 index 390b0f1e..00000000 --- a/examples/terraform/aws/eventbridge/lambda_bus/terraform/consumer.tf +++ /dev/null @@ -1,35 +0,0 @@ -module "eventbridge_consumer" { - source = "../../../../../../build/terraform/aws/eventbridge/lambda" - - config = { - name = "substation_consumer" - description = "Routes messages from any Substation producer to a Substation Lambda consumer." - function = module.lambda_consumer # This is the Lambda function that will be invoked. - event_pattern = jsonencode({ - # This matches every event sent by any Substation app. - source = [{ "wildcard" : "substation.*" }] - }) - } - - access = [ - module.lambda_producer.role.name, - ] -} - -module "lambda_consumer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "consumer" - description = "Substation node that is invoked by the EventBridge bus." 
- image_uri = "${module.ecr.url}:v1.5.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/consumer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } -} diff --git a/examples/terraform/aws/eventbridge/lambda_bus/terraform/producer.tf b/examples/terraform/aws/eventbridge/lambda_bus/terraform/producer.tf deleted file mode 100644 index 7673ea4b..00000000 --- a/examples/terraform/aws/eventbridge/lambda_bus/terraform/producer.tf +++ /dev/null @@ -1,28 +0,0 @@ -module "eventbridge_producer" { - source = "../../../../../../build/terraform/aws/eventbridge/lambda" - - config = { - name = "substation_producer" - description = "Sends messages to the default EventBridge bus on a schedule." - function = module.lambda_producer # This is the Lambda function that will be invoked. - schedule = "rate(1 minute)" - } -} - -module "lambda_producer" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "producer" - description = "Substation node that is invoked by the EventBridge schedule." - image_uri = "${module.ecr.url}:v1.5.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/producer" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } -} diff --git a/examples/terraform/aws/firehose/data_transform/config/transform_node/config.jsonnet b/examples/terraform/aws/firehose/data_transform/config/transform_node/config.jsonnet deleted file mode 100644 index 2bac490c..00000000 --- a/examples/terraform/aws/firehose/data_transform/config/transform_node/config.jsonnet +++ /dev/null @@ -1,14 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.object.insert( - settings={ object: { target_key: 'transformed' }, value: true } - ), - // Appending a newline is required so that the S3 object is line delimited. 
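// (Firehose writes transformed records into the S3 object back to back, so
// without the trailing newline consecutive JSON records would run together.)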
- sub.tf.string.append( - settings={ suffix: '\n' } - ), - ], -} diff --git a/examples/terraform/aws/firehose/data_transform/terraform/_resources.tf b/examples/terraform/aws/firehose/data_transform/terraform/_resources.tf deleted file mode 100644 index f8d1f409..00000000 --- a/examples/terraform/aws/firehose/data_transform/terraform/_resources.tf +++ /dev/null @@ -1,191 +0,0 @@ -locals { - name = "firehose" -} - -data "aws_caller_identity" "caller" {} - -resource "random_uuid" "id" {} - -module "kms" { - source = "../../../../../../build/terraform/aws/kms" - - config = { - name = "alias/substation" - } -} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "substation" - force_delete = true - } -} - -################################## -# Firehose resources -################################## - -# IAM -data "aws_iam_policy_document" "firehose" { - statement { - effect = "Allow" - - principals { - type = "Service" - identifiers = ["firehose.amazonaws.com"] - } - - actions = ["sts:AssumeRole"] - } -} - -resource "aws_iam_role" "firehose" { - name = "substation-firehose-${local.name}" - assume_role_policy = data.aws_iam_policy_document.firehose.json -} - -data "aws_iam_policy_document" "firehose_s3" { - statement { - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - resources = [ - module.kms.arn, - ] - } - - statement { - effect = "Allow" - actions = [ - "s3:AbortMultipartUpload", - "s3:GetBucketLocation", - "s3:GetObject", - "s3:ListBucket", - "s3:ListBucketMultipartUploads", - "s3:PutObject" - ] - - resources = [ - aws_s3_bucket.firehose_s3.arn, - "${aws_s3_bucket.firehose_s3.arn}/*", - ] - } -} - -resource "aws_iam_policy" "firehose_s3" { - name = "substation-firehose-${resource.random_uuid.id.id}" - description = "Policy for the ${local.name} Kinesis Data Firehose." - policy = data.aws_iam_policy_document.firehose_s3.json -} - - -resource "aws_iam_role_policy_attachment" "firehose_s3" { - role = aws_iam_role.firehose.name - policy_arn = aws_iam_policy.firehose_s3.arn -} - -# S3 -resource "random_uuid" "firehose_s3" {} - -resource "aws_s3_bucket" "firehose_s3" { - bucket = "${random_uuid.firehose_s3.result}-substation" - force_destroy = true -} - -resource "aws_s3_bucket_ownership_controls" "firehose_s3" { - bucket = aws_s3_bucket.firehose_s3.id - rule { - object_ownership = "BucketOwnerPreferred" - } -} - -resource "aws_s3_bucket_server_side_encryption_configuration" "firehose_s3" { - bucket = aws_s3_bucket.firehose_s3.bucket - - rule { - apply_server_side_encryption_by_default { - kms_master_key_id = module.kms.arn - sse_algorithm = "aws:kms" - } - } -} - -# Kinesis Data Firehose -resource "aws_kinesis_firehose_delivery_stream" "firehose" { - name = "substation" - destination = "extended_s3" - - server_side_encryption { - enabled = true - key_type = "CUSTOMER_MANAGED_CMK" - key_arn = module.kms.arn - } - - extended_s3_configuration { - role_arn = aws_iam_role.firehose.arn - bucket_arn = aws_s3_bucket.firehose_s3.arn - kms_key_arn = module.kms.arn - buffering_interval = 60 - - processing_configuration { - enabled = "true" - - processors { - type = "Lambda" - - parameters { - parameter_name = "LambdaArn" - # LATEST is always used for container images. 
- parameter_value = "${module.transform.arn}:$LATEST" - } - } - } - } -} - -module "transform" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "transform_node" - description = "Transforms Kinesis Data Firehose records." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 60 - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/transform_node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_FIREHOSE" - "SUBSTATION_DEBUG" : true - } - } - - access = [ - aws_iam_role.firehose.name, - ] - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} diff --git a/examples/terraform/aws/kinesis/autoscale/terraform/_resources.tf b/examples/terraform/aws/kinesis/autoscale/terraform/_resources.tf deleted file mode 100644 index 609ae675..00000000 --- a/examples/terraform/aws/kinesis/autoscale/terraform/_resources.tf +++ /dev/null @@ -1,83 +0,0 @@ -# Repository for the Autoscale app. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscale alarms. -resource "aws_sns_topic" "autoscale" { - name = "autoscale" -} - -# Kinesis Data Stream that is managed by the Autoscale app. -module "kds" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscale.arn - } - - # Min and max shards can be defined as tags to override changes made - # by the Autoscale app. - tags = { - MinimumShards = 2 - MaximumShards = 4 - } - - # Add additional consumer and producer roles as needed. - access = [ - # Autoscales the stream. - module.lambda_autoscale.role.name, - ] -} - -# Lambda Autoscale application that manages Kinesis Data Streams. -module "lambda_autoscale" { - source = "../../../../../../build/terraform/aws/lambda" - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams." - image_uri = "${module.ecr.url}:v1.3.0" # This should use the project's release tags. - image_arm = true - - # Override the default Autoscale configuration using environment variables. - # These are the default settings, included for demonstration purposes. 
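# As the variable names suggest (an interpretation, not documented here): the
# threshold is the stream utilization ratio that triggers scaling, and the
# datapoint values are how many consecutive alarm datapoints must breach the
# threshold before the stream is scaled up or down.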
- env = { - "AUTOSCALE_KINESIS_THRESHOLD" : 0.7, - "AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS" : 5, - "AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS" : 60, - } - } - - depends_on = [ - module.ecr.url, - ] -} - -resource "aws_sns_topic_subscription" "autoscale_subscription" { - topic_arn = aws_sns_topic.autoscale.arn - protocol = "lambda" - endpoint = module.lambda_autoscale.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} - -resource "aws_lambda_permission" "autoscale_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscale.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscale.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} diff --git a/examples/terraform/aws/kinesis/multistream/config/publisher/config.jsonnet b/examples/terraform/aws/kinesis/multistream/config/publisher/config.jsonnet deleted file mode 100644 index 9b64d783..00000000 --- a/examples/terraform/aws/kinesis/multistream/config/publisher/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // This forwards data to the destination stream without transformation. - sub.tf.send.aws.kinesis_data_stream( - settings={ stream_name: 'substation_dst' }, - ), - ], -} diff --git a/examples/terraform/aws/kinesis/multistream/config/subscriber/config.jsonnet b/examples/terraform/aws/kinesis/multistream/config/subscriber/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/examples/terraform/aws/kinesis/multistream/config/subscriber/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/kinesis/multistream/terraform/_resources.tf b/examples/terraform/aws/kinesis/multistream/terraform/_resources.tf deleted file mode 100644 index 355c2c97..00000000 --- a/examples/terraform/aws/kinesis/multistream/terraform/_resources.tf +++ /dev/null @@ -1,139 +0,0 @@ -data "aws_caller_identity" "caller" {} - -# KMS encryption key that is shared by all Substation infrastructure -module "kms" { - source = "../../../../../../build/terraform/aws/kms" - - config = { - name = "alias/substation" - policy = data.aws_iam_policy_document.kms.json - } -} - -# This policy is required to support encrypted SNS topics. -# More information: https://repost.aws/knowledge-center/cloudwatch-receive-sns-for-alarm-trigger -data "aws_iam_policy_document" "kms" { - # Allows CloudWatch to access encrypted SNS topic. - statement { - sid = "CloudWatch" - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - principals { - type = "Service" - identifiers = ["cloudwatch.amazonaws.com"] - } - - resources = ["*"] - } - - # Default key policy for KMS. - # https://docs.aws.amazon.com/kms/latest/developerguide/determining-access-key-policy.html - statement { - sid = "KMS" - effect = "Allow" - actions = [ - "kms:*", - ] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.caller.account_id}:root"] - } - - resources = ["*"] - } -} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. 
-module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" - kms_master_key_id = module.kms.id -} - -# API Gateway that sends data to Kinesis. -module "gateway_to_kinesis" { - source = "../../../../../../build/terraform/aws/api_gateway/kinesis_data_stream" - # Always required for the Kinisis Data Stream integration. - kinesis_data_stream = module.kds_src - - config = { - name = "gateway" - } -} - -# Kinesis Data Stream that stores data sent from pipeline sources. -module "kds_src" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - kms = module.kms - - config = { - name = "substation_src" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Reads data to the stream. - module.lambda_publisher.role.name, - # Writes data to the stream. - module.gateway_to_kinesis.role.name, - ] -} - -# Kinesis Data Stream that stores data sent from the pipeline processor. -module "kds_dst" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - kms = module.kms - - config = { - name = "substation_dst" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Writes data to the stream. - module.lambda_publisher.role.name, - # Reads data from the stream. - module.lambda_subscriber.role.name, - ] -} diff --git a/examples/terraform/aws/kinesis/multistream/terraform/autoscaler.tf b/examples/terraform/aws/kinesis/multistream/terraform/autoscaler.tf deleted file mode 100644 index e177cb37..00000000 --- a/examples/terraform/aws/kinesis/multistream/terraform/autoscaler.tf +++ /dev/null @@ -1,41 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. 
- -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } - - depends_on = [ - module.appconfig.name, - module.ecr_autoscale.url, - ] -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/examples/terraform/aws/kinesis/multistream/terraform/publisher.tf b/examples/terraform/aws/kinesis/multistream/terraform/publisher.tf deleted file mode 100644 index 3eb64423..00000000 --- a/examples/terraform/aws/kinesis/multistream/terraform/publisher.tf +++ /dev/null @@ -1,32 +0,0 @@ -module "lambda_publisher" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "publisher" - description = "Substation node that publishes to Kinesis" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/publisher" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "lambda_publisher" { - event_source_arn = module.kds_src.arn - function_name = module.lambda_publisher.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - starting_position = "LATEST" -} diff --git a/examples/terraform/aws/kinesis/multistream/terraform/subscriber.tf b/examples/terraform/aws/kinesis/multistream/terraform/subscriber.tf deleted file mode 100644 index 1dfa75f5..00000000 --- a/examples/terraform/aws/kinesis/multistream/terraform/subscriber.tf +++ /dev/null @@ -1,32 +0,0 @@ -module "lambda_subscriber" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "subscriber" - description = "Substation node subscribes to Kinesis" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "lambda_subscriber" { - event_source_arn = module.kds_dst.arn - function_name = module.lambda_subscriber.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - starting_position = "LATEST" -} diff --git a/examples/terraform/aws/kinesis/nxdr/config/const.libsonnet b/examples/terraform/aws/kinesis/nxdr/config/const.libsonnet deleted file mode 100644 index 08462e40..00000000 --- a/examples/terraform/aws/kinesis/nxdr/config/const.libsonnet +++ /dev/null @@ 
-1,15 +0,0 @@ -local sub = import '../../../../../../build/config/substation.libsonnet'; - -{ - threat_signals_key: 'threat.signals', - // threat_signal is a custom function that appends threat info to an - // event as enrichment metadata. - // - // If a smaller event is needed, then the enriched threat signal can - // be emitted as a separate event. This is similar to the implementation - // seen in the enrichment Lambda function. - threat_signal(settings): sub.tf.obj.insert({ - obj: { trg: sub.helpers.obj.append_array($.threat_signals_key) }, - value: { name: settings.name, description: settings.description, references: settings.references, risk_score: settings.risk_score }, - }), -} diff --git a/examples/terraform/aws/kinesis/nxdr/config/enrichment/config.jsonnet b/examples/terraform/aws/kinesis/nxdr/config/enrichment/config.jsonnet deleted file mode 100644 index 9cbaa5ca..00000000 --- a/examples/terraform/aws/kinesis/nxdr/config/enrichment/config.jsonnet +++ /dev/null @@ -1,34 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -local const = import '../const.libsonnet'; -local threat = import '../threat_enrichment.libsonnet'; - -{ - concurrency: 2, - transforms: - threat.transforms + [ - // Discards any events that don't contain threat signals. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.any([ - sub.cnd.num.len.eq({ object: { source_key: const.threat_signals_key }, value: 0 }), - ]), - transform: sub.tf.util.drop(), - }, - ] }), - // Explodes the threat signals array into individual events. These become - // threat signal records in the DynamoDB table. - sub.tf.aggregate.from.array({ object: { source_key: const.threat_signals_key } }), - // The host name and current time are used as the keys for the DynamoDB table. - sub.tf.object.copy({ object: { source_key: 'host.name', target_key: 'PK' } }), - sub.tf.time.now({ object: { target_key: 'SK' } }), - sub.tf.time.to.string({ object: { source_key: 'SK', target_key: 'SK' }, format: '2006-01-02T15:04:05.000Z' }), - // Any fields not needed in the DynamoDB item are removed. - sub.tf.object.delete({ object: { source_key: 'event' } }), - sub.tf.object.delete({ object: { source_key: 'host' } }), - sub.tf.object.delete({ object: { source_key: 'process' } }), - sub.tf.object.delete({ object: { source_key: 'threat' } }), - // Writes the threat signal to the DynamoDB table. - sub.tf.send.aws.dynamodb({ table_name: 'substation_threat_signals' }), - ], -} diff --git a/examples/terraform/aws/kinesis/nxdr/config/threat_enrichment.libsonnet b/examples/terraform/aws/kinesis/nxdr/config/threat_enrichment.libsonnet deleted file mode 100644 index 580517ae..00000000 --- a/examples/terraform/aws/kinesis/nxdr/config/threat_enrichment.libsonnet +++ /dev/null @@ -1,45 +0,0 @@ -// The nXDR pattern relies on Substation's meta_switch transform to conditionally determine -// if an event matches threat criteria. If the event matches, then a threat signal is created. -// The meta_switch transform supports any combination of if-elif-else logic. -local sub = import '../../../../../../build/config/substation.libsonnet'; -local const = import 'const.libsonnet'; - -// Composable conditions are recommended when managing multiple threat signals. 
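// As an illustration of that composition (this group is hypothetical, not
// part of the deleted file), an OS group for Linux could mirror the `macos`
// group below and reuse the shared `process` conditions:
//
//   linux: {
//     os: sub.cnd.str.eq({ obj: { src: 'host.os.type' }, value: 'linux' }),
//     process: $.process + [$.linux.os],
//   },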
-local cnd = { - process: [ - sub.cnd.str.eq({ obj: { src: 'event.category' }, value: 'process' }), - ], - - macos: { - os: sub.cnd.str.eq({ obj: { src: 'host.os.type' }, value: 'macos' }), - process: $.process + [$.macos.os], - }, -}; - -{ - transforms: [ - // Privilege Escalation - // https://attack.mitre.org/tactics/TA0004/ - // - // https://attack.mitre.org/techniques/T1548/004/ - sub.tf.meta.switch({ - local name = 'privilege_escalation_elevated_execution_with_prompt', - - cases: [ - { - transform: const.threat_signal({ - name: name, - description: 'Identifies when an authentication prompt is generated by the AuthorizationExecuteWithPrivileges API.', - references: ['https://objective-see.com/blog/blog_0x2A.html'], - // The risk score can be dynamically calculated based on additional - // fields in the event. - risk_score: 73, - }), - condition: sub.cnd.all(cnd.macos.process + [ - sub.cnd.str.eq({ obj: { src: 'process.name' }, value: 'security_authtrampoline' }), - ]), - }, - ], - }), - ], -} diff --git a/examples/terraform/aws/kinesis/nxdr/config/transform/config.jsonnet b/examples/terraform/aws/kinesis/nxdr/config/transform/config.jsonnet deleted file mode 100644 index 695f96a8..00000000 --- a/examples/terraform/aws/kinesis/nxdr/config/transform/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -local threat = import '../threat_enrichment.libsonnet'; - -{ - concurrency: 2, - transforms: - threat.transforms + [ - // At this point more transforms can be added and the events can be sent - // to an external system. - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/kinesis/nxdr/data.jsonl b/examples/terraform/aws/kinesis/nxdr/data.jsonl deleted file mode 100644 index 037aa3a4..00000000 --- a/examples/terraform/aws/kinesis/nxdr/data.jsonl +++ /dev/null @@ -1,5 +0,0 @@ -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/sbin/sshd -i","name":"sshd","parent":{"command_line":"/usr/libexec/sshd-keygen-wrapper","name":"sshd-keygen-wrapper","parent":{"command_line":"/usr/libexec/launchd","name":"launchd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/libexec/security_authtrampoline /usr/sbin/installer auth 22 -verboseR -allowUntrusted -pkg /private/tmp/xp-6100/epsvcp.pkg -target /","name":"security_authtrampoline","parent":{"command_line":"/private/tmp/update_XP-6100 Series/EPSON.app/Contents/MacOS/EpsonInstaller","name":"EpsonInstaller","parent":{"command_line":"/usr/libexec/runningboardd","name":"runningboardd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome","name":"Google Chrome","parent":{"command_line":"/usr/bin/open -n -a /Applications/Google Chrome.app","name":"open","parent":{"command_line":"/usr/libexec/launchd","name":"launchd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/sbin/cupsd","name":"cupsd","parent":{"command_line":"/usr/libexec/cups/backend/usb","name":"cups-usb-backend","parent":{"command_line":"/usr/libexec/launchd","name":"launchd"}}}} -{"event":{"category":"process","type":"start"},"host":{"name":"Alice's Macbook 
Pro","os":{"type":"macos"}},"process":{"command_line":"/usr/bin/python3 /usr/local/bin/pip install requests","name":"python3","parent":{"command_line":"/usr/local/bin/pip install requests","name":"pip","parent":{"command_line":"/usr/bin/sudo pip3","name":"sudo"}}}} diff --git a/examples/terraform/aws/kinesis/nxdr/post_deploy.sh b/examples/terraform/aws/kinesis/nxdr/post_deploy.sh deleted file mode 100644 index b3794540..00000000 --- a/examples/terraform/aws/kinesis/nxdr/post_deploy.sh +++ /dev/null @@ -1,2 +0,0 @@ -sleep 5 -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation terraform/aws/kinesis/nxdr/data.jsonl --print-response diff --git a/examples/terraform/aws/kinesis/nxdr/terraform/_resources.tf b/examples/terraform/aws/kinesis/nxdr/terraform/_resources.tf deleted file mode 100644 index 7867db2d..00000000 --- a/examples/terraform/aws/kinesis/nxdr/terraform/_resources.tf +++ /dev/null @@ -1,77 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. -resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -# Kinesis Data Stream that stores data sent from pipeline sources. -module "kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - } - - access = [ - # Autoscales the stream. - module.lambda_autoscale.role.name, - # Consumes data from the stream. 
- module.lambda_transform.role.name, - module.lambda_enrichment.role.name, - ] -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation_threat_signals" - hash_key = "PK" - range_key = "SK" - ttl = "TTL" - - attributes = [ - { - name = "PK" - type = "S" - }, - { - name = "SK" - type = "S" - }, - ] - } - - access = [ - module.lambda_enrichment.role.name, - ] -} diff --git a/examples/terraform/aws/kinesis/nxdr/terraform/autoscaler.tf b/examples/terraform/aws/kinesis/nxdr/terraform/autoscaler.tf deleted file mode 100644 index d936193c..00000000 --- a/examples/terraform/aws/kinesis/nxdr/terraform/autoscaler.tf +++ /dev/null @@ -1,33 +0,0 @@ -module "lambda_autoscale" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscale.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscale.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscale.name - ] -} diff --git a/examples/terraform/aws/kinesis/nxdr/terraform/enrichment.tf b/examples/terraform/aws/kinesis/nxdr/terraform/enrichment.tf deleted file mode 100644 index 985e794c..00000000 --- a/examples/terraform/aws/kinesis/nxdr/terraform/enrichment.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "enrichment" - description = "Substation enrichment node that writes threat signals to DynamoDB." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_enrichment" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_enrichment.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/examples/terraform/aws/kinesis/nxdr/terraform/transform.tf b/examples/terraform/aws/kinesis/nxdr/terraform/transform.tf deleted file mode 100644 index e7fdee3d..00000000 --- a/examples/terraform/aws/kinesis/nxdr/terraform/transform.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_transform" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "transform" - description = "Substation transform node that enriches events with threat information." 
- image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/transform" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_transform" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_transform.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/examples/terraform/aws/kinesis/time_travel/config/const.libsonnet b/examples/terraform/aws/kinesis/time_travel/config/const.libsonnet deleted file mode 100644 index e3d453ad..00000000 --- a/examples/terraform/aws/kinesis/time_travel/config/const.libsonnet +++ /dev/null @@ -1,12 +0,0 @@ -local sub = import '../../../../../../build/config/substation.libsonnet'; - -{ - is_process: [ - sub.cnd.str.eq({ obj: { src: 'event.category' }, value: 'process' }), - sub.cnd.str.eq({ obj: { src: 'event.type' }, value: 'start' }), - ], - kv_store: sub.kv_store.aws_dynamodb({ - table_name: 'substation', - attributes: { partition_key: 'PK', sort_key: 'SK', ttl: 'TTL', value: 'cache' }, - }), -} diff --git a/examples/terraform/aws/kinesis/time_travel/config/enrichment/config.jsonnet b/examples/terraform/aws/kinesis/time_travel/config/enrichment/config.jsonnet deleted file mode 100644 index ebd3ac54..00000000 --- a/examples/terraform/aws/kinesis/time_travel/config/enrichment/config.jsonnet +++ /dev/null @@ -1,16 +0,0 @@ -// Puts process metadata into the KV store. -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - // The concurrency is set to 1 to ensure that the KV store is not updated in parallel. - concurrency: 1, - transforms: [ - // If the event is a process, then store the process metadata in the KV store - // indexed by the PID. The data is stored in the KV store for 90 days. - sub.pattern.tf.conditional( - condition=sub.cnd.all(const.is_process), - transform=sub.tf.enrich.kv_store.iset({ obj: { src: 'process.pid', trg: 'process' }, prefix: 'process', ttl_offset: std.format('%dh', 24 * 90), kv_store: const.kv_store, close_kv_store: false }), - ), - ], -} diff --git a/examples/terraform/aws/kinesis/time_travel/config/transform/config.jsonnet b/examples/terraform/aws/kinesis/time_travel/config/transform/config.jsonnet deleted file mode 100644 index 2ba9a63f..00000000 --- a/examples/terraform/aws/kinesis/time_travel/config/transform/config.jsonnet +++ /dev/null @@ -1,28 +0,0 @@ -// All values in the KV store were put there by the `enrichment` function. -local sub = import '../../../../../../../build/config/substation.libsonnet'; -local const = import '../const.libsonnet'; - -{ - concurrency: 2, - transforms: [ - // process.* - // - // This is only applied to non-process events. 
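// (Process start events are the source of the cached metadata and are
// written to the KV store by the enrichment function, so enriching them
// here would only overwrite fields they already carry.)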
- sub.pattern.tf.conditional( - condition=sub.cnd.none(const.is_process), - transform=sub.tf.enrich.kv_store.iget({ obj: { src: 'process.pid', trg: 'process' }, prefix: 'process', kv_store: const.kv_store }), - ), - // process.parent.* - sub.pattern.tf.conditional( - condition=sub.cnd.num.len.gt({ obj: { src: 'process.parent.pid' }, value: 0 }), - transform=sub.tf.enrich.kv_store.iget({ obj: { src: 'process.parent.pid', trg: 'process.parent' }, prefix: 'process', kv_store: const.kv_store }), - ), - // process.parent.parent.* - sub.pattern.tf.conditional( - condition=sub.cnd.num.len.gt({ obj: { src: 'process.parent.parent.pid' }, value: 0 }), - transform=sub.tf.enrich.kv_store.iget({ obj: { src: 'process.parent.parent.pid', trg: 'process.parent.parent' }, prefix: 'process', kv_store: const.kv_store }), - ), - // Print the results. - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/kinesis/time_travel/data.jsonl b/examples/terraform/aws/kinesis/time_travel/data.jsonl deleted file mode 100644 index c0c390c5..00000000 --- a/examples/terraform/aws/kinesis/time_travel/data.jsonl +++ /dev/null @@ -1,4 +0,0 @@ -{"event":{"category":"network","type":"connection"},"process":{"name":"Spotify","pid":"d3a6c0b9d3751559f206e12fb1b8f226"},"server":{"ip":"35.186.224.39","port":443},"@timestamp":"2024-03-29T04:02:38.470000Z"} -{"event":{"category":"process","type":"start"},"process":{"command_line":"/sbin/launchd","name":"launchd","pid":"f23e8b548d2e5e1ef3e122a9c5e08a63","start":"2024-03-13T16:17:45.000000Z","parent":{"pid":"b745f7a7c3a98ac5f087be7420e6e3f9"}}} -{"event":{"category":"process","type":"start"},"process":{"command_line":"/usr/libexec/runningboardd","name":"runningboardd","pid":"8faae8aa27f9b4faff6fd98e60201e3d","start":"2024-03-13T16:17:49.000000Z","parent":{"pid":"f23e8b548d2e5e1ef3e122a9c5e08a63"}}} -{"event":{"category":"process","type":"start"},"process":{"command_line":"/Applications/Spotify.app/Contents/MacOS/Spotify","name":"Spotify","pid":"d3a6c0b9d3751559f206e12fb1b8f226","start":"2024-03-13T16:29:17.000000Z","parent":{"pid":"8faae8aa27f9b4faff6fd98e60201e3d"}}} diff --git a/examples/terraform/aws/kinesis/time_travel/post_deploy.sh b/examples/terraform/aws/kinesis/time_travel/post_deploy.sh deleted file mode 100644 index 6de657d7..00000000 --- a/examples/terraform/aws/kinesis/time_travel/post_deploy.sh +++ /dev/null @@ -1,2 +0,0 @@ -sleep 5 -AWS_DEFAULT_REGION=$AWS_REGION python3 ../build/scripts/aws/kinesis/put_records.py substation terraform/aws/kinesis/time_travel/data.jsonl --print-response diff --git a/examples/terraform/aws/kinesis/time_travel/terraform/_resources.tf b/examples/terraform/aws/kinesis/time_travel/terraform/_resources.tf deleted file mode 100644 index ff885f0c..00000000 --- a/examples/terraform/aws/kinesis/time_travel/terraform/_resources.tf +++ /dev/null @@ -1,79 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -# Repository for the autoscaling application. -module "ecr_autoscale" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "autoscale" - force_delete = true - } -} - -# SNS topic for Kinesis Data Stream autoscaling alarms. 
-resource "aws_sns_topic" "autoscaling_topic" { - name = "autoscale" -} - -# Kinesis Data Stream that stores data sent from pipeline sources. -module "kinesis" { - source = "../../../../../../build/terraform/aws/kinesis_data_stream" - - config = { - name = "substation" - autoscaling_topic = aws_sns_topic.autoscaling_topic.arn - shards = 1 - } - - access = [ - # Autoscales the stream. - module.lambda_autoscaling.role.name, - # Consumes data from the stream. - module.lambda_enrichment.role.name, - module.lambda_transform.role.name, - ] -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - range_key = "SK" - ttl = "TTL" - - attributes = [ - { - name = "PK" - type = "S" - }, - { - name = "SK" - type = "S" - }, - ] - } - - access = [ - module.lambda_enrichment.role.name, - module.lambda_transform.role.name, - ] -} diff --git a/examples/terraform/aws/kinesis/time_travel/terraform/autoscaler.tf b/examples/terraform/aws/kinesis/time_travel/terraform/autoscaler.tf deleted file mode 100644 index 8c4a8577..00000000 --- a/examples/terraform/aws/kinesis/time_travel/terraform/autoscaler.tf +++ /dev/null @@ -1,35 +0,0 @@ -# Used for deploying and maintaining the Kinesis Data Streams autoscaling application; does not need to be used if deployments don't include Kinesis Data Streams. - -module "lambda_autoscaling" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "autoscale" - description = "Autoscaler for Kinesis Data Streams" - image_uri = "${module.ecr_autoscale.url}:v1.3.0" - image_arm = true - } -} - -resource "aws_sns_topic_subscription" "autoscaling_subscription" { - topic_arn = aws_sns_topic.autoscaling_topic.arn - protocol = "lambda" - endpoint = module.lambda_autoscaling.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} - -resource "aws_lambda_permission" "autoscaling_invoke" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_autoscaling.name - principal = "sns.amazonaws.com" - source_arn = aws_sns_topic.autoscaling_topic.arn - - depends_on = [ - module.lambda_autoscaling.name - ] -} diff --git a/examples/terraform/aws/kinesis/time_travel/terraform/enrichment.tf b/examples/terraform/aws/kinesis/time_travel/terraform/enrichment.tf deleted file mode 100644 index 5ae781b3..00000000 --- a/examples/terraform/aws/kinesis/time_travel/terraform/enrichment.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_enrichment" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "enrichment" - description = "Substation node that enriches data from Kinesis and writes it to DynamoDB" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/enrichment" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_enrichment" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_enrichment.arn - maximum_batching_window_in_seconds = 5 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). 
- starting_position = "TRIM_HORIZON" -} diff --git a/examples/terraform/aws/kinesis/time_travel/terraform/transform.tf b/examples/terraform/aws/kinesis/time_travel/terraform/transform.tf deleted file mode 100644 index 66498b17..00000000 --- a/examples/terraform/aws/kinesis/time_travel/terraform/transform.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "lambda_transform" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "transform" - description = "Substation node that reads from Kinesis with a delay to support enrichment" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/transform" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_KINESIS_DATA_STREAM" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "lambda_transform" { - event_source_arn = module.kinesis.arn - function_name = module.lambda_transform.arn - maximum_batching_window_in_seconds = 15 - batch_size = 100 - parallelization_factor = 1 - # In this example, we start from the beginning of the stream, - # but in a prod environment, you may want to start from the end - # of the stream to avoid processing old data ("LATEST"). - starting_position = "TRIM_HORIZON" -} diff --git a/examples/terraform/aws/lambda/appconfig/config/node/config.jsonnet b/examples/terraform/aws/lambda/appconfig/config/node/config.jsonnet deleted file mode 100644 index 8b0028e2..00000000 --- a/examples/terraform/aws/lambda/appconfig/config/node/config.jsonnet +++ /dev/null @@ -1,10 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - transforms: [ - // This will always fail validation because the settings are invalid. - sub.tf.object.delete( - settings={ object: { missing_key: 'abc' } } - ), - ], -} diff --git a/examples/terraform/aws/lambda/appconfig/terraform/_resources.tf b/examples/terraform/aws/lambda/appconfig/terraform/_resources.tf deleted file mode 100644 index ebeed22d..00000000 --- a/examples/terraform/aws/lambda/appconfig/terraform/_resources.tf +++ /dev/null @@ -1,47 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - lambda = module.validate - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "validate" { - source = "../../../../../../build/terraform/aws/lambda" - config = { - name = "validate" - description = "Substation configuration validator that is executed by AppConfig." - image_uri = "${module.ecr_validate.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 1 - } - - depends_on = [ - module.appconfig.name, - module.ecr_validate.url, - ] -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "ecr_validate" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "validate" - force_delete = true - } -} diff --git a/examples/terraform/aws/lambda/appconfig/terraform/node.tf b/examples/terraform/aws/lambda/appconfig/terraform/node.tf deleted file mode 100644 index 83ebc0c4..00000000 --- a/examples/terraform/aws/lambda/appconfig/terraform/node.tf +++ /dev/null @@ -1,31 +0,0 @@ -module "node" { - source = "../../../../../../build/terraform/aws/lambda" - - # AppConfig is configured to validate configurations before deployment. 
-  appconfig = module.appconfig
-
-  config = {
-    name = "node"
-    description = "Substation node that never receives a configuration."
-    image_uri = "${module.ecr.url}:v1.3.0"
-    image_arm = true
-
-    memory = 128
-    timeout = 10
-    env = {
-      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node"
-      "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA"
-      "SUBSTATION_DEBUG" : true
-    }
-  }
-
-  depends_on = [
-    module.appconfig.name,
-    module.ecr.url,
-  ]
-}
-
-resource "aws_lambda_function_url" "url" {
-  function_name = module.node.name
-  authorization_type = "NONE"
-}
diff --git a/examples/terraform/aws/lambda/microservice/config/microservice/config.jsonnet b/examples/terraform/aws/lambda/microservice/config/microservice/config.jsonnet
deleted file mode 100644
index 80684767..00000000
--- a/examples/terraform/aws/lambda/microservice/config/microservice/config.jsonnet
+++ /dev/null
@@ -1,31 +0,0 @@
-local sub = import '../../../../../../../build/config/substation.libsonnet';
-
-{
-  transforms: [
-    // Dynamically handles input from either Lambda URL or synchronous invocation.
-    sub.pattern.tf.conditional(
-      condition=sub.cnd.all([
-        sub.cnd.number.length.greater_than(
-          settings={ object: { source_key: 'body' }, value: 0 }
-        ),
-      ]),
-      transform=sub.tf.object.copy(
-        settings={ object: { source_key: 'body' } }
-      ),
-    ),
-    // Performs a reverse DNS lookup on the 'addr' field if it is a public IP address.
-    sub.pattern.tf.conditional(
-      condition=sub.cnd.none(sub.pattern.cnd.network.ip.internal(key='addr')),
-      transform=sub.tf.enrich.dns.ip_lookup(
-        settings={ object: { source_key: 'addr', target_key: 'domain' } },
-      ),
-    ),
-    // The DNS response is copied so that it is the only value returned in the object.
-    sub.tf.object.copy(
-      settings={ object: { source_key: 'domain' } },
-    ),
-    sub.tf.object.copy(
-      settings={ object: { target_key: 'domain' } },
-    ),
-  ],
-}
diff --git a/examples/terraform/aws/lambda/microservice/terraform/_resources.tf b/examples/terraform/aws/lambda/microservice/terraform/_resources.tf
deleted file mode 100644
index 492b504b..00000000
--- a/examples/terraform/aws/lambda/microservice/terraform/_resources.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-data "aws_caller_identity" "caller" {}
-
-module "appconfig" {
-  source = "../../../../../../build/terraform/aws/appconfig"
-
-  config = {
-    name = "substation"
-    environments = [{
-      name = "example"
-    }]
-  }
-}
-
-module "ecr" {
-  source = "../../../../../../build/terraform/aws/ecr"
-
-  config = {
-    name = "substation"
-    force_delete = true
-  }
-}
diff --git a/examples/terraform/aws/lambda/microservice/terraform/microservice.tf b/examples/terraform/aws/lambda/microservice/terraform/microservice.tf
deleted file mode 100644
index 018b1b8f..00000000
--- a/examples/terraform/aws/lambda/microservice/terraform/microservice.tf
+++ /dev/null
@@ -1,30 +0,0 @@
-module "microservice" {
-  source = "../../../../../../build/terraform/aws/lambda"
-  appconfig = module.appconfig
-
-  config = {
-    name = "microservice"
-    description = "Substation node that acts as a synchronous microservice"
-    image_uri = "${module.ecr.url}:v1.3.0"
-    image_arm = true
-
-    memory = 128
-    timeout = 10
-    env = {
-      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/microservice"
-      "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA"
-      "SUBSTATION_DEBUG" : true
-    }
-  }
-
-
-  depends_on = [
-    module.appconfig.name,
-    module.ecr.url,
-  ]
-}
-
-resource "aws_lambda_function_url" "substation_microservice" {
-  function_name = module.microservice.name
-  authorization_type = "NONE"
-}
diff --git a/examples/terraform/aws/lambda/vpc/config/whatismyip/config.jsonnet b/examples/terraform/aws/lambda/vpc/config/whatismyip/config.jsonnet
deleted file mode 100644
index c70dd57b..00000000
--- a/examples/terraform/aws/lambda/vpc/config/whatismyip/config.jsonnet
+++ /dev/null
@@ -1,11 +0,0 @@
-local sub = import '../../../../../../../build/config/substation.libsonnet';
-
-{
-  transforms: [
-    // Get the IP address of the service and return it in response.
-    sub.tf.enrich.http.get(settings={ url: 'https://ipinfo.io/ip' }),
-    sub.tf.object.copy(
-      settings={ object: { target_key: 'ip' } },
-    ),
-  ],
-}
diff --git a/examples/terraform/aws/lambda/vpc/terraform/_resources.tf b/examples/terraform/aws/lambda/vpc/terraform/_resources.tf
deleted file mode 100644
index 17e80f65..00000000
--- a/examples/terraform/aws/lambda/vpc/terraform/_resources.tf
+++ /dev/null
@@ -1,29 +0,0 @@
-data "aws_caller_identity" "caller" {}
-
-module "appconfig" {
-  source = "../../../../../../build/terraform/aws/appconfig"
-
-  config = {
-    name = "substation"
-    environments = [{
-      name = "example"
-    }]
-  }
-}
-
-module "ecr" {
-  source = "../../../../../../build/terraform/aws/ecr"
-
-  config = {
-    name = "substation"
-    force_delete = true
-  }
-}
-
-# VPC shared by all Substation resources.
-#
-# By default, this creates a /16 VPC with private subnets
-# in three availability zones in us-east-1.
-module "vpc_substation" {
-  source = "../../../../../../build/terraform/aws/networking/vpc"
-}
diff --git a/examples/terraform/aws/lambda/vpc/terraform/whatismyip.tf b/examples/terraform/aws/lambda/vpc/terraform/whatismyip.tf
deleted file mode 100644
index 822501bd..00000000
--- a/examples/terraform/aws/lambda/vpc/terraform/whatismyip.tf
+++ /dev/null
@@ -1,36 +0,0 @@
-module "whatismyip" {
-  source = "../../../../../../build/terraform/aws/lambda"
-  appconfig = module.appconfig
-
-  config = {
-    name = "whatismyip"
-    description = "Substation node that acts as a synchronous microservice"
-    image_uri = "${module.ecr.url}:v1.3.0"
-    image_arm = true
-
-    memory = 128
-    timeout = 10
-
-    vpc_config = {
-      subnet_ids = module.vpc_substation.private_subnet_ids
-      security_group_ids = [module.vpc_substation.default_security_group_id]
-    }
-
-    env = {
-      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/whatismyip"
-      "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA"
-      "SUBSTATION_DEBUG" : true
-    }
-  }
-
-
-  depends_on = [
-    module.appconfig.name,
-    module.ecr.url,
-  ]
-}
-
-resource "aws_lambda_function_url" "substation_microservice" {
-  function_name = module.whatismyip.name
-  authorization_type = "NONE"
-}
diff --git a/examples/terraform/aws/s3/data_lake/config/node/config.jsonnet b/examples/terraform/aws/s3/data_lake/config/node/config.jsonnet
deleted file mode 100644
index 2322accb..00000000
--- a/examples/terraform/aws/s3/data_lake/config/node/config.jsonnet
+++ /dev/null
@@ -1,28 +0,0 @@
-local sub = import '../../../../../../../build/config/substation.libsonnet';
-
-// This is a placeholder that must be replaced with the bucket produced by Terraform.
-local bucket = 'c034c726-70bf-c397-81bd-c9a0d9e82371-substation';
-
-{
-  concurrency: 1,
-  // All data is buffered in memory, then written in JSON Lines format to S3.
- transforms: [ - sub.tf.send.aws.s3( - settings={ - bucket_name: bucket, - file_path: { prefix: 'original', time_format: '2006/01/02', uuid: true }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, - } - ), - sub.tf.object.insert( - settings={ object: { target_key: 'transformed' }, value: true } - ), - sub.tf.send.aws.s3( - settings={ - bucket_name: bucket, - file_path: { prefix: 'transformed', time_format: '2006/01/02', uuid: true }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, - } - ), - ], -} diff --git a/examples/terraform/aws/s3/data_lake/terraform/_resources.tf b/examples/terraform/aws/s3/data_lake/terraform/_resources.tf deleted file mode 100644 index af46d213..00000000 --- a/examples/terraform/aws/s3/data_lake/terraform/_resources.tf +++ /dev/null @@ -1,38 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# Monolithic S3 bucket used to store all data. -module "s3" { - source = "../../../../../../build/terraform/aws/s3" - - config = { - # Bucket name is randomized to avoid collisions. - name = "${random_uuid.s3.result}-substation" - } - - access = [ - module.lambda_node.role.name, - ] -} diff --git a/examples/terraform/aws/s3/data_lake/terraform/node.tf b/examples/terraform/aws/s3/data_lake/terraform/node.tf deleted file mode 100644 index 6dc872f9..00000000 --- a/examples/terraform/aws/s3/data_lake/terraform/node.tf +++ /dev/null @@ -1,35 +0,0 @@ -module "lambda_gateway" { - source = "../../../../../../build/terraform/aws/api_gateway/lambda" - lambda = module.lambda_node - - config = { - name = "node_gateway" - } - - depends_on = [ - module.lambda_node - ] -} - -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that writes data to S3" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_API_GATEWAY" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} diff --git a/examples/terraform/aws/s3/retry_on_failure/config/node/config.jsonnet b/examples/terraform/aws/s3/retry_on_failure/config/node/config.jsonnet deleted file mode 100644 index 4523a131..00000000 --- a/examples/terraform/aws/s3/retry_on_failure/config/node/config.jsonnet +++ /dev/null @@ -1,13 +0,0 @@ -// This config generates an error to engage the retry on failure feature. -// The pipeline will retry forever until the error is resolved. Change the -// transform to `sub.tf.send.stdout()` to resolve the error and print the logs -// from S3. 
-local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.util.err(settings={ message: 'simulating error to trigger retries' }), - // sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/s3/retry_on_failure/config/retrier/config.jsonnet b/examples/terraform/aws/s3/retry_on_failure/config/retrier/config.jsonnet deleted file mode 100644 index 8d1c04be..00000000 --- a/examples/terraform/aws/s3/retry_on_failure/config/retrier/config.jsonnet +++ /dev/null @@ -1,17 +0,0 @@ -// This config transforms the failure record sent by the node Lambda function -// so that it becomes a new request. The new request bypasses S3 and is sent -// directly to the Lambda function. -// -// Additional information is available in the payload and can be used to make -// decisions about the new request or notify external systems about the failure. -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // If needed, then use other information from the failure record to - // decide what to do or notify external systems about the failure. - sub.tf.obj.cp(settings={ object: { source_key: 'requestPayload' } }), - sub.tf.send.aws.lambda(settings={ function_name: 'node' }), - ], -} diff --git a/examples/terraform/aws/s3/retry_on_failure/terraform/_resources.tf b/examples/terraform/aws/s3/retry_on_failure/terraform/_resources.tf deleted file mode 100644 index d6a5f9b7..00000000 --- a/examples/terraform/aws/s3/retry_on_failure/terraform/_resources.tf +++ /dev/null @@ -1,57 +0,0 @@ -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ name = "example" }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# Monolithic S3 bucket used to store all data. -module "s3" { - source = "../../../../../../build/terraform/aws/s3" - - config = { - # Bucket name is randomized to avoid collisions. - name = "${random_uuid.s3.result}-substation" - } - - access = [ - # Reads objects from the bucket. - module.lambda_node.role.name, - ] -} - -module "sqs" { - source = "../../../../../../build/terraform/aws/sqs" - - config = { - name = "substation_retries" - - # Delay for 30 seconds to allow the pipeline to recover. - delay = 30 - - # Timeout should be at least 6x the timeout of any consuming Lambda functions, plus the batch window. - # Refer to the Lambda documentation for more information: - # https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html. - timeout = 60 - } - - access = [ - # Sends messages to the queue. - module.lambda_retrier.role.name, - # Receives messages from the queue. - module.lambda_node.role.name, - ] -} diff --git a/examples/terraform/aws/s3/retry_on_failure/terraform/node_with_retrier.tf b/examples/terraform/aws/s3/retry_on_failure/terraform/node_with_retrier.tf deleted file mode 100644 index 73a66b61..00000000 --- a/examples/terraform/aws/s3/retry_on_failure/terraform/node_with_retrier.tf +++ /dev/null @@ -1,86 +0,0 @@ -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that reads data from S3. The node will retry forever if it fails." 
- image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_S3" - "SUBSTATION_DEBUG" : true - } - } - - # The retrier Lambda must be able to invoke this - # Lambda function to retry failed S3 events. - access = [ - module.lambda_retrier.role.name, - ] -} - -resource "aws_lambda_permission" "node" { - statement_id = "AllowExecutionFromS3Bucket" - action = "lambda:InvokeFunction" - function_name = module.lambda_node.name - principal = "s3.amazonaws.com" - source_arn = module.s3.arn -} - -resource "aws_s3_bucket_notification" "node" { - bucket = module.s3.id - - lambda_function { - lambda_function_arn = module.lambda_node.arn - events = ["s3:ObjectCreated:*"] - } - - depends_on = [ - aws_lambda_permission.node - ] -} - -# Configures the Lambda function to send failed events to the SQS queue. -resource "aws_lambda_function_event_invoke_config" "node" { - function_name = module.lambda_node.name - - # This example disables the built-in retry mechanism. - maximum_retry_attempts = 0 - - destination_config { - on_failure { - destination = module.sqs.arn - } - } -} - -module "lambda_retrier" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "retrier" - description = "Substation node that receives events from the retry queue and invokes the original Lambda function." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - # This value should be 1/6th of the visibility timeout of the SQS queue. - timeout = 5 - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/retrier" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_SQS" - "SUBSTATION_DEBUG" : true - } - } -} - -resource "aws_lambda_event_source_mapping" "retrier" { - event_source_arn = module.sqs.arn - function_name = module.lambda_retrier.arn - maximum_batching_window_in_seconds = 30 - batch_size = 10 -} diff --git a/examples/terraform/aws/s3/sns/config/node/config.jsonnet b/examples/terraform/aws/s3/sns/config/node/config.jsonnet deleted file mode 100644 index bb84b999..00000000 --- a/examples/terraform/aws/s3/sns/config/node/config.jsonnet +++ /dev/null @@ -1,8 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.stdout(), - ], -} diff --git a/examples/terraform/aws/s3/sns/terraform/_resources.tf b/examples/terraform/aws/s3/sns/terraform/_resources.tf deleted file mode 100644 index d1849460..00000000 --- a/examples/terraform/aws/s3/sns/terraform/_resources.tf +++ /dev/null @@ -1,159 +0,0 @@ -data "aws_caller_identity" "caller" {} - -# KMS encryption key that is shared by all Substation resources. -module "kms" { - source = "../../../../../../build/terraform/aws/kms" - config = { - name = "alias/substation" - policy = data.aws_iam_policy_document.kms.json - } -} - -# This policy is required to support encrypted SNS topics. -# More information: https://repost.aws/knowledge-center/cloudwatch-receive-sns-for-alarm-trigger -data "aws_iam_policy_document" "kms" { - # Allows CloudWatch to access encrypted SNS topic. - statement { - sid = "CloudWatch" - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - principals { - type = "Service" - identifiers = ["cloudwatch.amazonaws.com"] - } - - resources = ["*"] - } - - # Allows S3 to access encrypted SNS topic. 
- statement { - sid = "S3" - effect = "Allow" - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - principals { - type = "Service" - identifiers = ["s3.amazonaws.com"] - } - - resources = ["*"] - } - - # Default key policy for KMS. - # https://docs.aws.amazon.com/kms/latest/developerguide/determining-access-key-policy.html - statement { - sid = "KMS" - effect = "Allow" - actions = [ - "kms:*", - ] - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.caller.account_id}:root"] - } - - resources = ["*"] - } -} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - kms = module.kms - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# S3 bucket used to store all data. -module "s3" { - source = "../../../../../../build/terraform/aws/s3" - kms = module.kms - - config = { - # Bucket name is randomized to avoid collisions. - name = "${random_uuid.s3.result}-substation" - } - - access = [ - module.lambda_node.role.name, - ] -} - -module "sns" { - source = "../../../../../../build/terraform/aws/sns" - kms = module.kms - - config = { - name = "substation" - } -} - -# Grants the S3 bucket permission to publish to the SNS topic. -resource "aws_sns_topic_policy" "s3_access" { - arn = module.sns.arn - policy = data.aws_iam_policy_document.s3_access_policy.json -} - -data "aws_iam_policy_document" "s3_access_policy" { - statement { - actions = [ - "sns:Publish", - ] - - resources = [ - module.sns.arn, - ] - - condition { - test = "ArnEquals" - variable = "aws:SourceArn" - - values = [ - module.s3.arn, - ] - } - - principals { - type = "Service" - identifiers = ["s3.amazonaws.com"] - } - - effect = "Allow" - } -} - -resource "aws_s3_bucket_notification" "sns" { - bucket = module.s3.id - - topic { - topic_arn = module.sns.arn - - events = [ - "s3:ObjectCreated:*", - ] - } -} diff --git a/examples/terraform/aws/s3/sns/terraform/node.tf b/examples/terraform/aws/s3/sns/terraform/node.tf deleted file mode 100644 index 46ed06f7..00000000 --- a/examples/terraform/aws/s3/sns/terraform/node.tf +++ /dev/null @@ -1,37 +0,0 @@ -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - kms = module.kms - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that reads data from S3 via SNS." 
- image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_S3_SNS" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_sns_topic_subscription" "node" { - topic_arn = module.sns.arn - protocol = "lambda" - endpoint = module.lambda_node.arn -} - -resource "aws_lambda_permission" "node" { - statement_id = "AllowExecutionFromSNS" - action = "lambda:InvokeFunction" - function_name = module.lambda_node.name - principal = "sns.amazonaws.com" - source_arn = module.sns.arn -} diff --git a/examples/terraform/aws/s3/xdr/config/node/config.jsonnet b/examples/terraform/aws/s3/xdr/config/node/config.jsonnet deleted file mode 100644 index 74f8597c..00000000 --- a/examples/terraform/aws/s3/xdr/config/node/config.jsonnet +++ /dev/null @@ -1,60 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -// This is a placeholder that must be replaced with the bucket produced by Terraform. -local bucket = 'substation-3e820117-61f0-2fbb-05c4-1fba0db9d82c'; -local const = import 'const.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // XDR threat signals rely on the meta_switch transform to conditionally determine - // if an event matches risk criteria. If the event matches, a threat signal is created. - // This transform supports any combination of if-elif-else logic. - sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.any([ - sub.cnd.str.has({ obj: { src: 'user_name' }, value: 'root' }), - ]), - transform: const.threat_signal({ name: 'root_activity', description: 'Root user activity detected.', risk_score: 74 }), - }, - ] }), - // Complex conditions are made possible by using the meta_condition inspector. - sub.tf.meta.switch({ cases: [ - { - // This condition requires both of these statements to be true: - // - // - The `source_ip` field is a public IP address. - // - The `user_name` field contains either `root` or `admin`. - condition: sub.cnd.all([ - sub.cnd.meta.condition({ condition: sub.cnd.none( - sub.pattern.cnd.net.ip.internal(key='source_ip') - ) }), - sub.cnd.meta.condition({ condition: sub.cnd.any([ - sub.cnd.str.has({ obj: { src: 'user_name' }, value: 'root' }), - sub.cnd.str.has({ obj: { src: 'user_name' }, value: 'admin' }), - ]) }), - ]), - transform: const.threat_signal({ name: 'public_ip_root_admin_activity', description: 'Public IP root or admin user activity detected.', risk_score: 99 }), - }, - ] }), - // If the event contains a threat signal, then it's written to the XDR path - // in the S3 bucket; otherwise the event is discarded. The `auxiliary_transforms` - // field is used to format the data as a JSON Lines file. - // - // If there are no threat signals, then the event is discarded. 
- sub.tf.meta.switch({ cases: [ - { - condition: sub.cnd.any([ - sub.cnd.num.len.gt({ obj: { src: const.threat_signals_key }, value: 0 }), - ]), - transform: sub.tf.send.aws.s3( - settings={ - bucket_name: bucket, - file_path: { prefix: 'xdr', time_format: '2006/01/02', uuid: true, suffix: '.jsonl' }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, - } - ), - }, - ] }), - ], -} diff --git a/examples/terraform/aws/s3/xdr/config/node/const.libsonnet b/examples/terraform/aws/s3/xdr/config/node/const.libsonnet deleted file mode 100644 index 300a283a..00000000 --- a/examples/terraform/aws/s3/xdr/config/node/const.libsonnet +++ /dev/null @@ -1,16 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - threat_signals_key: 'threat.signals', - // threat_signal is a custom function that appends a threat signal to an - // event as enrichment metadata. - // - // An alternate approach is to compose a new threat signal event within - // the message metadata and send it as a separate event. This results in - // smaller events with less context and requires a correlation value - // (e.g., hash, ID) to link the threat signal to the original event. - threat_signal(settings): sub.tf.obj.insert({ - obj: { trg: sub.helpers.obj.append_array($.threat_signals_key) }, - value: { name: settings.name, description: settings.description, risk_score: settings.risk_score }, - }), -} diff --git a/examples/terraform/aws/s3/xdr/data.jsonl b/examples/terraform/aws/s3/xdr/data.jsonl deleted file mode 100644 index 8cc71d03..00000000 --- a/examples/terraform/aws/s3/xdr/data.jsonl +++ /dev/null @@ -1,4 +0,0 @@ -{"user_name":"alice","source_ip":"192.168.1.2"} -{"user_name":"admin","source_ip":"192.168.1.3"} -{"user_name":"bob","source_ip":"3.1.1.2"} -{"user_name":"root","source_ip":"3.1.1.3"} diff --git a/examples/terraform/aws/s3/xdr/stdout.jsonl b/examples/terraform/aws/s3/xdr/stdout.jsonl deleted file mode 100644 index 85d9f34e..00000000 --- a/examples/terraform/aws/s3/xdr/stdout.jsonl +++ /dev/null @@ -1,2 +0,0 @@ -{"user_name":"root","source_ip":"192.168.1.3","threat":{"signals":[{"description":"Root user activity detected.","name":"root_activity","risk_score":74}]}} -{"user_name":"admin","source_ip":"3.1.1.3","threat":{"signals":[{"description":"Public IP root or admin user activity detected.","name":"public_ip_root_admin_activity","risk_score":99}]}} diff --git a/examples/terraform/aws/s3/xdr/terraform/_resources.tf b/examples/terraform/aws/s3/xdr/terraform/_resources.tf deleted file mode 100644 index a5c87d5f..00000000 --- a/examples/terraform/aws/s3/xdr/terraform/_resources.tf +++ /dev/null @@ -1,50 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -# Repository for the core Substation application. -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -resource "random_uuid" "s3" {} - -# Monolithic S3 bucket used to store all data. -module "s3" { - source = "../../../../../../build/terraform/aws/s3" - - config = { - # Bucket name is randomized to avoid collisions. 
- name = "substation-${random_uuid.s3.result}" - } - - access = [ - module.lambda_node.role.name, - ] -} - -resource "aws_s3_bucket_notification" "bucket_notification" { - bucket = module.s3.id - - lambda_function { - lambda_function_arn = module.lambda_node.arn - events = ["s3:ObjectCreated:*"] - filter_prefix = "data/" - } - - depends_on = [aws_lambda_permission.allow_bucket] -} diff --git a/examples/terraform/aws/s3/xdr/terraform/node.tf b/examples/terraform/aws/s3/xdr/terraform/node.tf deleted file mode 100644 index acef20f6..00000000 --- a/examples/terraform/aws/s3/xdr/terraform/node.tf +++ /dev/null @@ -1,30 +0,0 @@ -module "lambda_node" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "node" - description = "Substation node that reads and writes data to S3." - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/node" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_S3" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_permission" "allow_bucket" { - statement_id = "AllowExecutionFromS3Bucket" - action = "lambda:InvokeFunction" - function_name = module.lambda_node.name - principal = "s3.amazonaws.com" - source_arn = module.s3.arn -} diff --git a/examples/terraform/aws/sns/pub_sub/config/client/config.jsonnet b/examples/terraform/aws/sns/pub_sub/config/client/config.jsonnet deleted file mode 100644 index 89507de6..00000000 --- a/examples/terraform/aws/sns/pub_sub/config/client/config.jsonnet +++ /dev/null @@ -1,11 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - sub.tf.send.aws.sns( - // This is a placeholder that must be replaced with the SNS ARN produced by Terraform. 
-      settings={ arn: 'arn:aws:sns:us-east-1:123456789012:substation' },
-    ),
-  ],
-}
diff --git a/examples/terraform/aws/sns/pub_sub/config/subscriber_x/config.jsonnet b/examples/terraform/aws/sns/pub_sub/config/subscriber_x/config.jsonnet
deleted file mode 100644
index bb84b999..00000000
--- a/examples/terraform/aws/sns/pub_sub/config/subscriber_x/config.jsonnet
+++ /dev/null
@@ -1,8 +0,0 @@
-local sub = import '../../../../../../../build/config/substation.libsonnet';
-
-{
-  concurrency: 1,
-  transforms: [
-    sub.tf.send.stdout(),
-  ],
-}
diff --git a/examples/terraform/aws/sns/pub_sub/config/subscriber_y/config.jsonnet b/examples/terraform/aws/sns/pub_sub/config/subscriber_y/config.jsonnet
deleted file mode 100644
index bb84b999..00000000
--- a/examples/terraform/aws/sns/pub_sub/config/subscriber_y/config.jsonnet
+++ /dev/null
@@ -1,8 +0,0 @@
-local sub = import '../../../../../../../build/config/substation.libsonnet';
-
-{
-  concurrency: 1,
-  transforms: [
-    sub.tf.send.stdout(),
-  ],
-}
diff --git a/examples/terraform/aws/sns/pub_sub/config/subscriber_z/config.jsonnet b/examples/terraform/aws/sns/pub_sub/config/subscriber_z/config.jsonnet
deleted file mode 100644
index bb84b999..00000000
--- a/examples/terraform/aws/sns/pub_sub/config/subscriber_z/config.jsonnet
+++ /dev/null
@@ -1,8 +0,0 @@
-local sub = import '../../../../../../../build/config/substation.libsonnet';
-
-{
-  concurrency: 1,
-  transforms: [
-    sub.tf.send.stdout(),
-  ],
-}
diff --git a/examples/terraform/aws/sns/pub_sub/terraform/_resources.tf b/examples/terraform/aws/sns/pub_sub/terraform/_resources.tf
deleted file mode 100644
index 5ffa1a86..00000000
--- a/examples/terraform/aws/sns/pub_sub/terraform/_resources.tf
+++ /dev/null
@@ -1,35 +0,0 @@
-data "aws_caller_identity" "caller" {}
-
-module "appconfig" {
-  source = "../../../../../../build/terraform/aws/appconfig"
-
-  config = {
-    name = "substation"
-    environments = [{
-      name = "example"
-    }]
-  }
-}
-
-module "ecr" {
-  source = "../../../../../../build/terraform/aws/ecr"
-
-  config = {
-    name = "substation"
-    force_delete = true
-  }
-}
-
-module "sns" {
-  source = "../../../../../../build/terraform/aws/sns"
-
-  config = {
-    name = "substation"
-  }
-
-  access = [
-    module.subscriber_x.role.name,
-    module.subscriber_y.role.name,
-    module.subscriber_z.role.name,
-  ]
-}
diff --git a/examples/terraform/aws/sns/pub_sub/terraform/subscribers.tf b/examples/terraform/aws/sns/pub_sub/terraform/subscribers.tf
deleted file mode 100644
index 33ed4d74..00000000
--- a/examples/terraform/aws/sns/pub_sub/terraform/subscribers.tf
+++ /dev/null
@@ -1,121 +0,0 @@
-module "subscriber_x" {
-  source = "../../../../../../build/terraform/aws/lambda"
-  appconfig = module.appconfig
-
-  config = {
-    name = "subscriber_x"
-    description = "Substation node that subscribes to SNS"
-    image_uri = "${module.ecr.url}:v1.3.0"
-    image_arm = true
-    env = {
-      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber_x"
-      "SUBSTATION_LAMBDA_HANDLER" : "AWS_SNS"
-      "SUBSTATION_DEBUG" : true
-    }
-  }
-
-  depends_on = [
-    module.appconfig.name,
-    module.ecr.url,
-  ]
-}
-
-resource "aws_sns_topic_subscription" "subscriber_x" {
-  topic_arn = module.sns.arn
-  protocol = "lambda"
-  endpoint = module.subscriber_x.arn
-}
-
-resource "aws_lambda_permission" "subscriber_x" {
-  statement_id = "AllowExecutionFromSNS"
-  action = "lambda:InvokeFunction"
-  function_name = module.subscriber_x.name
-  principal = "sns.amazonaws.com"
-  source_arn = module.sns.arn
-
-  depends_on = [
-    module.subscriber_x.name
-  ]
-}
-
-module "subscriber_y" {
-  source = "../../../../../../build/terraform/aws/lambda"
-  appconfig = module.appconfig
-
-  config = {
-    name = "subscriber_y"
-    description = "Substation node that subscribes to SNS"
-    image_uri = "${module.ecr.url}:v1.3.0"
-    image_arm = true
-    env = {
-      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber_y"
-      "SUBSTATION_LAMBDA_HANDLER" : "AWS_SNS"
-      "SUBSTATION_DEBUG" : true
-    }
-  }
-
-  depends_on = [
-    module.appconfig.name,
-    module.ecr.url,
-  ]
-}
-
-resource "aws_sns_topic_subscription" "subscriber_y" {
-  topic_arn = module.sns.arn
-  protocol = "lambda"
-  endpoint = module.subscriber_y.arn
-}
-
-resource "aws_lambda_permission" "subscriber_y" {
-  statement_id = "AllowExecutionFromSNS"
-  action = "lambda:InvokeFunction"
-  function_name = module.subscriber_y.name
-  principal = "sns.amazonaws.com"
-  source_arn = module.sns.arn
-
-  depends_on = [
-    module.subscriber_y.name
-  ]
-}
-
-
-module "subscriber_z" {
-  source = "../../../../../../build/terraform/aws/lambda"
-  appconfig = module.appconfig
-
-  config = {
-    name = "subscriber_z"
-    description = "Substation node that subscribes to SNS"
-    image_uri = "${module.ecr.url}:v1.3.0"
-    image_arm = true
-    env = {
-      "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/subscriber_z"
-      "SUBSTATION_LAMBDA_HANDLER" : "AWS_SNS"
-      "SUBSTATION_DEBUG" : true
-    }
-  }
-
-
-  depends_on = [
-    module.appconfig.name,
-    module.ecr.url,
-  ]
-}
-
-resource "aws_sns_topic_subscription" "subscriber_z" {
-  topic_arn = module.sns.arn
-  protocol = "lambda"
-  endpoint = module.subscriber_z.arn
-}
-
-resource "aws_lambda_permission" "subscriber_z" {
-  statement_id = "AllowExecutionFromSNS"
-  action = "lambda:InvokeFunction"
-  function_name = module.subscriber_z.name
-  principal = "sns.amazonaws.com"
-  source_arn = module.sns.arn
-
-  depends_on = [
-    module.subscriber_z.name
-  ]
-}
diff --git a/examples/terraform/aws/sqs/microservice/config/frontend/config.jsonnet b/examples/terraform/aws/sqs/microservice/config/frontend/config.jsonnet
deleted file mode 100644
index 37157bb6..00000000
--- a/examples/terraform/aws/sqs/microservice/config/frontend/config.jsonnet
+++ /dev/null
@@ -1,25 +0,0 @@
-local sub = import '../../../../../../../build/config/substation.libsonnet';
-
-{
-  transforms: [
-    // Dynamically handles input from either Lambda URL or synchronous invocation.
-    sub.pattern.transform.conditional(
-      condition=sub.condition.all([
-        sub.condition.number.length.greater_than(
-          settings={ object: { source_key: 'body' }, value: 0 }
-        ),
-      ]),
-      transform=sub.transform.object.copy(
-        settings={ object: { source_key: 'body' } }
-      ),
-    ),
-    // This UUID is used by the client to retrieve the processed result from DynamoDB.
-    sub.transform.string.uuid(
-      settings={ object: { target_key: 'uuid' } },
-    ),
-    sub.transform.send.aws.sqs(
-      // This is a placeholder that must be replaced with the SQS ARN produced by Terraform.
- settings={ arn: 'arn:aws:sqs:us-east-1:123456789012:substation' }, - ), - ], -} diff --git a/examples/terraform/aws/sqs/microservice/config/microservice/config.jsonnet b/examples/terraform/aws/sqs/microservice/config/microservice/config.jsonnet deleted file mode 100644 index bc3ea243..00000000 --- a/examples/terraform/aws/sqs/microservice/config/microservice/config.jsonnet +++ /dev/null @@ -1,29 +0,0 @@ -local sub = import '../../../../../../../build/config/substation.libsonnet'; - -{ - concurrency: 1, - transforms: [ - // Remove any events that do not have a 'uuid' field. - sub.pattern.transform.conditional( - condition=sub.condition.all(sub.pattern.condition.number.length.eq_zero(key='uuid')), - transform=sub.transform.utility.drop(), - ), - // Performs a reverse DNS lookup on the 'addr' field if it is a public IP address. - sub.pattern.transform.conditional( - condition=sub.condition.none(sub.pattern.condition.network.ip.internal(key='addr')), - transform=sub.transform.enrich.dns.ip_lookup( - settings={ object: { source_key: 'addr', target_key: 'domain' } }, - ), - ), - // The uuid field is used as the partition key for the DynamoDB table. - sub.transform.object.copy( - settings={ object: { source_key: 'uuid', target_key: 'PK' } } - ), - sub.transform.object.delete( - settings={ object: { source_key: 'uuid' } } - ), - sub.transform.send.aws.dynamodb( - settings={ table_name: 'substation' } - ), - ], -} diff --git a/examples/terraform/aws/sqs/microservice/terraform/_resources.tf b/examples/terraform/aws/sqs/microservice/terraform/_resources.tf deleted file mode 100644 index 9a00233b..00000000 --- a/examples/terraform/aws/sqs/microservice/terraform/_resources.tf +++ /dev/null @@ -1,56 +0,0 @@ -data "aws_caller_identity" "caller" {} - -module "appconfig" { - source = "../../../../../../build/terraform/aws/appconfig" - - config = { - name = "substation" - environments = [{ - name = "example" - }] - } -} - -module "ecr" { - source = "../../../../../../build/terraform/aws/ecr" - - config = { - name = "substation" - force_delete = true - } -} - -module "sqs" { - source = "../../../../../../build/terraform/aws/sqs" - - config = { - name = "substation" - } - - access = [ - # Reads from SQS. - module.microservice.role.name, - # Writes to SQS. - module.frontend.role.name, - ] -} - -module "dynamodb" { - source = "../../../../../../build/terraform/aws/dynamodb" - - config = { - name = "substation" - hash_key = "PK" - attributes = [ - { - name = "PK" - type = "S" - } - ] - } - - access = [ - # Writes to DynamoDB. 
- module.microservice.role.name, - ] -} diff --git a/examples/terraform/aws/sqs/microservice/terraform/frontend.tf b/examples/terraform/aws/sqs/microservice/terraform/frontend.tf deleted file mode 100644 index 924c7e6c..00000000 --- a/examples/terraform/aws/sqs/microservice/terraform/frontend.tf +++ /dev/null @@ -1,26 +0,0 @@ -module "frontend" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "frontend" - description = "Substation node that acts as a frontend to an asynchronous microservice" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/frontend" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_LAMBDA" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_function_url" "frontend" { - function_name = module.frontend.name - authorization_type = "NONE" -} diff --git a/examples/terraform/aws/sqs/microservice/terraform/microservice.tf b/examples/terraform/aws/sqs/microservice/terraform/microservice.tf deleted file mode 100644 index 327575dd..00000000 --- a/examples/terraform/aws/sqs/microservice/terraform/microservice.tf +++ /dev/null @@ -1,31 +0,0 @@ -module "microservice" { - source = "../../../../../../build/terraform/aws/lambda" - appconfig = module.appconfig - - config = { - name = "microservice" - description = "Substation node that acts as an asynchronous microservice" - image_uri = "${module.ecr.url}:v1.3.0" - image_arm = true - - memory = 128 - timeout = 10 - env = { - "SUBSTATION_CONFIG" : "http://localhost:2772/applications/substation/environments/example/configurations/microservice" - "SUBSTATION_LAMBDA_HANDLER" : "AWS_SQS" - "SUBSTATION_DEBUG" : true - } - } - - depends_on = [ - module.appconfig.name, - module.ecr.url, - ] -} - -resource "aws_lambda_event_source_mapping" "microservice" { - event_source_arn = module.sqs.arn - function_name = module.microservice.arn - maximum_batching_window_in_seconds = 10 - batch_size = 100 -} diff --git a/examples/config/transform/aggregate/sample/config.jsonnet b/examples/transform/aggregate/sample/config.jsonnet similarity index 62% rename from examples/config/transform/aggregate/sample/config.jsonnet rename to examples/transform/aggregate/sample/config.jsonnet index 23713e2f..d728682d 100644 --- a/examples/config/transform/aggregate/sample/config.jsonnet +++ b/examples/transform/aggregate/sample/config.jsonnet @@ -1,7 +1,7 @@ // This example samples data by aggregating events into an array, then // selecting the first event in the array as a sample. The sampling rate // is 1/N, where N is the count of events in the buffer. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, @@ -9,23 +9,24 @@ local sub = import '../../../../../build/config/substation.libsonnet'; // Events are aggregated into an array. This example has a sample // rate of up to 1/10. By default, the sample rate will be lower if // fewer than 10 events are processed by Substation. - sub.tf.aggregate.to.array({ object: { target_key: 'sample' }, batch: { count: 10 } }), + sub.tf.aggregate.to.array({ object: { target_key: 'meta sample' }, batch: { count: 10 } }), // A strict sample rate can be enforced by dropping any events that // contain the `sample` key, but do not have a length of 10. 
sub.tf.meta.switch(settings={ cases: [ { - condition: sub.cnd.any(sub.cnd.num.len.eq({ object: { source_key: 'sample' }, value: 10 })), + condition: sub.cnd.num.len.eq({ object: { source_key: 'meta sample' }, value: 10 }), transforms: [ - sub.tf.object.copy({ object: { source_key: 'sample.0' } }), + sub.tf.object.copy({ object: { source_key: 'meta sample.0' } }), ], }, { - condition: sub.cnd.any(sub.cnd.num.len.gt({ object: { source_key: 'sample' }, value: 0 })), + condition: sub.cnd.num.len.gt({ object: { source_key: 'meta sample' }, value: 0 }), transforms: [ sub.tf.util.drop(), ], }, ] }), + sub.tf.obj.cp({ object: { source_key: 'meta sample.0' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/aggregate/sample/data.jsonl b/examples/transform/aggregate/sample/data.jsonl similarity index 100% rename from examples/config/transform/aggregate/sample/data.jsonl rename to examples/transform/aggregate/sample/data.jsonl diff --git a/examples/transform/aggregate/sample/stdout.txt b/examples/transform/aggregate/sample/stdout.txt new file mode 100644 index 00000000..6c03b134 --- /dev/null +++ b/examples/transform/aggregate/sample/stdout.txt @@ -0,0 +1 @@ +{"c":"d"} diff --git a/examples/config/transform/aggregate/summarize/config.jsonnet b/examples/transform/aggregate/summarize/config.jsonnet similarity index 95% rename from examples/config/transform/aggregate/summarize/config.jsonnet rename to examples/transform/aggregate/summarize/config.jsonnet index 7634782b..7c9031e4 100644 --- a/examples/config/transform/aggregate/summarize/config.jsonnet +++ b/examples/transform/aggregate/summarize/config.jsonnet @@ -1,7 +1,7 @@ // This example reduces data by summarizing multiple network events into a single event, // simulating the behavior of flow records. This technique can be used to reduce // any JSON data that contains common fields, not just network events. 
-local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { transforms: [ diff --git a/examples/config/transform/aggregate/summarize/data.jsonl b/examples/transform/aggregate/summarize/data.jsonl similarity index 100% rename from examples/config/transform/aggregate/summarize/data.jsonl rename to examples/transform/aggregate/summarize/data.jsonl diff --git a/examples/config/transform/aggregate/summarize/stdout.jsonl b/examples/transform/aggregate/summarize/stdout.txt similarity index 100% rename from examples/config/transform/aggregate/summarize/stdout.jsonl rename to examples/transform/aggregate/summarize/stdout.txt index b04a0539..06a2f1f1 100644 --- a/examples/config/transform/aggregate/summarize/stdout.jsonl +++ b/examples/transform/aggregate/summarize/stdout.txt @@ -1,5 +1,5 @@ -{"client":"10.1.1.4","server":"1.2.3.4","timestamp":1674429054,"count":1,"bytes_total":2400} -{"client":"10.1.1.3","server":"8.8.4.4","timestamp":1674429064,"count":2,"bytes_total":40} {"client":"10.1.1.2","server":"8.8.8.8","timestamp":1674429067,"count":4,"bytes_total":72} -{"client":"10.1.1.2","server":"8.8.4.4","timestamp":1674429065,"count":6,"bytes_total":112} {"client":"10.1.1.3","server":"8.8.8.8","timestamp":1674429066,"count":6,"bytes_total":52} +{"client":"10.1.1.2","server":"8.8.4.4","timestamp":1674429065,"count":6,"bytes_total":112} +{"client":"10.1.1.4","server":"1.2.3.4","timestamp":1674429054,"count":1,"bytes_total":2400} +{"client":"10.1.1.3","server":"8.8.4.4","timestamp":1674429064,"count":2,"bytes_total":40} diff --git a/examples/config/transform/array/extend/config.jsonnet b/examples/transform/array/extend/config.jsonnet similarity index 77% rename from examples/config/transform/array/extend/config.jsonnet rename to examples/transform/array/extend/config.jsonnet index 48a66608..c0a28134 100644 --- a/examples/config/transform/array/extend/config.jsonnet +++ b/examples/transform/array/extend/config.jsonnet @@ -1,5 +1,5 @@ // This example extends an array by appending and flattening values. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, @@ -8,6 +8,7 @@ local sub = import '../../../../../build/config/substation.libsonnet'; sub.tf.object.copy({ object: { source_key: 'z', target_key: 'a.-1' } }), // Flatten the array. 
sub.tf.object.copy({ object: { source_key: 'a|@flatten', target_key: 'a' } }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/array/extend/data.json b/examples/transform/array/extend/data.json similarity index 100% rename from examples/config/transform/array/extend/data.json rename to examples/transform/array/extend/data.json diff --git a/examples/transform/array/extend/stdout.txt b/examples/transform/array/extend/stdout.txt new file mode 100644 index 00000000..f8cc26de --- /dev/null +++ b/examples/transform/array/extend/stdout.txt @@ -0,0 +1,4 @@ +{ + "a": [1, 2, 3, 4], + "z": [3, 4] +} diff --git a/examples/config/transform/array/flatten/config.jsonnet b/examples/transform/array/flatten/config.jsonnet similarity index 70% rename from examples/config/transform/array/flatten/config.jsonnet rename to examples/transform/array/flatten/config.jsonnet index d484e19d..2eac7700 100644 --- a/examples/config/transform/array/flatten/config.jsonnet +++ b/examples/transform/array/flatten/config.jsonnet @@ -1,11 +1,12 @@ // This example flattens an array of arrays. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, transforms: [ // Flatten by copying the value and chaining GJSON's `@flatten` operator. sub.tf.obj.cp({ object: { source_key: 'a|@flatten', target_key: 'a' } }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/array/flatten/data.json b/examples/transform/array/flatten/data.json similarity index 100% rename from examples/config/transform/array/flatten/data.json rename to examples/transform/array/flatten/data.json diff --git a/examples/transform/array/flatten/stdout.txt b/examples/transform/array/flatten/stdout.txt new file mode 100644 index 00000000..41096f70 --- /dev/null +++ b/examples/transform/array/flatten/stdout.txt @@ -0,0 +1,3 @@ +{ + "a": [1, 2, 3, 4] +} diff --git a/examples/config/transform/array/flatten_deep/config.jsonnet b/examples/transform/array/flatten_deep/config.jsonnet similarity index 73% rename from examples/config/transform/array/flatten_deep/config.jsonnet rename to examples/transform/array/flatten_deep/config.jsonnet index 22afb0e0..ac726628 100644 --- a/examples/config/transform/array/flatten_deep/config.jsonnet +++ b/examples/transform/array/flatten_deep/config.jsonnet @@ -1,5 +1,5 @@ // This example flattens an array of arrays. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, @@ -7,6 +7,7 @@ local sub = import '../../../../../build/config/substation.libsonnet'; // Flatten by copying the value and chaining GJSON's `@flatten` operator // with the `deep` option. 
sub.tf.object.copy({ object: { source_key: 'a|@flatten:{"deep":true}', target_key: 'a' } }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/array/flatten_deep/data.json b/examples/transform/array/flatten_deep/data.json similarity index 100% rename from examples/config/transform/array/flatten_deep/data.json rename to examples/transform/array/flatten_deep/data.json diff --git a/examples/transform/array/flatten_deep/stdout.txt b/examples/transform/array/flatten_deep/stdout.txt new file mode 100644 index 00000000..542900c5 --- /dev/null +++ b/examples/transform/array/flatten_deep/stdout.txt @@ -0,0 +1,3 @@ +{ + "a": [1, 2, 3, 4, 5, 6] +} diff --git a/examples/config/transform/array/group/config.jsonnet b/examples/transform/array/group/config.jsonnet similarity index 82% rename from examples/config/transform/array/group/config.jsonnet rename to examples/transform/array/group/config.jsonnet index 27ce4166..9edbf886 100644 --- a/examples/config/transform/array/group/config.jsonnet +++ b/examples/transform/array/group/config.jsonnet @@ -1,6 +1,6 @@ // This example groups an array of arrays into an array of objects // based on index and configured keys. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; local files_key = 'meta files'; @@ -15,7 +15,7 @@ local files_key = 'meta files'; // Elements from the file_name array are transformed and derived file extensions // are added to a new array. sub.tf.meta.for_each({ - object: { source_key: sub.helpers.object.get_element(files_key, 'file_name'), target_key: sub.helpers.object.append(files_key, 'file_extension') }, + object: { source_key: files_key + '.file_name', target_key: files_key + '.file_extension' }, transforms: [ sub.tf.string.capture(settings={ pattern: '\\.([^\\.]+)$' }), ], @@ -25,6 +25,7 @@ local files_key = 'meta files'; // // [{name: name1, type: type1, size: size1, extension: extension1}, {name: name2, type: type2, size: size2, extension: extension2}] sub.tf.object.cp({ object: { source_key: files_key + '|@group' } }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/array/group/data.json b/examples/transform/array/group/data.json similarity index 100% rename from examples/config/transform/array/group/data.json rename to examples/transform/array/group/data.json diff --git a/examples/transform/array/group/stdout.txt b/examples/transform/array/group/stdout.txt new file mode 100644 index 00000000..8c9b507a --- /dev/null +++ b/examples/transform/array/group/stdout.txt @@ -0,0 +1,15 @@ +{"file_name":["foo.txt","bar.html"],"file_type":["text/plain","text/html"],"file_size":[100,500]} +[ + { + "file_name": "foo.txt", + "file_type": "text/plain", + "file_size": 100, + "file_extension": "txt" + }, + { + "file_name": "bar.html", + "file_type": "text/html", + "file_size": 500, + "file_extension": "html" + } +] diff --git a/examples/config/transform/enrich/http_secret/config.jsonnet b/examples/transform/enrich/http_secret/config.jsonnet similarity index 94% rename from examples/config/transform/enrich/http_secret/config.jsonnet rename to examples/transform/enrich/http_secret/config.jsonnet index 2d28d7a5..3b3d77ff 100644 --- a/examples/config/transform/enrich/http_secret/config.jsonnet +++ b/examples/transform/enrich/http_secret/config.jsonnet @@ -1,6 +1,6 @@ // This example shows how to use the `utility_secret` transform to // 
retrieve a secret and reference it in a subsequent transform. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // The secret is retrieved from the environment variable named // `SUBSTATION_EXAMPLE_URL` and referenced in subsequent transforms using diff --git a/examples/transform/enrich/http_secret/data.json b/examples/transform/enrich/http_secret/data.json new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/examples/transform/enrich/http_secret/data.json @@ -0,0 +1 @@ +{} diff --git a/examples/config/transform/enrich/kvstore_csv/config.jsonnet b/examples/transform/enrich/kvstore_csv/config.jsonnet similarity index 80% rename from examples/config/transform/enrich/kvstore_csv/config.jsonnet rename to examples/transform/enrich/kvstore_csv/config.jsonnet index f648ef54..1b5fa569 100644 --- a/examples/config/transform/enrich/kvstore_csv/config.jsonnet +++ b/examples/transform/enrich/kvstore_csv/config.jsonnet @@ -1,10 +1,10 @@ -// This example shows how to use the `enrich_kv_store_item_get` transform +// This example shows how to use the `enrich_kv_store_item_get` transform // to lookup data in a KV store backed by a CSV file. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // This CSV file must be local to the Substation app. Absolute paths are // recommended. Files accessible over HTTPS and hosted in AWS S3 also work. -// +// // The `column` parameter is required and specifies the column in the CSV file // that will be used to lookup the key in the KV store. local kv = sub.kv_store.csv_file({ file: 'kv.csv', column: 'product' }); @@ -14,9 +14,10 @@ local kv = sub.kv_store.csv_file({ file: 'kv.csv', column: 'product' }); // The CSV file KV store returns the entire row minus the key column. // For example, this returns {"price":"9.99","calories":"500"} for "churro". 
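The kv.csv fixture is renamed in this diff without its contents being shown. A minimal sketch of a file consistent with the lookup described above, inferred from the example's stdout rather than taken from the real fixture:

```jsonnet
// Hypothetical kv.csv (inferred from the printed output, not the actual file):
//
//   product,price,calories
//   churro,9.99,500
//
// `column: 'product'` marks the key column, so a get on "churro" returns the
// remaining columns as an object. Note that all CSV values come back as strings:
{
  price: '9.99',
  calories: '500',
}
```

The transform below performs that lookup and writes the returned object to the `info` key.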
sub.tf.enrich.kv_store.item.get({ - object: { source_key: 'product', target_key: 'info'}, + object: { source_key: 'product', target_key: 'info' }, kv_store: kv, }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/enrich/kvstore_csv/data.jsonl b/examples/transform/enrich/kvstore_csv/data.jsonl similarity index 100% rename from examples/config/transform/enrich/kvstore_csv/data.jsonl rename to examples/transform/enrich/kvstore_csv/data.jsonl diff --git a/examples/config/transform/enrich/kvstore_csv/kv.csv b/examples/transform/enrich/kvstore_csv/kv.csv similarity index 100% rename from examples/config/transform/enrich/kvstore_csv/kv.csv rename to examples/transform/enrich/kvstore_csv/kv.csv diff --git a/examples/transform/enrich/kvstore_csv/stdout.txt b/examples/transform/enrich/kvstore_csv/stdout.txt new file mode 100644 index 00000000..88d0c953 --- /dev/null +++ b/examples/transform/enrich/kvstore_csv/stdout.txt @@ -0,0 +1,7 @@ +{ + "product": "churro", + "info": { + "calories": "500", + "price": "9.99" + } +} diff --git a/examples/config/transform/enrich/kvstore_json/config.jsonnet b/examples/transform/enrich/kvstore_json/config.jsonnet similarity index 70% rename from examples/config/transform/enrich/kvstore_json/config.jsonnet rename to examples/transform/enrich/kvstore_json/config.jsonnet index 55335dc6..723b5902 100644 --- a/examples/config/transform/enrich/kvstore_json/config.jsonnet +++ b/examples/transform/enrich/kvstore_json/config.jsonnet @@ -1,6 +1,6 @@ -// This example shows how to use the `enrich_kv_store_item_get` transform +// This example shows how to use the `enrich_kv_store_item_get` transform // to lookup data in a KV store backed by a JSON file. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // This JSON file must be local to the Substation app. Absolute paths are // recommended. Files accessible over HTTPS and hosted in AWS S3 also work. 
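The kv.json fixture is likewise renamed without being shown. Judging from this example's stdout ({"product": "churro", "price": 9.99}), a consistent file maps each lookup key directly to its value; this reconstruction is an assumption, written as Jsonnet since JSON is a subset of it:

```jsonnet
// Hypothetical kv.json (inferred from the printed output, not the actual file).
// A get on the message's `product` value returns the mapped number, which the
// transform writes to the message's `price` key.
{
  churro: 9.99,
}
```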
@@ -9,9 +9,10 @@ local kv = sub.kv_store.json_file({ file: 'kv.json' }); { transforms: [ sub.tf.enrich.kv_store.item.get({ - object: { source_key: 'product', target_key: 'price'}, + object: { source_key: 'product', target_key: 'price' }, kv_store: kv, }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/enrich/kvstore_json/data.jsonl b/examples/transform/enrich/kvstore_json/data.jsonl similarity index 100% rename from examples/config/transform/enrich/kvstore_json/data.jsonl rename to examples/transform/enrich/kvstore_json/data.jsonl diff --git a/examples/transform/enrich/kvstore_json/stdout.txt b/examples/transform/enrich/kvstore_json/stdout.txt new file mode 100644 index 00000000..46724035 --- /dev/null +++ b/examples/transform/enrich/kvstore_json/stdout.txt @@ -0,0 +1,4 @@ +{ + "product": "churro", + "price": 9.99 +} diff --git a/examples/config/transform/enrich/kvstore_set_add/config.jsonnet b/examples/transform/enrich/kvstore_set_add/config.jsonnet similarity index 70% rename from examples/config/transform/enrich/kvstore_set_add/config.jsonnet rename to examples/transform/enrich/kvstore_set_add/config.jsonnet index 01910d05..1ca44d5a 100644 --- a/examples/config/transform/enrich/kvstore_set_add/config.jsonnet +++ b/examples/transform/enrich/kvstore_set_add/config.jsonnet @@ -1,7 +1,7 @@ -// This example shows how to use the `enrich_kv_store_set_add` transform +// This example shows how to use the `enrich_kv_store_set_add` transform // to track data over time in a KV store. The sample data contains food // orders and is indexed by each customer's email address. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // Default Memory store is used. local mem = sub.kv_store.memory(); @@ -10,19 +10,18 @@ local mem = sub.kv_store.memory(); transforms: [ // Each order is stored in memory indexed by the customer's email // address and printed to stdout. Only unique orders are stored. - sub.tf.enrich.kv_store.sadd({ - object: { source_key: 'customer', target_key: 'order'}, + sub.tf.enrich.kv_store.set.add({ + object: { source_key: 'customer', target_key: 'order' }, kv_store: mem, ttl_offset: '10s', }), - sub.tf.send.stdout(), - // Each message has the list added to its object. The list grows // as orders are added to the store above. 
sub.tf.enrich.kv_store.item.get({ - object: { source_key: 'customer', target_key: 'kv_store'}, + object: { source_key: 'customer', target_key: 'kv_store' }, kv_store: mem, }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/enrich/kvstore_set_add/data.jsonl b/examples/transform/enrich/kvstore_set_add/data.jsonl similarity index 100% rename from examples/config/transform/enrich/kvstore_set_add/data.jsonl rename to examples/transform/enrich/kvstore_set_add/data.jsonl diff --git a/examples/transform/enrich/kvstore_set_add/stdout.txt b/examples/transform/enrich/kvstore_set_add/stdout.txt new file mode 100644 index 00000000..76487eca --- /dev/null +++ b/examples/transform/enrich/kvstore_set_add/stdout.txt @@ -0,0 +1,41 @@ +{ + "date": "2021-01-07", + "customer": "alice@brex.com", + "order": "pizza", + "kv_store": ["pizza"] +} + +{ + "date": "2021-01-01", + "customer": "alice@brex.com", + "order": "pizza", + "kv_store": ["pizza"] +} + +{ + "date": "2021-01-03", + "customer": "bob@brex.com", + "order": "pizza", + "kv_store": ["burger", "pizza"] +} + +{ + "date": "2021-01-07", + "customer": "bob@brex.com", + "order": "burger", + "kv_store": ["burger", "pizza"] +} + +{ + "date": "2021-01-01", + "customer": "bob@brex.com", + "order": "burger", + "kv_store": ["burger", "pizza"] +} + +{ + "date": "2021-01-13", + "customer": "alice@brex.com", + "order": "pizza", + "kv_store": ["pizza"] +} diff --git a/examples/transform/enrich/mmdb/config.jsonnet b/examples/transform/enrich/mmdb/config.jsonnet new file mode 100644 index 00000000..9097fedc --- /dev/null +++ b/examples/transform/enrich/mmdb/config.jsonnet @@ -0,0 +1,14 @@ +local sub = import '../../../../substation.libsonnet'; + +local city = sub.kv_store.mmdb({ file: 'https://gist.github.com/jshlbrd/59641ccc71ba2873fb204ac44d101640/raw/3ad0e8c09563c614c50de4671caef8c1983cbb4d/GeoLite2-City.mmdb' }); + +local asn = sub.kv_store.mmdb({ file: 'https://gist.github.com/jshlbrd/59641ccc71ba2873fb204ac44d101640/raw/3ad0e8c09563c614c50de4671caef8c1983cbb4d/GeoLite2-ASN.mmdb' }); + +{ + transforms: [ + sub.tf.enrich.kv_store.item.get({ object: { source_key: 'ip', target_key: 'city' }, kv_store: city }), + sub.tf.enrich.kv_store.item.get({ object: { source_key: 'ip', target_key: 'asn' }, kv_store: asn }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), + sub.tf.send.stdout(), + ], +} diff --git a/examples/config/transform/enrich/mmdb/data.jsonl b/examples/transform/enrich/mmdb/data.jsonl similarity index 100% rename from examples/config/transform/enrich/mmdb/data.jsonl rename to examples/transform/enrich/mmdb/data.jsonl diff --git a/examples/transform/enrich/mmdb/stdout.txt b/examples/transform/enrich/mmdb/stdout.txt new file mode 100644 index 00000000..c14c9930 --- /dev/null +++ b/examples/transform/enrich/mmdb/stdout.txt @@ -0,0 +1,172 @@ +{ + "ip": "8.8.8.8", + "city": { + "continent": { + "code": "NA", + "geoname_id": 6255149, + "names": { + "de": "Nordamerika", + "en": "North America", + "es": "Norteamérica", + "fr": "Amérique du Nord", + "ja": "北アメリカ", + "pt-BR": "América do Norte", + "ru": "Северная Америка", + "zh-CN": "北美洲" + } + }, + "country": { + "geoname_id": 6252001, + "iso_code": "US", + "names": { + "de": "Vereinigte Staaten", + "en": "United States", + "es": "Estados Unidos", + "fr": "États Unis", + "ja": "アメリカ", + "pt-BR": "EUA", + "ru": "США", + "zh-CN": "美国" + } + }, + "location": { + "accuracy_radius": 1000, + "latitude": 37.751, + "longitude": -97.822, + 
"time_zone": "America/Chicago" + }, + "registered_country": { + "geoname_id": 6252001, + "iso_code": "US", + "names": { + "de": "Vereinigte Staaten", + "en": "United States", + "es": "Estados Unidos", + "fr": "États Unis", + "ja": "アメリカ", + "pt-BR": "EUA", + "ru": "США", + "zh-CN": "美国" + } + } + }, + "asn": { + "autonomous_system_number": 15169, + "autonomous_system_organization": "GOOGLE" + } +} + +{ + "ip": "9.9.9.9", + "city": { + "city": { + "geoname_id": 5327684, + "names": { + "de": "Berkeley", + "en": "Berkeley", + "es": "Berkeley", + "fr": "Berkeley", + "ja": "バークリー", + "pt-BR": "Berkeley", + "ru": "Беркли", + "zh-CN": "伯克利" + } + }, + "continent": { + "code": "NA", + "geoname_id": 6255149, + "names": { + "de": "Nordamerika", + "en": "North America", + "es": "Norteamérica", + "fr": "Amérique du Nord", + "ja": "北アメリカ", + "pt-BR": "América do Norte", + "ru": "Северная Америка", + "zh-CN": "北美洲" + } + }, + "country": { + "geoname_id": 6252001, + "iso_code": "US", + "names": { + "de": "Vereinigte Staaten", + "en": "United States", + "es": "Estados Unidos", + "fr": "États Unis", + "ja": "アメリカ", + "pt-BR": "EUA", + "ru": "США", + "zh-CN": "美国" + } + }, + "location": { + "accuracy_radius": 20, + "latitude": 37.8767, + "longitude": -122.2676, + "metro_code": 807, + "time_zone": "America/Los_Angeles" + }, + "postal": { + "code": "94709" + }, + "registered_country": { + "geoname_id": 2658434, + "iso_code": "CH", + "names": { + "de": "Schweiz", + "en": "Switzerland", + "es": "Suiza", + "fr": "Suisse", + "ja": "スイス連邦", + "pt-BR": "Suíça", + "ru": "Швейцария", + "zh-CN": "瑞士" + } + }, + "subdivisions": [ + { + "geoname_id": 5332921, + "iso_code": "CA", + "names": { + "de": "Kalifornien", + "en": "California", + "es": "California", + "fr": "Californie", + "ja": "カリフォルニア州", + "pt-BR": "Califórnia", + "ru": "Калифорния", + "zh-CN": "加州" + } + } + ] + }, + "asn": { + "autonomous_system_number": 19281, + "autonomous_system_organization": "QUAD9-AS-1" + } +} + +{ + "ip": "1.1.1.1", + "city": { + "registered_country": { + "geoname_id": 2077456, + "iso_code": "AU", + "names": { + "de": "Australien", + "en": "Australia", + "es": "Australia", + "fr": "Australie", + "ja": "オーストラリア", + "pt-BR": "Austrália", + "ru": "Австралия", + "zh-CN": "澳大利亚" + } + } + }, + "asn": { + "autonomous_system_number": 13335, + "autonomous_system_organization": "CLOUDFLARENET" + } +} diff --git a/examples/transform/enrich/urlscan/config.jsonnet b/examples/transform/enrich/urlscan/config.jsonnet new file mode 100644 index 00000000..c7c7c4fe --- /dev/null +++ b/examples/transform/enrich/urlscan/config.jsonnet @@ -0,0 +1,52 @@ +// This example shows how to make scan requests and retrieve +// results using the urlscan API (https://urlscan.io/docs/api/). +local sub = import '../../../../substation.libsonnet'; + +local headers = { 'API-Key': '${SECRET:URLSCAN}', 'Content-Type': 'application/json' }; + +{ + transforms: [ + // Retrieve the urlscan API key from the secrets store. + // (Never put a secret directly into a configuration.) + sub.transform.utility.secret({ + // The API key is stored in an environment variable named + // `URLSCAN_API_KEY`. + secret: sub.secrets.environment_variable({ id: 'URLSCAN', name: 'URLSCAN_API_KEY' }), + }), + // Sends a scan request and waits for the result. This + // follows recommended practices from the urlscan API docs, + // and will try to fetch the result up to 3 times over 15s. + // If there are no results after retrying, then the unmodified + // message is sent to stdout. 
+ sub.tf.enrich.http.post({ + object: { body_key: '@this', target_key: 'meta response' }, + url: 'https://urlscan.io/api/v1/scan/', + headers: headers, + }), + sub.tf.util.delay({ duration: '5s' }), + sub.tf.meta.err({ + error_messages: ['retry limit reached'], // Errors are caught in case the retry limit is reached. + transforms: [ + sub.tf.meta.retry({ + // This condition runs on the result of the transforms. If + // it returns false, then the transforms are retried until + // it returns true or the retry settings are exhausted. + condition: sub.cnd.all([ + sub.cnd.num.len.gt({ object: { source_key: 'meta result.task.time' }, value: 0 }), + ]), + transforms: [ + sub.tf.enrich.http.get({ + object: { source_key: 'meta response.uuid', target_key: 'meta result' }, + url: 'https://urlscan.io/api/v1/result/${DATA}', // DATA is the value of the source_key. + headers: headers, + }), + ], + retry: { delay: '5s', count: 3 }, // Retry up to 3 times with a 5 second delay (5s, 5s, 5s). + }), + ], + }), + sub.tf.obj.cp({ object: { source_key: 'meta result' } }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), + sub.tf.send.stdout({ batch: { size: 1000 * 1000 * 5 } }), // 5MB (the results can be large). + ], +} diff --git a/examples/config/transform/enrich/urlscan/data.jsonl b/examples/transform/enrich/urlscan/data.json similarity index 100% rename from examples/config/transform/enrich/urlscan/data.jsonl rename to examples/transform/enrich/urlscan/data.json diff --git a/examples/config/transform/format/zip/config.jsonnet b/examples/transform/format/zip/config.jsonnet similarity index 87% rename from examples/config/transform/format/zip/config.jsonnet rename to examples/transform/format/zip/config.jsonnet index 85f8fae2..5a379018 100644 --- a/examples/config/transform/format/zip/config.jsonnet +++ b/examples/transform/format/zip/config.jsonnet @@ -1,8 +1,8 @@ // This example shows how to unzip a file and send the contents to stdout. -// Add the two data files in this directory to a Zip file and send it to +// Add the two data files in this directory to a Zip file and send it to // Substation. You can use this command to create the Zip file: // zip data.zip data.jsonl data.csv -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { transforms: [ @@ -13,5 +13,5 @@ local sub = import '../../../../../build/config/substation.libsonnet'; sub.tf.agg.from.string({ separator: '\n' }), // Send the messages to stdout. 
sub.tf.send.stdout(), - ] + ], } diff --git a/examples/config/transform/format/zip/data.csv b/examples/transform/format/zip/data.csv similarity index 100% rename from examples/config/transform/format/zip/data.csv rename to examples/transform/format/zip/data.csv diff --git a/examples/config/transform/format/zip/data.jsonl b/examples/transform/format/zip/data.jsonl similarity index 100% rename from examples/config/transform/format/zip/data.jsonl rename to examples/transform/format/zip/data.jsonl diff --git a/examples/transform/format/zip/stdout.txt b/examples/transform/format/zip/stdout.txt new file mode 100644 index 00000000..5f8d1265 --- /dev/null +++ b/examples/transform/format/zip/stdout.txt @@ -0,0 +1,6 @@ +foo,bar +baz,qux +quux,corge +{"foo":"bar"} +{"baz":"qux"} +{"quux":"corge"} diff --git a/examples/config/transform/meta/crash_program/config.jsonnet b/examples/transform/meta/crash_program/config.jsonnet similarity index 54% rename from examples/config/transform/meta/crash_program/config.jsonnet rename to examples/transform/meta/crash_program/config.jsonnet index 242ec6e3..858a04f5 100644 --- a/examples/config/transform/meta/crash_program/config.jsonnet +++ b/examples/transform/meta/crash_program/config.jsonnet @@ -1,7 +1,7 @@ // This example shows how to intentionally crash a program if a transform // does not produce an output. This technique can be used to provide strict // guarantees about the result of data transformations. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // `key` is the target of the transform that may not produce an output and is // checked to determine if the transform was successful. @@ -9,22 +9,26 @@ local key = 'c'; { transforms: [ - // This conditional transform simulates a transform that may not produce an output. - sub.pattern.tf.conditional( - condition=sub.cnd.any(sub.cnd.utility.random()), - transform=sub.tf.obj.insert(settings={ object: { target_key: key }, value: true }), - ), + // This simulates a transform that may not produce an output. + sub.tf.meta.switch({ cases: [ + { + condition: sub.cnd.utility.random(), + transforms: [ + sub.tf.obj.insert({ object: { target_key: key }, value: true }), + ], + }, + ] }), // If there is no output from the transform, then an error is thrown to crash the program. 
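The switch that follows implements that crash. The same guard can be factored into a reusable Jsonnet helper; this is a sketch assembled from the pieces shown in this example, and `assert_key` is a hypothetical name, not part of the library:

```jsonnet
local sub = import '../../../../substation.libsonnet';

// Hypothetical helper: crash the program if `key` has no value on the
// message, otherwise run the message through the given transforms.
local assert_key(key, tfs) = sub.tf.meta.switch({ cases: [
  {
    // A length of 0 means the key was never set.
    condition: sub.cnd.num.len.eq(settings={ object: { source_key: key }, value: 0 }),
    transforms: [
      sub.tf.util.err(settings={ message: 'transform produced no output: ' + key }),
    ],
  },
  { transforms: tfs },
] });

{
  transforms: [
    assert_key('c', [sub.tf.send.stdout()]),
  ],
}
```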
- sub.tf.meta.switch(settings={ cases: [ + sub.tf.meta.switch({ cases: [ { - condition: sub.cnd.any(sub.cnd.num.len.eq(settings={ object: { source_key: key }, value: 0 })), + condition: sub.cnd.num.len.eq(settings={ object: { source_key: key }, value: 0 }), transforms: [ - sub.tf.util.err(settings={ message: 'transform produced no output' }) + sub.tf.util.err(settings={ message: 'transform produced no output' }), ], }, - { + { transforms: [ - sub.tf.send.stdout() + sub.tf.send.stdout(), ], }, ] }), diff --git a/examples/config/data.json b/examples/transform/meta/crash_program/data.json similarity index 100% rename from examples/config/data.json rename to examples/transform/meta/crash_program/data.json diff --git a/examples/transform/meta/crash_program/stdout.txt b/examples/transform/meta/crash_program/stdout.txt new file mode 100644 index 00000000..a093b4e1 --- /dev/null +++ b/examples/transform/meta/crash_program/stdout.txt @@ -0,0 +1 @@ +panic: main: transform b3a47dd1-f67ef52a: transform produced no output diff --git a/examples/transform/meta/default_value/config.jsonnet b/examples/transform/meta/default_value/config.jsonnet new file mode 100644 index 00000000..8ae2e064 --- /dev/null +++ b/examples/transform/meta/default_value/config.jsonnet @@ -0,0 +1,18 @@ +local sub = import '../../../../substation.libsonnet'; + +{ + transforms: [ + sub.tf.object.insert({ object: { target_key: 'z' }, value: true }), + // This simulates a transform that may not produce an output. + sub.tf.meta.switch({ cases: [ + { + condition: sub.cnd.utility.random(), + transforms: [ + sub.tf.obj.insert({ object: { target_key: 'z' }, value: false }), + ], + }, + ] }), + sub.tf.object.copy({ object: { source_key: '@pretty' } }), + sub.tf.send.stdout(), + ], +} diff --git a/examples/config/transform/meta/crash_program/data.json b/examples/transform/meta/default_value/data.json similarity index 100% rename from examples/config/transform/meta/crash_program/data.json rename to examples/transform/meta/default_value/data.json diff --git a/examples/config/transform/meta/each_in_array/config.jsonnet b/examples/transform/meta/each_in_array/config.jsonnet similarity index 62% rename from examples/config/transform/meta/each_in_array/config.jsonnet rename to examples/transform/meta/each_in_array/config.jsonnet index 62a0d6ac..ef596e4a 100644 --- a/examples/config/transform/meta/each_in_array/config.jsonnet +++ b/examples/transform/meta/each_in_array/config.jsonnet @@ -1,20 +1,21 @@ -// This example shows how to use the `meta.for_each` transform to +// This example shows how to use the `meta.for_each` transform to // modify objects in an array. In this example, keys are removed // and added to each object in the array. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { transforms: [ sub.tf.meta.for_each({ object: { source_key: 'a', target_key: 'a' }, - // Multiple transforms can be applied in series to each object - // in the array by using the `meta.pipeline` transform. Otherwise, + // Multiple transforms can be applied in series to each object + // in the array with the `transforms` field. Otherwise, // use any individual transform to modify the object.
- transform: sub.tf.meta.pipeline({ transforms: [ - sub.tf.object.delete({ object: { source_key: 'b'}}), - sub.tf.object.insert({ object: { target_key: 'z'}, value: true }), - ]}) + transforms: [ + sub.tf.object.delete({ object: { source_key: 'b' } }), + sub.tf.object.insert({ object: { target_key: 'z' }, value: true }), + ], }), + sub.tf.object.copy({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/meta/each_in_array/data.json b/examples/transform/meta/each_in_array/data.json similarity index 100% rename from examples/config/transform/meta/each_in_array/data.json rename to examples/transform/meta/each_in_array/data.json diff --git a/examples/transform/meta/each_in_array/stdout.txt b/examples/transform/meta/each_in_array/stdout.txt new file mode 100644 index 00000000..600c8c15 --- /dev/null +++ b/examples/transform/meta/each_in_array/stdout.txt @@ -0,0 +1,12 @@ +{ + "a": [ + { + "c": 2, + "z": true + }, + { + "c": 4, + "z": true + } + ] +} diff --git a/examples/config/transform/meta/exactly_once_consumer/config.jsonnet b/examples/transform/meta/exactly_once_consumer/config.jsonnet similarity index 89% rename from examples/config/transform/meta/exactly_once_consumer/config.jsonnet rename to examples/transform/meta/exactly_once_consumer/config.jsonnet index 9902fc58..67e58ec5 100644 --- a/examples/config/transform/meta/exactly_once_consumer/config.jsonnet +++ b/examples/transform/meta/exactly_once_consumer/config.jsonnet @@ -1,6 +1,6 @@ // This example shows how to use the `meta_kv_store_lock` transform to // create an "exactly once" semantic for a pipeline consumer. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // In production environments a distributed KV store should be used. local kv = sub.kv_store.memory(); @@ -28,6 +28,7 @@ local kv = sub.kv_store.memory(); }, ] }), // At this point only locked messages exist in the pipeline. + sub.tf.object.copy({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/meta/exactly_once_consumer/data.jsonl b/examples/transform/meta/exactly_once_consumer/data.jsonl similarity index 100% rename from examples/config/transform/meta/exactly_once_consumer/data.jsonl rename to examples/transform/meta/exactly_once_consumer/data.jsonl diff --git a/examples/transform/meta/exactly_once_consumer/stdout.txt b/examples/transform/meta/exactly_once_consumer/stdout.txt new file mode 100644 index 00000000..deb69e5f --- /dev/null +++ b/examples/transform/meta/exactly_once_consumer/stdout.txt @@ -0,0 +1,11 @@ +{ + "a": "b" +} + +{ + "c": "d" +} + +{ + "e": "f" +} diff --git a/examples/config/transform/meta/exactly_once_producer/config.jsonnet b/examples/transform/meta/exactly_once_producer/config.jsonnet similarity index 57% rename from examples/config/transform/meta/exactly_once_producer/config.jsonnet rename to examples/transform/meta/exactly_once_producer/config.jsonnet index c7851700..1e440fdc 100644 --- a/examples/config/transform/meta/exactly_once_producer/config.jsonnet +++ b/examples/transform/meta/exactly_once_producer/config.jsonnet @@ -1,6 +1,6 @@ // This example shows how to use the `meta_kv_store_lock` transform to // create an "exactly once" semantic for a pipeline producer. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // In production environments a distributed KV store should be used. 
local kv = sub.kv_store.memory(); @@ -11,13 +11,16 @@ local kv = sub.kv_store.memory(); // that fails to acquire a lock will be skipped. An error in the // sub-transform will cause all previously locked messages to be // unlocked. - sub.tf.meta.err({ transform: sub.tf.meta.kv_store.lock(settings={ - kv_store: kv, - prefix: 'eo_producer', - ttl_offset: '1m', - transforms: [ - sub.tf.send.stdout(), - ], - }) }), + sub.tf.meta.err({ transforms: [ + sub.tf.meta.kv_store.lock({ + kv_store: kv, + prefix: 'eo_producer', + ttl_offset: '1m', + transforms: [ + sub.tf.object.copy({ object: { source_key: '@pretty' } }), + sub.tf.send.stdout(), + ], + }), + ] }), ], } diff --git a/examples/config/transform/meta/exactly_once_producer/data.jsonl b/examples/transform/meta/exactly_once_producer/data.jsonl similarity index 100% rename from examples/config/transform/meta/exactly_once_producer/data.jsonl rename to examples/transform/meta/exactly_once_producer/data.jsonl diff --git a/examples/transform/meta/exactly_once_producer/stdout.txt b/examples/transform/meta/exactly_once_producer/stdout.txt new file mode 100644 index 00000000..deb69e5f --- /dev/null +++ b/examples/transform/meta/exactly_once_producer/stdout.txt @@ -0,0 +1,11 @@ +{ + "a": "b" +} + +{ + "c": "d" +} + +{ + "e": "f" +} diff --git a/examples/config/transform/meta/exactly_once_system/config.jsonnet b/examples/transform/meta/exactly_once_system/config.jsonnet similarity index 82% rename from examples/config/transform/meta/exactly_once_system/config.jsonnet rename to examples/transform/meta/exactly_once_system/config.jsonnet index 588d848a..b5a718ea 100644 --- a/examples/config/transform/meta/exactly_once_system/config.jsonnet +++ b/examples/transform/meta/exactly_once_system/config.jsonnet @@ -1,6 +1,6 @@ // This example shows how to use the `meta_kv_store_lock` transform to // create an "exactly once" semantic for an entire pipeline system. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // In production environments a distributed KV store should be used. local kv = sub.kv_store.memory(); @@ -12,15 +12,16 @@ local kv = sub.kv_store.memory(); // An error in any sub-transform will cause all previously locked // messages to be unlocked. 
sub.tf.meta.err({ transforms: [ - sub.tf.meta.kv_store.lock(settings={ + sub.tf.meta.kv_store.lock({ kv_store: kv, prefix: 'eo_system', ttl_offset: '1m', transforms: [ sub.tf.obj.insert({ object: { target_key: 'processed' }, value: true }), + sub.tf.object.copy({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], }), - ]}), + ] }), ], } diff --git a/examples/config/transform/meta/exactly_once_system/data.jsonl b/examples/transform/meta/exactly_once_system/data.jsonl similarity index 100% rename from examples/config/transform/meta/exactly_once_system/data.jsonl rename to examples/transform/meta/exactly_once_system/data.jsonl diff --git a/examples/transform/meta/exactly_once_system/stdout.txt b/examples/transform/meta/exactly_once_system/stdout.txt new file mode 100644 index 00000000..468b2b2c --- /dev/null +++ b/examples/transform/meta/exactly_once_system/stdout.txt @@ -0,0 +1,14 @@ +{ + "a": "b", + "processed": true +} + +{ + "c": "d", + "processed": true +} + +{ + "e": "f", + "processed": true +} diff --git a/examples/config/transform/meta/execution_time/config.jsonnet b/examples/transform/meta/execution_time/config.jsonnet similarity index 95% rename from examples/config/transform/meta/execution_time/config.jsonnet rename to examples/transform/meta/execution_time/config.jsonnet index 6ea6278d..0984356d 100644 --- a/examples/config/transform/meta/execution_time/config.jsonnet +++ b/examples/transform/meta/execution_time/config.jsonnet @@ -1,6 +1,6 @@ // This example shows how to use the `meta_metric_duration` transform to // measure the execution time of other transforms. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; local attr = { AppName: 'example' }; local dest = { type: 'aws_cloudwatch_embedded_metrics' }; @@ -37,7 +37,7 @@ local dest = { type: 'aws_cloudwatch_embedded_metrics' }; sub.transform.utility.delay({ duration: '100ms' }), sub.transform.utility.delay({ duration: '100ms' }), sub.transform.utility.delay({ duration: '100ms' }), - ] + ], }, ), ], diff --git a/examples/config/transform/meta/retry_with_backoff/data.json b/examples/transform/meta/execution_time/data.json similarity index 100% rename from examples/config/transform/meta/retry_with_backoff/data.json rename to examples/transform/meta/execution_time/data.json diff --git a/examples/transform/meta/execution_time/stdout.txt b/examples/transform/meta/execution_time/stdout.txt new file mode 100644 index 00000000..d8b7594c --- /dev/null +++ b/examples/transform/meta/execution_time/stdout.txt @@ -0,0 +1,3 @@ +{"_aws":{"Timestamp":1724300928469,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName"]],"Metrics":[{"Name":"ObjectCopyDuration"}]}]},"AppName":"example","ObjectCopyDuration":28209} +{"_aws":{"Timestamp":1724300928469,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName"]],"Metrics":[{"Name":"UtilityDelayDuration"}]}]},"AppName":"example","UtilityDelayDuration":101036000} +{"_aws":{"Timestamp":1724300928470,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName"]],"Metrics":[{"Name":"UtilityMultiDuration"}]}]},"AppName":"example","UtilityMultiDuration":303265541} diff --git a/examples/config/transform/meta/retry_with_backoff/config.jsonnet b/examples/transform/meta/retry_with_backoff/config.jsonnet similarity index 88% rename from examples/config/transform/meta/retry_with_backoff/config.jsonnet rename to examples/transform/meta/retry_with_backoff/config.jsonnet index 
ac6518cc..a2e52066 100644 --- a/examples/config/transform/meta/retry_with_backoff/config.jsonnet +++ b/examples/transform/meta/retry_with_backoff/config.jsonnet @@ -1,7 +1,7 @@ // This example shows how to implement retry with backoff behavior for any // transform that does not produce an output. This technique may be useful // when enriching data with external services or asynchronous data pipelines. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // `key` is the target of the transform that may not produce an output and is // checked to determine if the transform was successful. @@ -21,6 +21,7 @@ local cnd = sub.cnd.all([ condition: cnd, // If this returns false, then the transforms are retried. retry: { delay: '1s', count: 4 }, // Retry up to 4 times with a 1 second backoff (1s, 1s, 1s, 1s). }), + sub.tf.object.copy({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/transform/meta/retry_with_backoff/data.json b/examples/transform/meta/retry_with_backoff/data.json new file mode 100644 index 00000000..b6e81411 --- /dev/null +++ b/examples/transform/meta/retry_with_backoff/data.json @@ -0,0 +1 @@ +{"a":"b"} diff --git a/examples/transform/meta/retry_with_backoff/stdout.txt b/examples/transform/meta/retry_with_backoff/stdout.txt new file mode 100644 index 00000000..567a78fe --- /dev/null +++ b/examples/transform/meta/retry_with_backoff/stdout.txt @@ -0,0 +1,4 @@ +{ + "a": "b", + "c": true +} diff --git a/examples/transform/number/clamp/config.jsonnet b/examples/transform/number/clamp/config.jsonnet new file mode 100644 index 00000000..5067dd2e --- /dev/null +++ b/examples/transform/number/clamp/config.jsonnet @@ -0,0 +1,11 @@ +// This example shows how to clamp a number to a range. +local sub = import '../../../../substation.libsonnet'; + +{ + concurrency: 1, + transforms: [ + sub.tf.number.maximum({ value: 0 }), + sub.tf.number.minimum({ value: 100 }), + sub.tf.send.stdout(), + ], +} diff --git a/examples/config/transform/number/clamp/data.txt b/examples/transform/number/clamp/data.txt similarity index 100% rename from examples/config/transform/number/clamp/data.txt rename to examples/transform/number/clamp/data.txt diff --git a/examples/config/transform/number/clamp/stdout.txt b/examples/transform/number/clamp/stdout.txt similarity index 100% rename from examples/config/transform/number/clamp/stdout.txt rename to examples/transform/number/clamp/stdout.txt diff --git a/examples/config/transform/number/max/config.jsonnet b/examples/transform/number/max/config.jsonnet similarity index 78% rename from examples/config/transform/number/max/config.jsonnet rename to examples/transform/number/max/config.jsonnet index 42b3d359..74709096 100644 --- a/examples/config/transform/number/max/config.jsonnet +++ b/examples/transform/number/max/config.jsonnet @@ -1,6 +1,6 @@ // This example uses the `number_maximum` transform to return the larger // of two values, where one value is a constant and the other is a message. 
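The clamp example above chains exactly this pair of transforms: `maximum({ value: 0 })` first raises any value below 0 up to 0, then `minimum({ value: 100 })` lowers any value above 100 down to 100, so every output lands in [0, 100]. The same ordering, written as plain Jsonnet for clarity (a sketch, not part of the library):

```jsonnet
// clamp(x, lo, hi) mirrors the transform chain: max with the lower bound,
// then min with the upper bound.
local clamp(x, lo, hi) = std.min(hi, std.max(lo, x));

{
  below: clamp(-5, 0, 100),   // -5  -> 0
  inside: clamp(42, 0, 100),  // 42  -> 42
  above: clamp(150, 0, 100),  // 150 -> 100
}
```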
-local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, diff --git a/examples/config/transform/number/max/data.txt b/examples/transform/number/max/data.txt similarity index 100% rename from examples/config/transform/number/max/data.txt rename to examples/transform/number/max/data.txt diff --git a/examples/config/transform/number/max/stdout.txt b/examples/transform/number/max/stdout.txt similarity index 100% rename from examples/config/transform/number/max/stdout.txt rename to examples/transform/number/max/stdout.txt diff --git a/examples/config/transform/number/min/config.jsonnet b/examples/transform/number/min/config.jsonnet similarity index 78% rename from examples/config/transform/number/min/config.jsonnet rename to examples/transform/number/min/config.jsonnet index 672d7245..9a70e26f 100644 --- a/examples/config/transform/number/min/config.jsonnet +++ b/examples/transform/number/min/config.jsonnet @@ -1,6 +1,6 @@ // This example uses the `number_minimum` transform to return the smaller // of two values, where one value is a constant and the other is a message. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, diff --git a/examples/config/transform/number/min/data.txt b/examples/transform/number/min/data.txt similarity index 100% rename from examples/config/transform/number/min/data.txt rename to examples/transform/number/min/data.txt diff --git a/examples/config/transform/number/min/stdout.txt b/examples/transform/number/min/stdout.txt similarity index 100% rename from examples/config/transform/number/min/stdout.txt rename to examples/transform/number/min/stdout.txt diff --git a/examples/config/transform/send/aux_transforms/config.jsonnet b/examples/transform/send/aux_transforms/config.jsonnet similarity index 90% rename from examples/config/transform/send/aux_transforms/config.jsonnet rename to examples/transform/send/aux_transforms/config.jsonnet index 1e3ad904..3d726095 100644 --- a/examples/config/transform/send/aux_transforms/config.jsonnet +++ b/examples/transform/send/aux_transforms/config.jsonnet @@ -2,7 +2,7 @@ // are executed after the data is buffered and before it is sent. The // transforms applied inside of the send transform do not affect the data // sent through the main pipeline. All send transforms use this behavior. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, @@ -26,9 +26,8 @@ local sub = import '../../../../../build/config/substation.libsonnet'; // the additional transforms to compress and encode the file. 
aux_tforms: [ sub.tf.object.insert({ object: { target_key: 'transformed_by' }, value: 'send_file' }), - ] + sub.pattern.tf.fmt.jsonl + [ - // sub.tf.fmt.to.gzip(), - // sub.tf.fmt.to.base64(), + sub.tf.agg.to.string({ separator: '\n' }), + sub.tf.str.append({ suffix: '\n' }), ], }), // This transform is included to show that the data is not modified outside of diff --git a/examples/config/transform/send/aux_transforms/data.jsonl b/examples/transform/send/aux_transforms/data.jsonl similarity index 100% rename from examples/config/transform/send/aux_transforms/data.jsonl rename to examples/transform/send/aux_transforms/data.jsonl diff --git a/examples/transform/send/aux_transforms/stdout.txt b/examples/transform/send/aux_transforms/stdout.txt new file mode 100644 index 00000000..3358032f --- /dev/null +++ b/examples/transform/send/aux_transforms/stdout.txt @@ -0,0 +1,26 @@ +{"a":"b"} +{"c":"d"} +{"e":"f"} +{"g":"h"} +{"i":"j"} +{"k":"l"} +{"m":"n"} +{"o":"p"} +{"q":"r"} +{"s":"t"} +{"u":"v"} +{"w":"x"} +{"a":"b","transformed_by":"send_stdout"} +{"c":"d","transformed_by":"send_stdout"} +{"e":"f","transformed_by":"send_stdout"} +{"g":"h","transformed_by":"send_stdout"} +{"i":"j","transformed_by":"send_stdout"} +{"k":"l","transformed_by":"send_stdout"} +{"m":"n","transformed_by":"send_stdout"} +{"o":"p","transformed_by":"send_stdout"} +{"q":"r","transformed_by":"send_stdout"} +{"s":"t","transformed_by":"send_stdout"} +{"u":"v","transformed_by":"send_stdout"} +{"w":"x","transformed_by":"send_stdout"} +{"y":"z","transformed_by":"send_stdout"} +{"y":"z"} diff --git a/examples/config/transform/send/aws_s3_glacier/config.jsonnet b/examples/transform/send/aws_s3_glacier/config.jsonnet similarity index 76% rename from examples/config/transform/send/aws_s3_glacier/config.jsonnet rename to examples/transform/send/aws_s3_glacier/config.jsonnet index 4133b405..b9784e6f 100644 --- a/examples/config/transform/send/aws_s3_glacier/config.jsonnet +++ b/examples/transform/send/aws_s3_glacier/config.jsonnet @@ -2,7 +2,7 @@ // The Glacier Instant Retrieval class is recommended for archival data that is // compatible with Substation's serverless architecture; this class can be read // directly by a Lambda function triggered by an SNS notification. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, @@ -16,8 +16,12 @@ local sub = import '../../../../../build/config/substation.libsonnet'; storage_class: 'GLACIER_IR', // Glacier Instant Retrieval. // S3 objects are organized by time to the nearest hour and have a UUID filename. file_path: { time_format: '2006/01/02/15', uuid: true, suffix: '.jsonl.gz' }, - // This example formats the data as JSONL and compresses it with Gzip. - aux_tforms: sub.pattern.tf.fmt.jsonl + [sub.tf.fmt.to.gzip()], + // This example formats the data as JSON Lines and compresses it with Gzip. + aux_tforms: [ + sub.tf.agg.to.string({ separator: '\n' }), + sub.tf.str.append({ suffix: '\n' }), + sub.tf.fmt.to.gzip(), + ], }), ], } diff --git a/examples/transform/send/batch/config.jsonnet b/examples/transform/send/batch/config.jsonnet new file mode 100644 index 00000000..00477a44 --- /dev/null +++ b/examples/transform/send/batch/config.jsonnet @@ -0,0 +1,16 @@ +// This example configures send transforms with batch keys to organize +// data before it is sent externally. Every send transform supports batching +// and optionally grouping JSON objects by a value derived from the object. 
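Since `batch_key` is described as "a value derived from the object", it presumably accepts the same object-path syntax that `source_key` uses elsewhere in these examples; that is an assumption here, not something this diff demonstrates. A sketch of grouping by a nested key:

```jsonnet
local sub = import '../../../../substation.libsonnet';

{
  concurrency: 1,
  transforms: [
    // Assumed: batch_key resolves a path into each object, so messages
    // shaped like {"tenant":{"id":"t1"},...} are batched per tenant ID.
    sub.tf.send.stdout({ object: { batch_key: 'tenant.id' } }),
  ],
}
```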
+local sub = import '../../../../substation.libsonnet'; + +{ + concurrency: 1, + transforms: [ + sub.tf.object.copy({ object: { source_key: '@pretty' } }), + // Each object is organized by the value retrieved from the `group_id` key. + sub.tf.send.stdout({ object: { batch_key: 'group_id' } }), + // This also applies to file-based send transforms, and every other send + // transform as well. + sub.tf.send.file({ object: { batch_key: 'group_id' } }), + ], +} diff --git a/examples/config/transform/send/batch/data.jsonl b/examples/transform/send/batch/data.jsonl similarity index 100% rename from examples/config/transform/send/batch/data.jsonl rename to examples/transform/send/batch/data.jsonl diff --git a/examples/transform/send/batch/stdout.txt b/examples/transform/send/batch/stdout.txt new file mode 100644 index 00000000..183cb9c0 --- /dev/null +++ b/examples/transform/send/batch/stdout.txt @@ -0,0 +1,64 @@ +{ + "a": "b", + "group_id": 1 +} + +{ + "e": "f", + "group_id": 1 +} + +{ + "i": "j", + "group_id": 1 +} + +{ + "m": "n", + "group_id": 1 +} + +{ + "q": "r", + "group_id": 1 +} + +{ + "u": "v", + "group_id": 1 +} + +{ + "y": "z", + "group_id": 1 +} + +{ + "c": "d", + "group_id": 2 +} + +{ + "g": "h", + "group_id": 2 +} + +{ + "k": "l", + "group_id": 2 +} + +{ + "o": "p", + "group_id": 2 +} + +{ + "s": "t", + "group_id": 2 +} + +{ + "w": "x", + "group_id": 2 +} diff --git a/examples/config/transform/send/datadog/config.jsonnet b/examples/transform/send/datadog/config.jsonnet similarity index 94% rename from examples/config/transform/send/datadog/config.jsonnet rename to examples/transform/send/datadog/config.jsonnet index 3e702982..ca6b8906 100644 --- a/examples/config/transform/send/datadog/config.jsonnet +++ b/examples/transform/send/datadog/config.jsonnet @@ -3,7 +3,7 @@ // // More information about the Datadog Logs API can be found here: // https://docs.datadoghq.com/api/latest/logs/#send-logs -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // Datadog has a strict limit of 5MB per payload. Any individual event // larger than 1MB will be truncated on ingest. diff --git a/examples/config/transform/send/datadog/data.jsonl b/examples/transform/send/datadog/data.jsonl similarity index 100% rename from examples/config/transform/send/datadog/data.jsonl rename to examples/transform/send/datadog/data.jsonl diff --git a/examples/config/transform/send/splunk/config.jsonnet b/examples/transform/send/splunk/config.jsonnet similarity index 93% rename from examples/config/transform/send/splunk/config.jsonnet rename to examples/transform/send/splunk/config.jsonnet index df1830e3..9b2e60c0 100644 --- a/examples/config/transform/send/splunk/config.jsonnet +++ b/examples/transform/send/splunk/config.jsonnet @@ -5,7 +5,7 @@ // // More information about the Splunk HEC can be found here: // https://docs.splunk.com/Documentation/SplunkCloud/latest/Data/HECExamples -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // By default the Splunk HEC limits the size of each request to 1MB. 
local max_size = 1000 * 1000; diff --git a/examples/config/transform/send/splunk/data.jsonl b/examples/transform/send/splunk/data.jsonl similarity index 100% rename from examples/config/transform/send/splunk/data.jsonl rename to examples/transform/send/splunk/data.jsonl diff --git a/examples/config/transform/send/sumologic/config.jsonnet b/examples/transform/send/sumologic/config.jsonnet similarity index 71% rename from examples/config/transform/send/sumologic/config.jsonnet rename to examples/transform/send/sumologic/config.jsonnet index 44347f45..90252a03 100644 --- a/examples/config/transform/send/sumologic/config.jsonnet +++ b/examples/transform/send/sumologic/config.jsonnet @@ -1,9 +1,9 @@ -// This example creates a newline delimited JSON (ndjson) document that can be -// sent to a Sumo Logic HTTPS endpoint. +// This example creates a JSON Lines document that can be sent to a +// Sumo Logic HTTPS endpoint. // // More information about Sumo Logic HTTP upload can be found here: // https://help.sumologic.com/docs/send-data/hosted-collectors/http-source/logs-metrics/upload-logs/ -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; // Sumo Logic has a strict limit of 1MB per request. local max_size = 1000 * 1000; @@ -13,7 +13,10 @@ local max_size = 1000 * 1000; transforms: [ sub.tf.send.http.post({ batch: { size: max_size }, - auxiliary_transforms: sub.pattern.tf.fmt.jsonl, + aux_tforms: [ + sub.tf.agg.to.string({ separator: '\n' }), + sub.tf.str.append({ suffix: '\n' }), + ], // There is no authentication, so the URL should be treated like a secret. url: 'https://endpoint6.collection.us2.sumologic.com/receiver/v1/http/xxxxxxxxxx', // You can override the default source category associated with the URL. diff --git a/examples/config/transform/send/sumologic/data.jsonl b/examples/transform/send/sumologic/data.jsonl similarity index 100% rename from examples/config/transform/send/sumologic/data.jsonl rename to examples/transform/send/sumologic/data.jsonl diff --git a/examples/config/transform/time/string_conversion/config.jsonnet b/examples/transform/time/str_conversion/config.jsonnet similarity index 86% rename from examples/config/transform/time/string_conversion/config.jsonnet rename to examples/transform/time/str_conversion/config.jsonnet index 58fbbcfe..ea16d4e5 100644 --- a/examples/config/transform/time/string_conversion/config.jsonnet +++ b/examples/transform/time/str_conversion/config.jsonnet @@ -1,5 +1,5 @@ // This example shows how to convert time values between string formats. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { concurrency: 1, @@ -11,6 +11,7 @@ local sub = import '../../../../../build/config/substation.libsonnet'; sub.tf.time.from.string({ obj: { source_key: 'time', target_key: 'time' }, format: '2006-01-02T15:04:05.000Z' }), // This converts the Unix time back to a string. 
sub.tf.time.to.string({ obj: { source_key: 'time', target_key: 'time' }, format: '2006-01-02T15:04:05' }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/time/string_conversion/data.json b/examples/transform/time/str_conversion/data.json similarity index 100% rename from examples/config/transform/time/string_conversion/data.json rename to examples/transform/time/str_conversion/data.json diff --git a/examples/transform/time/str_conversion/stdout.txt b/examples/transform/time/str_conversion/stdout.txt new file mode 100644 index 00000000..b05f7192 --- /dev/null +++ b/examples/transform/time/str_conversion/stdout.txt @@ -0,0 +1,3 @@ +{ + "time": "2024-01-01T01:02:03" +} diff --git a/examples/config/transform/utility/generate_ctrl/config.jsonnet b/examples/transform/utility/generate_ctrl/config.jsonnet similarity index 80% rename from examples/config/transform/utility/generate_ctrl/config.jsonnet rename to examples/transform/utility/generate_ctrl/config.jsonnet index feb1b242..be754bdf 100644 --- a/examples/config/transform/utility/generate_ctrl/config.jsonnet +++ b/examples/transform/utility/generate_ctrl/config.jsonnet @@ -2,12 +2,13 @@ // generate a control (ctrl) Message based on the number of data Messages // received by the system. ctrl Messages override the settings of the // `aggregate_to_array` transform (and any other transform that supports them). -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; { transforms: [ sub.tf.utility.control({ batch: { count: 2 } }), sub.tf.aggregate.to.array({ batch: { count: 10000 } }), + sub.tf.obj.cp({ object: { source_key: '@pretty' } }), sub.tf.send.stdout(), ], } diff --git a/examples/config/transform/utility/generate_ctrl/data.jsonl b/examples/transform/utility/generate_ctrl/data.jsonl similarity index 100% rename from examples/config/transform/utility/generate_ctrl/data.jsonl rename to examples/transform/utility/generate_ctrl/data.jsonl diff --git a/examples/transform/utility/generate_ctrl/stdout.txt b/examples/transform/utility/generate_ctrl/stdout.txt new file mode 100644 index 00000000..71a916df --- /dev/null +++ b/examples/transform/utility/generate_ctrl/stdout.txt @@ -0,0 +1,59 @@ +[ + { + "a": "b" + }, + { + "c": "d" + } +] + +[ + { + "e": "f" + }, + { + "g": "h" + } +] + +[ + { + "i": "j" + }, + { + "k": "l" + } +] + +[ + { + "m": "n" + }, + { + "o": "p" + } +] + +[ + { + "q": "r" + }, + { + "s": "t" + } +] + +[ + { + "u": "v" + }, + { + "w": "x" + } +] + +[ + { + "y": "z" + } +] diff --git a/examples/config/transform/utility/message_bytes/config.jsonnet b/examples/transform/utility/message_bytes/config.jsonnet similarity index 92% rename from examples/config/transform/utility/message_bytes/config.jsonnet rename to examples/transform/utility/message_bytes/config.jsonnet index a2cdff2c..8fbdc123 100644 --- a/examples/config/transform/utility/message_bytes/config.jsonnet +++ b/examples/transform/utility/message_bytes/config.jsonnet @@ -1,6 +1,6 @@ // This example shows how to use the `utility_metric_bytes` transform to // sum the amount of data received and transformed by Substation.
-local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; local attr = { AppName: 'example' }; local dest = { type: 'aws_cloudwatch_embedded_metrics' }; diff --git a/examples/config/transform/utility/message_bytes/data.jsonl b/examples/transform/utility/message_bytes/data.jsonl similarity index 100% rename from examples/config/transform/utility/message_bytes/data.jsonl rename to examples/transform/utility/message_bytes/data.jsonl diff --git a/examples/transform/utility/message_bytes/stdout.txt b/examples/transform/utility/message_bytes/stdout.txt new file mode 100644 index 00000000..4141fade --- /dev/null +++ b/examples/transform/utility/message_bytes/stdout.txt @@ -0,0 +1,2 @@ +{"_aws":{"Timestamp":1724299043477,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName"]],"Metrics":[{"Name":"BytesReceived"}]}]},"AppName":"example","BytesReceived":117} +{"_aws":{"Timestamp":1724299043477,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName"]],"Metrics":[{"Name":"BytesTransformed"}]}]},"AppName":"example","BytesTransformed":195} diff --git a/examples/config/transform/utility/message_count/config.jsonnet b/examples/transform/utility/message_count/config.jsonnet similarity index 91% rename from examples/config/transform/utility/message_count/config.jsonnet rename to examples/transform/utility/message_count/config.jsonnet index 37b2c3ef..9443e128 100644 --- a/examples/config/transform/utility/message_count/config.jsonnet +++ b/examples/transform/utility/message_count/config.jsonnet @@ -1,6 +1,6 @@ // This example shows how to use the `utility_metric_count` transform to // count the number of messages received and transformed by Substation. -local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; local attr = { AppName: 'example' }; local dest = { type: 'aws_cloudwatch_embedded_metrics' }; diff --git a/examples/config/transform/utility/message_count/data.jsonl b/examples/transform/utility/message_count/data.jsonl similarity index 100% rename from examples/config/transform/utility/message_count/data.jsonl rename to examples/transform/utility/message_count/data.jsonl diff --git a/examples/transform/utility/message_count/stdout.txt b/examples/transform/utility/message_count/stdout.txt new file mode 100644 index 00000000..5589b801 --- /dev/null +++ b/examples/transform/utility/message_count/stdout.txt @@ -0,0 +1,2 @@ +{"_aws":{"Timestamp":1724299095089,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName"]],"Metrics":[{"Name":"MessagesReceived"}]}]},"AppName":"example","MessagesReceived":13} +{"_aws":{"Timestamp":1724299095089,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName"]],"Metrics":[{"Name":"MessagesTransformed"}]}]},"AppName":"example","MessagesTransformed":0} diff --git a/examples/config/transform/utility/message_freshness/config.jsonnet b/examples/transform/utility/message_freshness/config.jsonnet similarity index 93% rename from examples/config/transform/utility/message_freshness/config.jsonnet rename to examples/transform/utility/message_freshness/config.jsonnet index 3ce8c5f5..2bbfef4e 100644 --- a/examples/config/transform/utility/message_freshness/config.jsonnet +++ b/examples/transform/utility/message_freshness/config.jsonnet @@ -8,7 +8,7 @@ // // The transform emits two metrics that describe success and failure, annotated // in the `FreshnessType` attribute. 
-local sub = import '../../../../../build/config/substation.libsonnet'; +local sub = import '../../../../substation.libsonnet'; local attr = { AppName: 'example' }; local dest = { type: 'aws_cloudwatch_embedded_metrics' }; diff --git a/examples/transform/utility/message_freshness/data.jsonl b/examples/transform/utility/message_freshness/data.jsonl new file mode 100644 index 00000000..7ba48645 --- /dev/null +++ b/examples/transform/utility/message_freshness/data.jsonl @@ -0,0 +1 @@ +{"timestamp":1724299266000000000} diff --git a/examples/transform/utility/message_freshness/stdout.txt b/examples/transform/utility/message_freshness/stdout.txt new file mode 100644 index 00000000..db57f7be --- /dev/null +++ b/examples/transform/utility/message_freshness/stdout.txt @@ -0,0 +1,2 @@ +{"_aws":{"Timestamp":1724299370516,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["AppName","FreshnessType"]],"Metrics":[{"Name":"MessageFreshness"}]}]},"AppName":"example","FreshnessType":"Success","MessageFreshness":0} +{"_aws":{"Timestamp":1724299370516,"CloudWatchMetrics":[{"Namespace":"Substation","Dimensions":[["FreshnessType","AppName"]],"Metrics":[{"Name":"MessageFreshness"}]}]},"FreshnessType":"Failure","AppName":"example","MessageFreshness":1} diff --git a/go.mod b/go.mod index 38563679..0dda1abe 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,30 @@ -module github.com/brexhq/substation +module github.com/brexhq/substation/v2 go 1.22 require ( github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.54.8 - github.com/aws/aws-sdk-go-v2 v1.30.3 - github.com/aws/aws-sdk-go-v2/config v1.27.26 - github.com/aws/aws-sdk-go-v2/credentials v1.17.26 + github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/aws/aws-sdk-go-v2/config v1.27.30 + github.com/aws/aws-sdk-go-v2/credentials v1.17.29 + github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.14.12 + github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.34 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.14 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.5 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.6 github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 - github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 + github.com/aws/aws-sdk-go-v2/service/firehose v1.32.2 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 + github.com/aws/aws-sdk-go-v2/service/lambda v1.58.1 + github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 + github.com/aws/aws-sdk-go-v2/service/sns v1.31.5 + github.com/aws/aws-sdk-go-v2/service/sqs v1.34.5 + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 github.com/aws/aws-xray-sdk-go v1.8.4 - github.com/awslabs/kinesis-aggregation/go v0.0.0-20230808105340-e631fe742486 + github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20230808105340-e631fe742486 + github.com/brexhq/substation v1.7.1 github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 @@ -29,16 +42,21 @@ require ( require ( github.com/andybalholm/brotli v1.1.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect - github.com/aws/smithy-go v1.20.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect + github.com/aws/smithy-go v1.20.4 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -52,5 +70,4 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 88d0cea9..68f77d0d 100644 --- a/go.sum +++ b/go.sum @@ -4,54 +4,95 @@ github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1 github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= -github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.54.8 h1:+soIjaRsuXfEJ9ts9poJD2fIIzSSRwfx+T69DrTtL2M= github.com/aws/aws-sdk-go v1.54.8/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= -github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= -github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= -github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 
h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= +github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/config v1.27.30 h1:AQF3/+rOgeJBQP3iI4vojlPib5X6eeOYoa/af7OxAYg= +github.com/aws/aws-sdk-go-v2/config v1.27.30/go.mod h1:yxqvuubha9Vw8stEgNiStO+yZpP68Wm9hLmcm+R/Qk4= +github.com/aws/aws-sdk-go-v2/credentials v1.17.29 h1:CwGsupsXIlAFYuDVHv1nnK0wnxO0wZ/g1L8DSK/xiIw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.29/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.14.12 h1:R8nvub089lfNl3+j6Yf+m8kS64Zois56Bu5ku6KAXNE= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.14.12/go.mod h1:bswOrGH35stnF9k41t5gKQ8b+j6B4SLe6cF3xHuJG6E= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.34 h1:aBhXqjhRjD7LAyhahF4wyV7VRj+zrKUq0DMQlZ++xF0= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.34/go.mod h1:zKXjmMV9v/LiSAiupgPuW8QQ3HyvuLGbiHZpIpEmxlg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.14 h1:dQa4KkoEVgk3oLL9IeoW9qrXijyQ6lWa+DX6Vn32Lhw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.14/go.mod h1:aRKW0B+zH8J6cz3FFiQ9JbUQc7UroLx6lwfvNqIsPOs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 h1:mimdLQkIX1zr8GIPY1ZtALdBQGxcASiBd2MOp8m/dMc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.5 h1:/YvqO1j75i4leoV+Z3a5s/dAlEszf2wTKBW8jc3Gd4s= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.5/go.mod h1:maEDlnDRdhsc0xrUljh3dUJbej11AHz+VTQJsNw1QmE= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.6 
h1:LKZuRTlh8RszjuWcUwEDvCGwjx5olHPp6ZOepyZV5p8= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.6/go.mod h1:s2fYaueBuCnwv1XQn6T8TfShxJWusv5tWPMcL+GY6+g= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.5 h1:sM/SaWUKPtsCcXE0bHZPUG4jjCbFbxakyptXQbYLrdU= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.22.5/go.mod h1:3YxVsEoCNYOLIbdA+cCXSp1fom9hrhyB1DsCiYryCaQ= github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 h1:pjZzcXU25gsD2WmlmlayEsyXIWMVOK3//x4BXvK9c0U= github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3/go.mod h1:4ew4HelByABYyBE+8iU8Rzrp5PdBic5yd9nFMhbnwE8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/firehose v1.32.2 h1:BaLB1YvppB82w++nMzw0+CESCCW2vAPaLxRt0Zi06l8= +github.com/aws/aws-sdk-go-v2/service/firehose v1.32.2/go.mod h1:aEIXb5VUx5COGtVbhP8pe/Ulm0bQzxPbPmsVH5+Jog8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 h1:GckUnpm4EJOAio1c8o25a+b3lVfwVzC9gnSBqiiNmZM= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17 h1:HDJGz1jlV7RokVgTPfx1UHBHANC0N5Uk++xgyYgz5E0= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.17/go.mod h1:5szDu6TWdRDytfDxUQVv2OYfpTQMKApVFyqpm+TcA98= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 h1:iirGMva2IXw4kcqsvuF+uc8ARweuVqoQJjzRZGaiV1E= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5/go.mod h1:pKTvEQz1PcNd+gKArVyeHpVM63AWnFqYyg07WAQQANQ= +github.com/aws/aws-sdk-go-v2/service/lambda v1.58.1 h1:AfTND9lcZ0i4QV0LwgiwonDbWm8YPr4iYJ28n/x+FAo= +github.com/aws/aws-sdk-go-v2/service/lambda v1.58.1/go.mod h1:19OJBUjzuycsyPiTi8Gxx17XJjsF9Ck/cQeDGvsiics= github.com/aws/aws-sdk-go-v2/service/route53 v1.6.2 h1:OsggywXCk9iFKdu2Aopg3e1oJITIuyW36hA/B0rqupE= github.com/aws/aws-sdk-go-v2/service/route53 v1.6.2/go.mod h1:ZnAMilx42P7DgIrdjlWCkNIGSBLzeyk6T31uB8oGTwY= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= 
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 h1:3TZlWvCC813uhS1Z4fVTmBhg41OYUrgSlvXqIDDkurw= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6/go.mod h1:5NPkI3RsTOhwz1CuG7VVSgJCm3CINKkoIaUbUZWQ67w= +github.com/aws/aws-sdk-go-v2/service/sns v1.31.5 h1:q8R1hxwOHE4e6TInafToa8AHTLQpJrxWXYk7GINJoyw= +github.com/aws/aws-sdk-go-v2/service/sns v1.31.5/go.mod h1:wDacBq+NshhM8KhdysbM4wRFxVyghyj7AAI+l8+o9f0= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.5 h1:HYyVDOC2/PIg+3oBX1q0wtDU5kONki6lrgIG0afrBkY= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.5/go.mod h1:7idt3XszF6sE9WPS1GqZRiDJOxw4oPtlRBXodWnCGjU= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/aws-xray-sdk-go v1.8.4 h1:5D631fWhs5hdBFW/8ALjWam+alm4tW42UGAuMJ1WAUI= github.com/aws/aws-xray-sdk-go v1.8.4/go.mod h1:mbN1uxWCue9WjS2Oj2FWg7TGIsLikxMOscD0qtEjFFY= -github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= -github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20230808105340-e631fe742486 h1:fBy4wQzC3T5S6F1o1uTYeR8WF1MIL7GSsPYjzabOwtA= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20230808105340-e631fe742486/go.mod h1:CQGhQ8Rf1WF5Ke8XuUjcd4PRb+mFTjzKR/pm3EWKaQw= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20230808105340-e631fe742486 h1:266Pq6JfxdphziJ1LiqU68OJrKiTxyF8hbiceQWX3Cs= +github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20230808105340-e631fe742486/go.mod h1:0Qr1uMHFmHsIYMcG4T7BJ9yrJtWadhOmpABCX69dwuc= +github.com/brexhq/substation v1.7.1 h1:v5SwdexouuTLzLdwSVJ/RbkJyt+lN+2a5WOw+l11ULI= +github.com/brexhq/substation v1.7.1/go.mod h1:4GFx9JFQVZX17xjA7fY0tMaLnsPzhVKb8YzLNeymm2I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/go-sql-driver/mysql v1.5.0/go.mod 
h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -70,7 +111,6 @@ github.com/itchyny/gojq v0.12.16 h1:yLfgLxhIr/6sJNVmYfQjTIv0jGctu6/DgDoivmxTr7g= github.com/itchyny/gojq v0.12.16/go.mod h1:6abHbdC2uB9ogMS38XsErnfqJ94UlngIJGlRAIj4jTM= github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -90,7 +130,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -108,29 +147,27 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8= github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/aws/README.md b/internal/aws/README.md deleted file mode 100644 index c6071b51..00000000 --- a/internal/aws/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# aws - -Contains functions for managing AWS API calls. Substation follows these rules across every application: -* AWS clients are configured using [environment variables](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html) -* AWS clients use service interface APIs (e.g., s3iface, kinesisiface, etc.) -* AWS clients enable [X-Ray](https://aws.amazon.com/xray/) for tracing if a [daemon address](https://docs.aws.amazon.com/xray/latest/devguide/xray-sdk-go-configuration.html#xray-sdk-go-configuration-envvars) is found diff --git a/internal/aws/appconfig/appconfig.go b/internal/aws/appconfig/appconfig.go deleted file mode 100644 index 78a3d830..00000000 --- a/internal/aws/appconfig/appconfig.go +++ /dev/null @@ -1,44 +0,0 @@ -// package appconfig provides functions for interacting with AWS AppConfig. -package appconfig - -import ( - "context" - "fmt" - "io" - "os" - - "github.com/brexhq/substation/internal/http" -) - -// errMissingPrefetchEnvVar is returned when a Lambda is deployed without a configured AppConfig URL. -var errMissingPrefetchEnvVar = fmt.Errorf("missing AWS_APPCONFIG_EXTENSION_PREFETCH_LIST environment variable") - -var client http.HTTP - -// GetPrefetch queries and returns the Lambda's prefetched AppConfig configuration. 
-func GetPrefetch(ctx context.Context, dst io.Writer) error { - if !client.IsEnabled() { - client.Setup() - } - - env := "AWS_APPCONFIG_EXTENSION_PREFETCH_LIST" - url, found := os.LookupEnv(env) - if !found { - return fmt.Errorf("appconfig getprefetch: %v", errMissingPrefetchEnvVar) - } - - local := "http://localhost:2772" + url - - ctx = context.WithoutCancel(ctx) - resp, err := client.Get(ctx, local) - if err != nil { - return fmt.Errorf("appconfig getprefetch URL %s: %v", local, err) - } - defer resp.Body.Close() - - if _, err := io.Copy(dst, resp.Body); err != nil { - return fmt.Errorf("appconfig getprefetch: %v", err) - } - - return nil -} diff --git a/internal/aws/cloudwatch/cloudwatch.go b/internal/aws/cloudwatch/cloudwatch.go deleted file mode 100644 index c1e0a6d9..00000000 --- a/internal/aws/cloudwatch/cloudwatch.go +++ /dev/null @@ -1,305 +0,0 @@ -package cloudwatch - -import ( - "fmt" - "os" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -const ( - // This is the period in seconds that the AWS Kinesis CloudWatch alarms - // will evaluate the metrics over. - kinesisMetricsPeriod = 60 -) - -var ( - // By default, AWS Kinesis streams must be below the lower threshold for - // 100% of the evaluation period (60 minutes) to scale down. This value can - // be overridden by the environment variable AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS. - kinesisDownscaleDatapoints = 60 - // By default, AWS Kinesis streams must be above the upper threshold for - // 100% of the evaluation period (5 minutes) to scale up. This value can - // be overridden by the environment variable AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS. - kinesisUpscaleDatapoints = 5 - // By default, AWS Kinesis streams will scale up if the incoming records and bytes - // are above 70% of the threshold. This value can be overridden by the environment - // variable AUTOSCALE_KINESIS_THRESHOLD, but it cannot be less than 40% or greater - // than 90%. - kinesisThreshold = 0.7 -) - -func init() { - if v, found := os.LookupEnv("AUTOSCALE_KINESIS_DOWNSCALE_DATAPOINTS"); found { - dps, err := strconv.Atoi(v) - if err != nil { - panic(err) - } - - kinesisDownscaleDatapoints = dps - } - - if v, found := os.LookupEnv("AUTOSCALE_KINESIS_UPSCALE_DATAPOINTS"); found { - dps, err := strconv.Atoi(v) - if err != nil { - panic(err) - } - - kinesisUpscaleDatapoints = dps - } - - if v, found := os.LookupEnv("AUTOSCALE_KINESIS_THRESHOLD"); found { - threshold, err := strconv.ParseFloat(v, 64) - if err != nil { - panic(err) - } - - if threshold >= 0.4 && threshold <= 0.9 { - kinesisThreshold = threshold - } - } -} - -// New returns a configured CloudWatch client. -func New(cfg iaws.Config) *cloudwatch.CloudWatch { - conf, sess := iaws.New(cfg) - - c := cloudwatch.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the CloudWatch API interface. -type API struct { - Client cloudwatchiface.CloudWatchAPI -} - -// Setup creates a new CloudWatch client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// UpdateKinesisDownscaleAlarm updates CloudWatch alarms that manage the scale down tracking for Kinesis streams. 
-func (a *API) UpdateKinesisDownscaleAlarm(ctx aws.Context, name, stream, topic string, shards int64) error { - downscaleThreshold := kinesisThreshold - 0.35 - - if _, err := a.Client.PutMetricAlarmWithContext( - ctx, - &cloudwatch.PutMetricAlarmInput{ - AlarmName: aws.String(name), - AlarmDescription: aws.String(stream), - ActionsEnabled: aws.Bool(true), - AlarmActions: []*string{aws.String(topic)}, - EvaluationPeriods: aws.Int64(int64(kinesisDownscaleDatapoints)), - DatapointsToAlarm: aws.Int64(int64(kinesisDownscaleDatapoints)), - Threshold: aws.Float64(downscaleThreshold), - ComparisonOperator: aws.String("LessThanOrEqualToThreshold"), - TreatMissingData: aws.String("ignore"), - Metrics: []*cloudwatch.MetricDataQuery{ - { - Id: aws.String("m1"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingRecords"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("m2"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingBytes"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e1"), - Expression: aws.String("FILL(m1,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e2"), - Expression: aws.String("FILL(m2,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e3"), - Expression: aws.String( - fmt.Sprintf("e1/(1000*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingRecordsPercent"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e4"), - Expression: aws.String( - fmt.Sprintf("e2/(1048576*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingBytesPercent"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e5"), - Expression: aws.String("MAX([e3,e4])"), - Label: aws.String("IncomingMax"), - ReturnData: aws.Bool(true), - }, - }, - }); err != nil { - return fmt.Errorf("updatealarm alarm %s stream %s: %v", name, stream, err) - } - - if err := a.UpdateKinesisAlarmState(ctx, name, "Threshold value updated"); err != nil { - return fmt.Errorf("updatealarm alarm %s stream %s: %v", name, stream, err) - } - - return nil -} - -// UpdateKinesisUpscaleAlarm updates CloudWatch alarms that manage the scale up tracking for Kinesis streams. 
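Both alarm builders (the downscale variant above and the upscale variant that follows) share the same metric math: `e3` and `e4` normalize the summed `IncomingRecords` and `IncomingBytes` by each shard's nominal capacity (1,000 records/s and 1 MiB/s) multiplied by the shard count and the 60-second period, and `e5` takes the larger of the two. A standalone sketch of that calculation; the input numbers are illustrative only:

```go
package main

import "fmt"

const period = 60 // seconds, matching kinesisMetricsPeriod

// utilization mirrors expressions e3, e4, and e5: incoming volume divided by
// the stream's nominal capacity over the period, taking the max of the two.
func utilization(records, bytes float64, shards int64) float64 {
	recordPct := records / float64(1000*shards*period)  // 1,000 records/s per shard
	bytePct := bytes / float64(1048576*shards*period)   // 1 MiB/s per shard
	return max(recordPct, bytePct)
}

func main() {
	// 90,000 records and 50 MiB received in one minute on a 2-shard stream:
	// 90000/120000 = 0.75, which exceeds the default 0.7 upscale threshold.
	fmt.Printf("%.2f\n", utilization(90000, 50*1048576, 2))
}
```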
-func (a *API) UpdateKinesisUpscaleAlarm(ctx aws.Context, name, stream, topic string, shards int64) error { - upscaleThreshold := kinesisThreshold - - if _, err := a.Client.PutMetricAlarmWithContext( - ctx, - &cloudwatch.PutMetricAlarmInput{ - AlarmName: aws.String(name), - AlarmDescription: aws.String(stream), - ActionsEnabled: aws.Bool(true), - AlarmActions: []*string{aws.String(topic)}, - EvaluationPeriods: aws.Int64(int64(kinesisUpscaleDatapoints)), - DatapointsToAlarm: aws.Int64(int64(kinesisUpscaleDatapoints)), - Threshold: aws.Float64(upscaleThreshold), - ComparisonOperator: aws.String("GreaterThanOrEqualToThreshold"), - TreatMissingData: aws.String("ignore"), - Metrics: []*cloudwatch.MetricDataQuery{ - { - Id: aws.String("m1"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingRecords"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("m2"), - MetricStat: &cloudwatch.MetricStat{ - Metric: &cloudwatch.Metric{ - Namespace: aws.String("AWS/Kinesis"), - MetricName: aws.String("IncomingBytes"), - Dimensions: []*cloudwatch.Dimension{ - { - Name: aws.String("StreamName"), - Value: aws.String(stream), - }, - }, - }, - Period: aws.Int64(kinesisMetricsPeriod), - Stat: aws.String("Sum"), - }, - Label: aws.String("IncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e1"), - Expression: aws.String("FILL(m1,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingRecords"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e2"), - Expression: aws.String("FILL(m2,REPEAT)"), - Label: aws.String("FillMissingDataPointsForIncomingBytes"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e3"), - Expression: aws.String( - fmt.Sprintf("e1/(1000*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingRecordsPercent"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e4"), - Expression: aws.String( - fmt.Sprintf("e2/(1048576*%d*%d)", shards, kinesisMetricsPeriod), - ), - Label: aws.String("IncomingBytesPercent"), - ReturnData: aws.Bool(false), - }, - { - Id: aws.String("e5"), - Expression: aws.String("MAX([e3,e4])"), - Label: aws.String("IncomingMax"), - ReturnData: aws.Bool(true), - }, - }, - }); err != nil { - return fmt.Errorf("updatealarm alarm %s stream %s: %v", name, stream, err) - } - - if err := a.UpdateKinesisAlarmState(ctx, name, "Threshold value updated"); err != nil { - return fmt.Errorf("updatealarm alarm %s stream %s: %v", name, stream, err) - } - - return nil -} - -func (a *API) UpdateKinesisAlarmState(ctx aws.Context, name, reason string) error { - _, err := a.Client.SetAlarmStateWithContext(ctx, - &cloudwatch.SetAlarmStateInput{ - AlarmName: aws.String(name), - StateValue: aws.String("INSUFFICIENT_DATA"), - StateReason: aws.String(reason), - }) - return err -} diff --git a/internal/aws/config.go b/internal/aws/config.go deleted file mode 100644 index f593918e..00000000 --- a/internal/aws/config.go +++ /dev/null @@ -1,105 +0,0 @@ -package aws - -import ( - "os" - "regexp" - "strconv" - - "github.com/brexhq/substation/internal/config" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - 
"github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" -) - -type Config struct { - Region string `json:"region"` - RoleARN string `json:"role_arn"` - MaxRetries int `json:"max_retries"` - RetryableErrors []string `json:"retryable_errors"` -} - -// New returns a new AWS configuration and session. -func New(cfg Config) (*aws.Config, *session.Session) { - conf := aws.NewConfig() - - if cfg.Region != "" { - conf = conf.WithRegion(cfg.Region) - } else if v, ok := os.LookupEnv("AWS_REGION"); ok { - conf = conf.WithRegion(v) - } else if v, ok := os.LookupEnv("AWS_DEFAULT_REGION"); ok { - conf = conf.WithRegion(v) - } - - retryer := NewRetryer(config.Retry{ - Count: cfg.MaxRetries, - ErrorMessages: cfg.RetryableErrors, - }) - - // Configurations take precedence over environment variables. - if cfg.MaxRetries != 0 { - goto RETRYER - } - - if v, ok := os.LookupEnv("AWS_MAX_ATTEMPTS"); ok { - max, err := strconv.Atoi(v) - if err != nil { - panic(err) - } - - retryer.SetMaxRetries(max) - } - -RETRYER: - conf.Retryer = retryer - sess := session.Must(session.NewSession()) - if cfg.RoleARN != "" { - conf = conf.WithCredentials(stscreds.NewCredentials(sess, cfg.RoleARN)) - } - - return conf, sess -} - -// NewDefault returns a new AWS configuration and session with default values. -func NewDefault() (*aws.Config, *session.Session) { - return New(Config{}) -} - -func NewRetryer(cfg config.Retry) customRetryer { - errMsg := make([]*regexp.Regexp, len(cfg.ErrorMessages)) - for i, err := range cfg.ErrorMessages { - errMsg[i] = regexp.MustCompile(err) - } - - return customRetryer{ - DefaultRetryer: client.DefaultRetryer{ - NumMaxRetries: cfg.Count, - }, - errorMessages: errMsg, - } -} - -type customRetryer struct { - client.DefaultRetryer - - // errorMessages are regular expressions that are used to match error messages. - errorMessages []*regexp.Regexp -} - -func (r customRetryer) SetMaxRetries(max int) { - r.NumMaxRetries = max -} - -// ShouldRetry retries if any of the configured error strings are found in the request error. -func (r customRetryer) ShouldRetry(req *request.Request) bool { - for _, err := range r.errorMessages { - if err.MatchString(req.Error.Error()) { - return true - } - } - - // Fallback to the default retryer. - return r.DefaultRetryer.ShouldRetry(req) -} diff --git a/internal/aws/config_v2.go b/internal/aws/config_v2.go deleted file mode 100644 index 62634a97..00000000 --- a/internal/aws/config_v2.go +++ /dev/null @@ -1,83 +0,0 @@ -package aws - -import ( - "context" - "os" - "regexp" - "strconv" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-xray-sdk-go/instrumentation/awsv2" -) - -// NewV2 returns an SDK v2 configuration. -func NewV2(ctx context.Context, cfg Config) (aws.Config, error) { - var region string - if cfg.Region != "" { - region = cfg.Region - } else if v, ok := os.LookupEnv("AWS_REGION"); ok { - region = v - } else if v, ok := os.LookupEnv("AWS_DEFAULT_REGION"); ok { - region = v - } - - var creds aws.CredentialsProvider // nil is a valid default. - if cfg.RoleARN != "" { - conf, err := config.LoadDefaultConfig(ctx, - config.WithRegion(region), - ) - if err != nil { - return aws.Config{}, err - } - - stsSvc := sts.NewFromConfig(conf) - creds = stscreds.NewAssumeRoleProvider(stsSvc, cfg.RoleARN) - } - - maxRetry := 3 // Matches the standard retryer. 
- if cfg.MaxRetries != 0 { - maxRetry = cfg.MaxRetries - } else if v, ok := os.LookupEnv("AWS_MAX_ATTEMPTS"); ok { - max, err := strconv.Atoi(v) - if err != nil { - return aws.Config{}, err - } - - maxRetry = max - } - - errMsg := make([]*regexp.Regexp, len(cfg.RetryableErrors)) - for i, err := range cfg.RetryableErrors { - errMsg[i] = regexp.MustCompile(err) - } - - conf, err := config.LoadDefaultConfig(ctx, - config.WithRegion(region), - config.WithCredentialsProvider(creds), - config.WithRetryer(func() aws.Retryer { - return retry.NewStandard(func(o *retry.StandardOptions) { - o.MaxAttempts = maxRetry - // Additional retryable errors ~must be appended~ to not overwrite the defaults. - o.Retryables = append(o.Retryables, retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - for _, msg := range errMsg { - if msg.MatchString(err.Error()) { - return aws.TrueTernary - } - } - - return aws.FalseTernary - })) - }) - }), - ) - - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - awsv2.AWSV2Instrumentor(&conf.APIOptions) - } - - return conf, err -} diff --git a/internal/aws/dynamodb/dynamodb.go b/internal/aws/dynamodb/dynamodb.go deleted file mode 100644 index 9b607628..00000000 --- a/internal/aws/dynamodb/dynamodb.go +++ /dev/null @@ -1,278 +0,0 @@ -package dynamodb - -import ( - "context" - "fmt" - "os" - - "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New returns a configured DynamoDB client. -func New(cfg iaws.Config) *dynamodb.DynamoDB { - conf, sess := iaws.New(cfg) - - c := dynamodb.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the DynamoDB API interface. -type API struct { - Client dynamodbiface.DynamoDBAPI -} - -// Setup creates a new DynamoDB client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -func (a *API) DeleteItem(ctx aws.Context, table string, key map[string]*dynamodb.AttributeValue) (resp *dynamodb.DeleteItemOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.DeleteItemWithContext( - ctx, - &dynamodb.DeleteItemInput{ - TableName: aws.String(table), - Key: key, - }, - ) - if err != nil { - return nil, fmt.Errorf("deleteitem table %s: %v", table, err) - } - - return resp, nil -} - -// BatchPutItem is a convenience wrapper for putting multiple items into a DynamoDB table. 
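Stepping back to config_v2.go for a moment: NewV2 appends custom retryable conditions to the standard retryer rather than replacing the slice, which preserves the SDK's default retry behavior. The same pattern in isolation (the error pattern and attempt count are placeholders):

```go
package main

import (
	"context"
	"log"
	"regexp"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// A hypothetical retryable error pattern; any regular expression works.
	pattern := regexp.MustCompile(`ProvisionedThroughputExceededException`)

	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithRetryer(func() aws.Retryer {
			return retry.NewStandard(func(o *retry.StandardOptions) {
				o.MaxAttempts = 5
				// Append (don't overwrite) so the SDK's default retryable
				// conditions are preserved.
				o.Retryables = append(o.Retryables, retry.IsErrorRetryableFunc(func(err error) aws.Ternary {
					if pattern.MatchString(err.Error()) {
						return aws.TrueTernary
					}
					return aws.FalseTernary
				}))
			})
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // use cfg with any service client, e.g. a Kinesis or S3 client
}
```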
-func (a *API) BatchPutItem(ctx aws.Context, table string, items []map[string]*dynamodb.AttributeValue) (resp *dynamodb.BatchWriteItemOutput, err error) { - var requests []*dynamodb.WriteRequest - for _, item := range items { - requests = append(requests, &dynamodb.WriteRequest{ - PutRequest: &dynamodb.PutRequest{ - Item: item, - }, - }) - } - - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.BatchWriteItemWithContext( - ctx, - &dynamodb.BatchWriteItemInput{ - RequestItems: map[string][]*dynamodb.WriteRequest{ - table: requests, - }, - }, - ) - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - switch aerr.Code() { - case dynamodb.ErrCodeProvisionedThroughputExceededException: - var retry []map[string]*dynamodb.AttributeValue - - for _, item := range resp.UnprocessedItems[table] { - retry = append(retry, item.PutRequest.Item) - } - - if len(retry) > 0 { - return a.BatchPutItem(ctx, table, retry) - } - - fallthrough - default: - return nil, fmt.Errorf("batch_put_item: table %s: %v", table, err) - } - } - } - - return resp, nil -} - -// UpdateItem is a convenience wrapper for updating an item in a DynamoDB table. -func (a *API) UpdateItem(ctx aws.Context, input *dynamodb.UpdateItemInput) (resp *dynamodb.UpdateItemOutput, err error) { - ctx = context.WithoutCancel(ctx) - return a.Client.UpdateItemWithContext(ctx, input) -} - -// PutItem is a convenience wrapper for putting items into a DynamoDB table. -func (a *API) PutItem(ctx aws.Context, table string, item map[string]*dynamodb.AttributeValue) (resp *dynamodb.PutItemOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.PutItemWithContext( - ctx, - &dynamodb.PutItemInput{ - TableName: aws.String(table), - Item: item, - }) - if err != nil { - return nil, fmt.Errorf("putitem table %s: %v", table, err) - } - - return resp, nil -} - -func (a *API) PutItemWithCondition(ctx aws.Context, table string, item map[string]*dynamodb.AttributeValue, conditionExpression string, expressionAttributeNames map[string]*string, expressionAttributeValues map[string]*dynamodb.AttributeValue) (resp *dynamodb.PutItemOutput, err error) { - input := &dynamodb.PutItemInput{ - TableName: aws.String(table), - ConditionExpression: aws.String(conditionExpression), - ExpressionAttributeNames: expressionAttributeNames, - Item: item, - ExpressionAttributeValues: expressionAttributeValues, - ReturnValues: aws.String("ALL_OLD"), - } - - resp, err = a.Client.PutItemWithContext(ctx, input) - if err != nil { - return resp, err - } - - return resp, nil -} - -/* -Query is a convenience wrapper for querying a DynamoDB table. The partition and sort keys are always referenced in the key condition expression as ":PK" and ":SK".
Refer to the DynamoDB documentation for the Query operation's request syntax and key condition expression patterns: - -- https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#API_Query_RequestSyntax - -- https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.KeyConditionExpressions -*/ -func (a *API) Query(ctx aws.Context, table, partitionKey, sortKey, keyConditionExpression string, limit int64, scanIndexForward bool) (resp *dynamodb.QueryOutput, err error) { - expression := make(map[string]*dynamodb.AttributeValue) - expression[":PK"] = &dynamodb.AttributeValue{ - S: aws.String(partitionKey), - } - - if sortKey != "" { - expression[":SK"] = &dynamodb.AttributeValue{ - S: aws.String(sortKey), - } - } - - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.QueryWithContext( - ctx, - &dynamodb.QueryInput{ - TableName: aws.String(table), - KeyConditionExpression: aws.String(keyConditionExpression), - ExpressionAttributeValues: expression, - Limit: aws.Int64(limit), - ScanIndexForward: aws.Bool(scanIndexForward), - }) - if err != nil { - return nil, fmt.Errorf("query: table %s key_condition_expression %s: %v", table, keyConditionExpression, err) - } - - return resp, nil -} - -// GetItem is a convenience wrapper for getting items from a DynamoDB table. -func (a *API) GetItem(ctx aws.Context, table string, attributes map[string]interface{}, consistentRead bool) (resp *dynamodb.GetItemOutput, err error) { - attr, err := dynamodbattribute.MarshalMap(attributes) - if err != nil { - return nil, fmt.Errorf("get_item: table %s: %v", table, err) - } - - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.GetItemWithContext( - ctx, - &dynamodb.GetItemInput{ - TableName: aws.String(table), - Key: attr, - ConsistentRead: aws.Bool(consistentRead), - }, - ) - if err != nil { - return nil, fmt.Errorf("get_item: table %s: %v", table, err) - } - - return resp, nil -} - -// ConvertEventsAttributeValue converts events.DynamoDBAttributeValue to dynamodb.AttributeValue.
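A hypothetical caller of the Query wrapper above, showing how the partition and sort key values bind to `:PK` and `:SK` in the key condition expression; the table name and key values are made up for illustration, and the function assumes it sits in the same package as the API type:

```go
package dynamodb

import (
	"context"

	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// latestEvents is an illustrative caller of the Query wrapper above.
func latestEvents(ctx context.Context, api API) (*dynamodb.QueryOutput, error) {
	return api.Query(ctx,
		"events",                 // table
		"user#123",               // partition key, bound to :PK
		"2024-01-01",             // sort key, bound to :SK
		"PK = :PK AND SK >= :SK", // key condition expression
		25,    // limit
		false, // scanIndexForward: false returns newest items first
	)
}
```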
-func ConvertEventsAttributeValue(v events.DynamoDBAttributeValue) *dynamodb.AttributeValue { - switch v.DataType() { - case events.DataTypeBinary: - return &dynamodb.AttributeValue{ - B: v.Binary(), - } - case events.DataTypeBinarySet: - return &dynamodb.AttributeValue{ - BS: v.BinarySet(), - } - case events.DataTypeNumber: - return &dynamodb.AttributeValue{ - N: aws.String(v.Number()), - } - case events.DataTypeNumberSet: - av := &dynamodb.AttributeValue{} - - for _, n := range v.NumberSet() { - av.NS = append(av.NS, aws.String(n)) - } - - return av - case events.DataTypeString: - return &dynamodb.AttributeValue{ - S: aws.String(v.String()), - } - case events.DataTypeStringSet: - av := &dynamodb.AttributeValue{} - - for _, s := range v.StringSet() { - av.SS = append(av.SS, aws.String(s)) - } - - return av - case events.DataTypeList: - av := &dynamodb.AttributeValue{} - - for _, v := range v.List() { - av.L = append(av.L, ConvertEventsAttributeValue(v)) - } - - return av - case events.DataTypeMap: - av := &dynamodb.AttributeValue{} - av.M = make(map[string]*dynamodb.AttributeValue) - - for k, v := range v.Map() { - av.M[k] = ConvertEventsAttributeValue(v) - } - - return av - case events.DataTypeNull: - return &dynamodb.AttributeValue{ - NULL: aws.Bool(true), - } - case events.DataTypeBoolean: - return &dynamodb.AttributeValue{ - BOOL: aws.Bool(v.Boolean()), - } - default: - return nil - } -} - -// ConvertEventsAttributeValueMap converts a map of events.DynamoDBAttributeValue to a map of dynamodb.AttributeValue. -func ConvertEventsAttributeValueMap(m map[string]events.DynamoDBAttributeValue) map[string]*dynamodb.AttributeValue { - av := make(map[string]*dynamodb.AttributeValue) - - for k, v := range m { - av[k] = ConvertEventsAttributeValue(v) - } - - return av -} diff --git a/internal/aws/dynamodb/dynamodb_test.go b/internal/aws/dynamodb/dynamodb_test.go deleted file mode 100644 index 6003b073..00000000 --- a/internal/aws/dynamodb/dynamodb_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package dynamodb - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" -) - -type mockedGetItem struct { - dynamodbiface.DynamoDBAPI - Resp dynamodb.GetItemOutput -} - -func (m mockedGetItem) GetItemWithContext(ctx aws.Context, input *dynamodb.GetItemInput, opts ...request.Option) (*dynamodb.GetItemOutput, error) { - return &m.Resp, nil -} - -func TestGetItem(t *testing.T) { - tests := []struct { - resp dynamodb.GetItemOutput - expected string - }{ - { - resp: dynamodb.GetItemOutput{ - Item: map[string]*dynamodb.AttributeValue{ - "foo": { - S: aws.String("bar"), - }, - }, - ConsumedCapacity: &dynamodb.ConsumedCapacity{}, - }, - expected: "bar", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedGetItem{Resp: test.resp}, - } - - m := make(map[string]interface{}) - resp, err := a.GetItem(ctx, "", m, false) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - var item map[string]interface{} - err = dynamodbattribute.UnmarshalMap(resp.Item, &item) - if err != nil { - t.Fatalf("%v, unexpected error", err) - } - - if item["foo"] != test.expected { - t.Errorf("expected %+v, got %s", item["foo"], test.expected) - } - } -} - -type mockedBatchPutItem struct { - dynamodbiface.DynamoDBAPI - Resp dynamodb.BatchWriteItemOutput -} - -func (m 
mockedBatchPutItem) BatchWriteItemWithContext(ctx aws.Context, input *dynamodb.BatchWriteItemInput, opts ...request.Option) (*dynamodb.BatchWriteItemOutput, error) { - return &m.Resp, nil -} - -func TestBatchPutItem(t *testing.T) { - tests := []struct { - resp dynamodb.BatchWriteItemOutput - expected string - }{ - { - resp: dynamodb.BatchWriteItemOutput{ - ItemCollectionMetrics: map[string][]*dynamodb.ItemCollectionMetrics{ - "table": { - { - ItemCollectionKey: map[string]*dynamodb.AttributeValue{ - "foo": { - S: aws.String("bar"), - }, - }, - }, - }, - }, - }, - expected: "bar", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedBatchPutItem{Resp: test.resp}, - } - - resp, err := a.BatchPutItem(ctx, "", []map[string]*dynamodb.AttributeValue{}) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - var item map[string]interface{} - err = dynamodbattribute.UnmarshalMap(resp.ItemCollectionMetrics["table"][0].ItemCollectionKey, &item) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - if item["foo"] != test.expected { - t.Errorf("expected %+v, got %s", item["foo"], test.expected) - } - } -} - -type mockedPutItem struct { - dynamodbiface.DynamoDBAPI - Resp dynamodb.PutItemOutput -} - -func (m mockedPutItem) PutItemWithContext(ctx aws.Context, input *dynamodb.PutItemInput, opts ...request.Option) (*dynamodb.PutItemOutput, error) { - return &m.Resp, nil -} - -func TestPutItem(t *testing.T) { - tests := []struct { - resp dynamodb.PutItemOutput - expected string - }{ - { - resp: dynamodb.PutItemOutput{ - Attributes: map[string]*dynamodb.AttributeValue{ - "foo": { - S: aws.String("bar"), - }, - }, - ConsumedCapacity: &dynamodb.ConsumedCapacity{}, - ItemCollectionMetrics: &dynamodb.ItemCollectionMetrics{}, - }, - expected: "bar", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedPutItem{Resp: test.resp}, - } - - resp, err := a.PutItem(ctx, "", map[string]*dynamodb.AttributeValue{}) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - var item map[string]interface{} - err = dynamodbattribute.UnmarshalMap(resp.Attributes, &item) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - if item["foo"] != test.expected { - t.Errorf("expected %+v, got %s", item["foo"], test.expected) - } - } -} - -type mockedQuery struct { - dynamodbiface.DynamoDBAPI - Resp dynamodb.QueryOutput -} - -func (m mockedQuery) QueryWithContext(ctx aws.Context, input *dynamodb.QueryInput, opts ...request.Option) (*dynamodb.QueryOutput, error) { - return &m.Resp, nil -} - -func TestQuery(t *testing.T) { - tests := []struct { - resp dynamodb.QueryOutput - expected string - }{ - { - resp: dynamodb.QueryOutput{ - Items: []map[string]*dynamodb.AttributeValue{ - { - "foo": { - S: aws.String("bar"), - }, - }, - }, - }, - expected: "bar", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedQuery{Resp: test.resp}, - } - - resp, err := a.Query(ctx, "", "", "", "", 0, true) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - var items []map[string]interface{} - for _, i := range resp.Items { - var item map[string]interface{} - err = dynamodbattribute.UnmarshalMap(i, &item) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - items = append(items, item) - } - - if items[0]["foo"] != test.expected { - t.Errorf("expected %+v, got %s", items[0]["foo"], test.expected) - } - } -} diff --git a/internal/aws/firehose/firehose.go b/internal/aws/firehose/firehose.go 
deleted file mode 100644 index 8d0125f0..00000000 --- a/internal/aws/firehose/firehose.go +++ /dev/null @@ -1,97 +0,0 @@ -package firehose - -import ( - "context" - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/aws/aws-sdk-go/service/firehose/firehoseiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New creates a new session for Kinesis Firehose -func New(cfg iaws.Config) *firehose.Firehose { - conf, sess := iaws.New(cfg) - - c := firehose.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps a Kinesis Firehose client interface -type API struct { - Client firehoseiface.FirehoseAPI -} - -// IsEnabled checks whether a new client has been set -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Setup creates a Kinesis Firehose client -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// PutRecord is a convenience wrapper for putting a record into a Kinesis Firehose stream. -func (a *API) PutRecord(ctx aws.Context, data []byte, stream string) (*firehose.PutRecordOutput, error) { - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PutRecordWithContext( - ctx, - &firehose.PutRecordInput{ - DeliveryStreamName: aws.String(stream), - Record: &firehose.Record{Data: data}, - }) - if err != nil { - return nil, fmt.Errorf("putrecord stream %s: %v", stream, err) - } - - return resp, nil -} - -// PutRecordBatch is a convenience wrapper for putting multiple records into a Kinesis Firehose stream. This function becomes recursive for any records that failed the PutRecord operation. -func (a *API) PutRecordBatch(ctx aws.Context, stream string, data [][]byte) (*firehose.PutRecordBatchOutput, error) { - var records []*firehose.Record - for _, d := range data { - records = append(records, &firehose.Record{Data: d}) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PutRecordBatchWithContext( - ctx, - &firehose.PutRecordBatchInput{ - DeliveryStreamName: aws.String(stream), - Records: records, - }, - ) - - // failed records are identified by the existence of an error code. - // if an error code exists, then data is stored in a new slice and - // recursively input into the function. 
- if resp.FailedPutCount != aws.Int64(0) { - var retry [][]byte - for idx, r := range resp.RequestResponses { - if r.ErrorCode == nil { - continue - } - - retry = append(retry, data[idx]) - } - - if len(retry) > 0 { - return a.PutRecordBatch(ctx, stream, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("putrecordbatch stream %s: %v", stream, err) - } - - return resp, nil -} diff --git a/internal/aws/firehose/firehose_test.go b/internal/aws/firehose/firehose_test.go deleted file mode 100644 index ca40cca7..00000000 --- a/internal/aws/firehose/firehose_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package firehose - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/aws/aws-sdk-go/service/firehose/firehoseiface" -) - -type mockedPutRecord struct { - firehoseiface.FirehoseAPI - Resp firehose.PutRecordOutput -} - -func (m mockedPutRecord) PutRecordWithContext(ctx aws.Context, in *firehose.PutRecordInput, opts ...request.Option) (*firehose.PutRecordOutput, error) { - return &m.Resp, nil -} - -func TestPutRecord(t *testing.T) { - tests := []struct { - resp firehose.PutRecordOutput - expected string - }{ - { - resp: firehose.PutRecordOutput{ - Encrypted: aws.Bool(true), - RecordId: aws.String("foo"), - }, - expected: "foo", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedPutRecord{Resp: test.resp}, - } - resp, err := a.PutRecord(ctx, []byte{}, "") - if err != nil { - t.Fatalf("%v", err) - } - - if *resp.RecordId != test.expected { - t.Errorf("expected %+v, got %s", test.expected, *resp.RecordId) - } - } -} - -type mockedPutRecordBatch struct { - firehoseiface.FirehoseAPI - Resp firehose.PutRecordBatchOutput -} - -func (m mockedPutRecordBatch) PutRecordBatchWithContext(ctx aws.Context, in *firehose.PutRecordBatchInput, opts ...request.Option) (*firehose.PutRecordBatchOutput, error) { - return &m.Resp, nil -} - -func TestPutRecordBatch(t *testing.T) { - tests := []struct { - resp firehose.PutRecordBatchOutput - expected []string - }{ - { - resp: firehose.PutRecordBatchOutput{ - Encrypted: aws.Bool(true), - FailedPutCount: aws.Int64(0), - RequestResponses: []*firehose.PutRecordBatchResponseEntry{ - { - RecordId: aws.String("foo"), - }, - { - RecordId: aws.String("bar"), - }, - { - RecordId: aws.String("baz"), - }, - }, - }, - expected: []string{"foo", "bar", "baz"}, - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedPutRecordBatch{Resp: test.resp}, - } - - resp, err := a.PutRecordBatch(ctx, "", [][]byte{}) - if err != nil { - t.Fatalf("%v", err) - } - - for idx, resp := range resp.RequestResponses { - if *resp.RecordId != test.expected[idx] { - t.Errorf("expected %+v, got %s", test.expected[idx], *resp.RecordId) - } - } - } -} diff --git a/internal/aws/kinesis/kinesis.go b/internal/aws/kinesis/kinesis.go deleted file mode 100644 index a402e12a..00000000 --- a/internal/aws/kinesis/kinesis.go +++ /dev/null @@ -1,344 +0,0 @@ -package kinesis - -import ( - "context" - "crypto/md5" - "fmt" - "os" - "time" - - "github.com/aws/aws-lambda-go/events" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - "github.com/aws/aws-xray-sdk-go/xray" - rec "github.com/awslabs/kinesis-aggregation/go/records" - iaws "github.com/brexhq/substation/internal/aws" - - //nolint: staticcheck // not ready to switch package - 
"github.com/golang/protobuf/proto" -) - -// Aggregate produces a KPL-compliant Kinesis record -type Aggregate struct { - Record *rec.AggregatedRecord - Count int - Size int - PartitionKey string -} - -// New creates a new Kinesis record with default values -// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L167 -func (a *Aggregate) New() { - a.Record = &rec.AggregatedRecord{} - a.Count = 0 - a.Size = 0 - - a.PartitionKey = "" - a.Record.PartitionKeyTable = make([]string, 0) -} - -func varIntSize(i int) int { - if i == 0 { - return 1 - } - - var needed int - for i > 0 { - needed++ - i >>= 1 - } - - bytes := needed / 7 - if needed%7 > 0 { - bytes++ - } - - return bytes -} - -func (a *Aggregate) calculateRecordSize(data []byte, partitionKey string) int { - var recordSize int - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L344-L349 - pkSize := 1 + varIntSize(len(partitionKey)) + len(partitionKey) - recordSize += pkSize - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L362-L364 - pkiSize := 1 + varIntSize(a.Count) - recordSize += pkiSize - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L371-L374 - dataSize := 1 + varIntSize(len(data)) + len(data) - recordSize += dataSize - // https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L376-L378 - recordSize = recordSize + 1 + varIntSize(pkiSize+dataSize) - - // input record size + current aggregated record size + 4 byte magic header + 16 byte MD5 digest - return recordSize + a.Record.XXX_Size() + 20 -} - -// Add inserts a Kinesis record into an aggregated Kinesis record -// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L382 -func (a *Aggregate) Add(data []byte, partitionKey string) bool { - // https://docs.aws.amazon.com/streams/latest/dev/key-concepts.html#partition-key - if len(partitionKey) > 256 { - partitionKey = partitionKey[0:256] - } - - // grab the first parition key in the set of events - if a.PartitionKey == "" { - a.PartitionKey = partitionKey - } - - // Verify the record size won't exceed the 1 MB limit of the Kinesis service. - // https://docs.aws.amazon.com/streams/latest/dev/service-sizes-and-limits.html - if a.calculateRecordSize(data, partitionKey) > 1024*1024 { - return false - } - - pki := uint64(a.Count) - r := &rec.Record{ - PartitionKeyIndex: &pki, - Data: data, - } - - // Append the data to the aggregated record. - a.Record.Records = append(a.Record.Records, r) - a.Record.PartitionKeyTable = append(a.Record.PartitionKeyTable, partitionKey) - - // Update the record count and size. This is not used in the aggregated record. - a.Count++ - a.Size += a.calculateRecordSize(data, partitionKey) - - return true -} - -// Get returns a KPL-compliant compressed Kinesis record -// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L293 -func (a *Aggregate) Get() []byte { - data, _ := proto.Marshal(a.Record) - md5Hash := md5.Sum(data) - - record := []byte("\xf3\x89\x9a\xc2") - record = append(record, data...) - record = append(record, md5Hash[:]...) 
- - return record -} - -// ConvertEventsRecords converts Kinesis records between the Lambda and Go SDK packages. This is required for deaggregating Kinesis records processed by AWS Lambda. -func ConvertEventsRecords(records []events.KinesisEventRecord) []*kinesis.Record { - output := make([]*kinesis.Record, 0) - - for _, r := range records { - // ApproximateArrivalTimestamp is events.SecondsEpochTime which serializes time.Time - ts := r.Kinesis.ApproximateArrivalTimestamp.UTC() - output = append(output, &kinesis.Record{ - ApproximateArrivalTimestamp: &ts, - Data: r.Kinesis.Data, - EncryptionType: &r.Kinesis.EncryptionType, - PartitionKey: &r.Kinesis.PartitionKey, - SequenceNumber: &r.Kinesis.SequenceNumber, - }) - } - - return output -} - -// New returns a configured Kinesis client. -func New(cfg iaws.Config) *kinesis.Kinesis { - conf, sess := iaws.New(cfg) - - c := kinesis.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the Kinesis API interface. -type API struct { - Client kinesisiface.KinesisAPI -} - -// Setup creates a new Kinesis client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// ListShards wraps the ListShardsWithContext API. -func (a *API) ListShards(ctx aws.Context, stream string) (*kinesis.ListShardsOutput, error) { - return a.Client.ListShardsWithContext(ctx, &kinesis.ListShardsInput{ - StreamName: aws.String(stream), - }) -} - -// GetShardIterator wraps the GetShardIteratorWithContext API. -func (a *API) GetShardIterator(ctx aws.Context, stream, shard, iteratorType string) (*kinesis.GetShardIteratorOutput, error) { - return a.Client.GetShardIteratorWithContext(ctx, &kinesis.GetShardIteratorInput{ - ShardId: aws.String(shard), - ShardIteratorType: aws.String(iteratorType), - StreamName: aws.String(stream), - }) -} - -// GetRecords wraps the GetRecordsWithContext API. -func (a *API) GetRecords(ctx aws.Context, iterator string) (*kinesis.GetRecordsOutput, error) { - return a.Client.GetRecordsWithContext(ctx, &kinesis.GetRecordsInput{ - ShardIterator: aws.String(iterator), - }) -} - -// PutRecords is a convenience wrapper for putting multiple records into a Kinesis stream. -func (a *API) PutRecords(ctx aws.Context, stream, partitionKey string, data [][]byte) (*kinesis.PutRecordsOutput, error) { - var records []*kinesis.PutRecordsRequestEntry - - ctx = context.WithoutCancel(ctx) - for _, d := range data { - records = append(records, &kinesis.PutRecordsRequestEntry{ - Data: d, - PartitionKey: aws.String(partitionKey), - }) - } - - resp, err := a.Client.PutRecordsWithContext( - ctx, - &kinesis.PutRecordsInput{ - Records: records, - StreamName: aws.String(stream), - }, - ) - - // If any record fails, then the record is recursively retried. - if resp.FailedRecordCount != nil && *resp.FailedRecordCount > 0 { - var retry [][]byte - - for idx, r := range resp.Records { - if r.ErrorCode != nil { - retry = append(retry, data[idx]) - } - } - - if len(retry) > 0 { - return a.PutRecords(ctx, stream, partitionKey, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("put_records: stream %s: %v", stream, err) - } - - return resp, nil -} - -// ActiveShards returns the number of in-use shards for a Kinesis stream. 
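-// A shard is counted as in-use when it has no ending sequence number,
-// which means it is still open for writes.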
-func (a *API) ActiveShards(ctx aws.Context, stream string) (int64, error) { - var shards int64 - params := &kinesis.ListShardsInput{ - StreamName: aws.String(stream), - } - -LOOP: - for { - output, err := a.Client.ListShardsWithContext(ctx, params) - if err != nil { - return 0, fmt.Errorf("listshards stream %s: %v", stream, err) - } - - for _, s := range output.Shards { - if end := s.SequenceNumberRange.EndingSequenceNumber; end == nil { - shards++ - } - } - - if output.NextToken != nil { - params = &kinesis.ListShardsInput{ - NextToken: output.NextToken, - } - } else { - break LOOP - } - } - - return shards, nil -} - -// UpdateShards uniformly updates a Kinesis stream's shard count and returns when the update is complete. -func (a *API) UpdateShards(ctx aws.Context, stream string, shards int64) error { - params := &kinesis.UpdateShardCountInput{ - StreamName: aws.String(stream), - TargetShardCount: aws.Int64(shards), - ScalingType: aws.String("UNIFORM_SCALING"), - } - if _, err := a.Client.UpdateShardCountWithContext(ctx, params); err != nil { - return fmt.Errorf("updateshards stream %s shards %d: %v", stream, shards, err) - } - - for { - resp, err := a.Client.DescribeStreamSummaryWithContext(ctx, - &kinesis.DescribeStreamSummaryInput{ - StreamName: aws.String(stream), - }) - if err != nil { - return fmt.Errorf("describestream stream %s: %v", stream, err) - } - - if status := resp.StreamDescriptionSummary.StreamStatus; status != aws.String("UPDATING") { - break - } - time.Sleep(1 * time.Second) - } - - return nil -} - -// GetTags recursively retrieves all tags for a Kinesis stream. -func (a *API) GetTags(ctx aws.Context, stream string) ([]*kinesis.Tag, error) { - var tags []*kinesis.Tag - var lastTag string - - for { - req := &kinesis.ListTagsForStreamInput{ - StreamName: aws.String(stream), - } - - if lastTag != "" { - req.ExclusiveStartTagKey = aws.String(lastTag) - } - - resp, err := a.Client.ListTagsForStreamWithContext(ctx, req) - if err != nil { - return nil, fmt.Errorf("listtags stream %s: %v", stream, err) - } - - tags = append(tags, resp.Tags...) - lastTag = *resp.Tags[len(resp.Tags)-1].Key - - // enables recursion - if !*resp.HasMoreTags { - break - } - } - - return tags, nil -} - -// UpdateTag updates a tag on a Kinesis stream. 
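-// Existing tags with the same key are overwritten by AddTagsToStream.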
-func (a *API) UpdateTag(ctx aws.Context, stream, key, value string) error { - input := &kinesis.AddTagsToStreamInput{ - StreamName: aws.String(stream), - Tags: map[string]*string{ - key: aws.String(value), - }, - } - - if _, err := a.Client.AddTagsToStreamWithContext(ctx, input); err != nil { - return fmt.Errorf("updatetag stream %s key %s value %s: %v", stream, key, value, err) - } - - return nil -} diff --git a/internal/aws/kinesis/kinesis_test.go b/internal/aws/kinesis/kinesis_test.go deleted file mode 100644 index 8119b7a0..00000000 --- a/internal/aws/kinesis/kinesis_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package kinesis - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" -) - -type mockedPutRecords struct { - kinesisiface.KinesisAPI - Resp kinesis.PutRecordsOutput -} - -func (m mockedPutRecords) PutRecordsWithContext(ctx aws.Context, in *kinesis.PutRecordsInput, opts ...request.Option) (*kinesis.PutRecordsOutput, error) { - return &m.Resp, nil -} - -func TestPutRecords(t *testing.T) { - tests := []struct { - resp kinesis.PutRecordsOutput - expected string - }{ - { - resp: kinesis.PutRecordsOutput{ - EncryptionType: aws.String("NONE"), - Records: []*kinesis.PutRecordsResultEntry{ - { - ErrorCode: aws.String(""), - ErrorMessage: aws.String(""), - SequenceNumber: aws.String("ABCDEF"), - ShardId: aws.String("XYZ"), - }, - }, - }, - expected: "ABCDEF", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedPutRecords{Resp: test.resp}, - } - - b := [][]byte{ - []byte(""), - } - resp, err := a.PutRecords(ctx, "", "", b) - if err != nil { - t.Fatalf("%v", err) - } - - if *resp.Records[0].SequenceNumber != test.expected { - t.Errorf("expected %+v, got %s", test.expected, *resp.Records[0].SequenceNumber) - } - } -} - -type mockedGetTags struct { - kinesisiface.KinesisAPI - Resp kinesis.ListTagsForStreamOutput -} - -func (m mockedGetTags) ListTagsForStreamWithContext(ctx aws.Context, in *kinesis.ListTagsForStreamInput, opts ...request.Option) (*kinesis.ListTagsForStreamOutput, error) { - return &m.Resp, nil -} - -func TestGetTags(t *testing.T) { - tests := []struct { - resp kinesis.ListTagsForStreamOutput - expected []*kinesis.Tag - }{ - { - resp: kinesis.ListTagsForStreamOutput{ - Tags: []*kinesis.Tag{ - { - Key: aws.String("foo"), - Value: aws.String("bar"), - }, - { - Key: aws.String("baz"), - Value: aws.String("qux"), - }, - }, - // can't test recursion via this style of mock - HasMoreTags: aws.Bool(false), - }, - expected: []*kinesis.Tag{ - { - Key: aws.String("foo"), - Value: aws.String("bar"), - }, - { - Key: aws.String("baz"), - Value: aws.String("qux"), - }, - }, - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedGetTags{Resp: test.resp}, - } - tags, err := a.GetTags(ctx, "") - if err != nil { - t.Fatalf("%v", err) - } - - for idx, test := range test.expected { - tag := tags[idx] - if *tag.Key != *test.Key { - t.Logf("expected %s, got %s", *test.Key, *tag.Key) - t.Fail() - } - - if *tag.Value != *test.Value { - t.Logf("expected %s, got %s", *test.Value, *tag.Value) - t.Fail() - } - } - } -} diff --git a/internal/aws/lambda/lambda.go b/internal/aws/lambda/lambda.go deleted file mode 100644 index b140099d..00000000 --- a/internal/aws/lambda/lambda.go +++ /dev/null @@ -1,75 +0,0 @@ -package lambda - -import ( - "context" - "fmt" - "os" - - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/aws/aws-sdk-go/service/lambda/lambdaiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New returns a configured Lambda client. - -func New(cfg iaws.Config) *lambda.Lambda { - conf, sess := iaws.New(cfg) - - c := lambda.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the Lambda API interface. -type API struct { - Client lambdaiface.LambdaAPI -} - -// Setup creates a new Lambda client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Invoke is a convenience wrapper for synchronously invoking a Lambda function. -func (a *API) Invoke(ctx aws.Context, function string, payload []byte) (resp *lambda.InvokeOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.InvokeWithContext( - ctx, - &lambda.InvokeInput{ - FunctionName: aws.String(function), - InvocationType: aws.String("RequestResponse"), - Payload: payload, - }) - if err != nil { - return nil, fmt.Errorf("invoke function %s: %v", function, err) - } - - return resp, nil -} - -// InvokeAsync is a convenience wrapper for asynchronously invoking a Lambda function. -func (a *API) InvokeAsync(ctx aws.Context, function string, payload []byte) (resp *lambda.InvokeOutput, err error) { - ctx = context.WithoutCancel(ctx) - resp, err = a.Client.InvokeWithContext( - ctx, - &lambda.InvokeInput{ - FunctionName: aws.String(function), - InvocationType: aws.String("Event"), - Payload: payload, - }) - if err != nil { - return nil, fmt.Errorf("invoke_async function %s: %v", function, err) - } - - return resp, nil -} diff --git a/internal/aws/lambda/lambda_test.go b/internal/aws/lambda/lambda_test.go deleted file mode 100644 index d0ddf985..00000000 --- a/internal/aws/lambda/lambda_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package lambda - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/aws/aws-sdk-go/service/lambda/lambdaiface" -) - -type mockedInvoke struct { - lambdaiface.LambdaAPI - Resp lambda.InvokeOutput -} - -func (m mockedInvoke) InvokeWithContext(ctx aws.Context, input *lambda.InvokeInput, opts ...request.Option) (*lambda.InvokeOutput, error) { - return &m.Resp, nil -} - -func TestInvoke(t *testing.T) { - tests := []struct { - resp lambda.InvokeOutput - expected int64 - }{ - { - resp: lambda.InvokeOutput{ - StatusCode: aws.Int64(200), - }, - expected: 200, - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedInvoke{Resp: test.resp}, - } - - resp, err := a.Invoke(ctx, "", nil) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - if *resp.StatusCode != test.expected { - t.Errorf("expected %+v, got %d", resp.Payload, test.expected) - } - } -} - -func TestInvokeAsync(t *testing.T) { - tests := []struct { - resp lambda.InvokeOutput - expected int64 - }{ - { - resp: lambda.InvokeOutput{ - StatusCode: aws.Int64(202), - }, - expected: 202, - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedInvoke{Resp: test.resp}, - } - - resp, err := a.Invoke(ctx, "", nil) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - if *resp.StatusCode != 
test.expected { - t.Errorf("expected %+v, got %d", resp.Payload, test.expected) - } - } -} diff --git a/internal/aws/s3manager/s3manager.go b/internal/aws/s3manager/s3manager.go deleted file mode 100644 index 9f0a89ef..00000000 --- a/internal/aws/s3manager/s3manager.go +++ /dev/null @@ -1,129 +0,0 @@ -// package s3manager provides methods and functions for downloading and uploading objects in AWS S3. -package s3manager - -import ( - "context" - "fmt" - "io" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/media" -) - -// NewS3 returns a configured S3 client. -func NewS3(cfg iaws.Config) *s3.S3 { - conf, sess := iaws.New(cfg) - - c := s3.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// NewS3Downloader returns a configured Downloader client. -func NewS3Downloader(cfg iaws.Config) *s3manager.Downloader { - return s3manager.NewDownloaderWithClient(NewS3(cfg)) -} - -// DownloaderAPI wraps the Downloader API interface. -type DownloaderAPI struct { - Client s3manageriface.DownloaderAPI -} - -// Setup creates a new Downloader client. -func (a *DownloaderAPI) Setup(cfg iaws.Config) { - a.Client = NewS3Downloader(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *DownloaderAPI) IsEnabled() bool { - return a.Client != nil -} - -// Download is a convenience wrapper for downloading an object from S3. -func (a *DownloaderAPI) Download(ctx aws.Context, bucket, key string, dst io.WriterAt) (int64, error) { - input := &s3.GetObjectInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - } - - ctx = context.WithoutCancel(ctx) - size, err := a.Client.DownloadWithContext(ctx, dst, input) - if err != nil { - return 0, fmt.Errorf("s3manager download bucket %s key %s: %v", bucket, key, err) - } - - return size, nil -} - -// NewS3Uploader returns a configured Uploader client. -func NewS3Uploader(cfg iaws.Config) *s3manager.Uploader { - return s3manager.NewUploaderWithClient(NewS3(cfg)) -} - -// UploaderAPI wraps the Uploader API interface. -type UploaderAPI struct { - Client s3manageriface.UploaderAPI -} - -// Setup creates a new Uploader client. -func (a *UploaderAPI) Setup(cfg iaws.Config) { - a.Client = NewS3Uploader(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *UploaderAPI) IsEnabled() bool { - return a.Client != nil -} - -// Upload is a convenience wrapper for uploading an object to S3. 
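-// The source is first copied to a temporary file so that its media type
-// can be detected and set as the object's Content-Type.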
-func (a *UploaderAPI) Upload(ctx aws.Context, bucket, key, storageClass string, src io.Reader) (*s3manager.UploadOutput, error) { - // temporary file is used so that the src can have its content identified and be uploaded to S3 - dst, err := os.CreateTemp("", "substation") - if err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - defer os.Remove(dst.Name()) - defer dst.Close() - - if _, err := io.Copy(dst, src); err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - mediaType, err := media.File(dst) - if err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - if _, err := dst.Seek(0, 0); err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - if storageClass == "" { - storageClass = "STANDARD" - } - - input := &s3manager.UploadInput{ - Bucket: aws.String(bucket), - Key: aws.String(key), - Body: dst, - ContentType: aws.String(mediaType), - StorageClass: aws.String(storageClass), - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.UploadWithContext(ctx, input) - if err != nil { - return nil, fmt.Errorf("s3manager upload bucket %s key %s: %v", bucket, key, err) - } - - return resp, nil -} diff --git a/internal/aws/s3manager/s3manager_test.go b/internal/aws/s3manager/s3manager_test.go deleted file mode 100644 index 5ae62b43..00000000 --- a/internal/aws/s3manager/s3manager_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package s3manager - -import ( - "context" - "io" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" -) - -type mockedDownload struct { - s3manageriface.DownloaderAPI - Resp int64 -} - -func (m mockedDownload) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*s3manager.Downloader)) (int64, error) { - return m.Resp, nil -} - -func TestDownload(t *testing.T) { - tests := []struct { - resp int64 - input struct { - bucket string - key string - } - expected int64 - }{ - { - resp: 1, - input: struct { - bucket string - key string - }{ - bucket: "foo", - key: "bar", - }, - expected: 1, - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := DownloaderAPI{ - mockedDownload{Resp: test.resp}, - } - - var dst io.WriterAt - size, err := a.Download(ctx, test.input.bucket, test.input.key, dst) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - if size != test.expected { - t.Errorf("expected %d, got %d", size, test.expected) - } - } -} - -type mockedUpload struct { - s3manageriface.UploaderAPI - Resp s3manager.UploadOutput -} - -func (m mockedUpload) UploadWithContext(ctx aws.Context, input *s3manager.UploadInput, options ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) { - return &m.Resp, nil -} - -func TestUpload(t *testing.T) { - tests := []struct { - resp s3manager.UploadOutput - input struct { - buffer []byte - bucket string - key string - } - expected string - }{ - { - resp: s3manager.UploadOutput{ - Location: "foo", - }, - input: struct { - buffer []byte - bucket string - key string - }{ - buffer: []byte("foo"), - bucket: "bar", - key: "baz", - }, - expected: "foo", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := UploaderAPI{ - mockedUpload{Resp: test.resp}, - } - - src := strings.NewReader("foo") - resp, 
err := a.Upload(ctx, test.input.bucket, test.input.key, "", src) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - if resp.Location != test.expected { - t.Errorf("expected %s, got %s", resp.Location, test.expected) - } - } -} diff --git a/internal/aws/secretsmanager/secretsmanager.go b/internal/aws/secretsmanager/secretsmanager.go deleted file mode 100644 index 751e8d19..00000000 --- a/internal/aws/secretsmanager/secretsmanager.go +++ /dev/null @@ -1,61 +0,0 @@ -package secretsmanager - -import ( - "context" - "fmt" - "os" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/secretsmanager" - "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" -) - -// New returns a configured Secrets Manager client. -func New(cfg iaws.Config) *secretsmanager.SecretsManager { - conf, sess := iaws.New(cfg) - - c := secretsmanager.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps the Secrets Manager API interface. -type API struct { - Client secretsmanageriface.SecretsManagerAPI -} - -// Setup creates a new Secrets Manager client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// IsEnabled returns true if the client is enabled and ready for use. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// GetSecret is a convenience wrapper for getting a secret from Secrets Manager. -func (a *API) GetSecret(ctx aws.Context, secretName string) (secret string, err error) { - input := &secretsmanager.GetSecretValueInput{ - SecretId: aws.String(secretName), - VersionStage: aws.String("AWSCURRENT"), // VersionStage defaults to AWSCURRENT if unspecified - } - - ctx = context.WithoutCancel(ctx) - result, err := a.Client.GetSecretValueWithContext(ctx, input) - if err != nil { - return secret, fmt.Errorf("getsecretvalue secret %s: %v", secretName, err) - } - - if result.SecretString != nil { - secret = *result.SecretString - return secret, err - } - - return secret, err -} diff --git a/internal/aws/secretsmanager/secretsmanager_test.go b/internal/aws/secretsmanager/secretsmanager_test.go deleted file mode 100644 index 587bbdad..00000000 --- a/internal/aws/secretsmanager/secretsmanager_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package secretsmanager - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/secretsmanager" - "github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface" -) - -type mockedGetSecret struct { - secretsmanageriface.SecretsManagerAPI - Resp secretsmanager.GetSecretValueOutput -} - -func (m mockedGetSecret) GetSecretValueWithContext(ctx aws.Context, input *secretsmanager.GetSecretValueInput, opts ...request.Option) (*secretsmanager.GetSecretValueOutput, error) { - return &m.Resp, nil -} - -func TestGetSecret(t *testing.T) { - tests := []struct { - resp secretsmanager.GetSecretValueOutput - input string - expected string - }{ - { - resp: secretsmanager.GetSecretValueOutput{ - SecretString: aws.String("foo"), - }, - input: "fooer", - expected: "foo", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedGetSecret{Resp: test.resp}, - } - - resp, err := a.GetSecret(ctx, test.input) - if err != nil { - t.Fatalf("%d, unexpected error", err) - } - - if resp != test.expected { - t.Errorf("expected %+v, got %s", resp, 
test.expected) - } - } -} diff --git a/internal/aws/sns/sns.go b/internal/aws/sns/sns.go deleted file mode 100644 index 8708ad73..00000000 --- a/internal/aws/sns/sns.go +++ /dev/null @@ -1,117 +0,0 @@ -package sns - -import ( - "context" - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sns" - "github.com/aws/aws-sdk-go/service/sns/snsiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" - "github.com/google/uuid" -) - -// New returns a configured SNS client. -func New(cfg iaws.Config) *sns.SNS { - conf, sess := iaws.New(cfg) - - c := sns.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps an SNS client interface. -type API struct { - Client snsiface.SNSAPI -} - -// IsEnabled checks whether a new client has been set. -func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Setup creates an SNS client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// Publish is a convenience wrapper for publishing a message to an SNS topic. -func (a *API) Publish(ctx aws.Context, arn string, data []byte) (*sns.PublishOutput, error) { - req := &sns.PublishInput{ - Message: aws.String(string(data)), - TopicArn: aws.String(arn), - } - - if strings.HasSuffix(arn, ".fifo") { - mgid := uuid.New().String() - req.MessageGroupId = aws.String(mgid) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PublishWithContext(ctx, req) - if err != nil { - return nil, fmt.Errorf("publish: topic %s: %v", arn, err) - } - - return resp, nil -} - -// PublishBatch is a convenience wrapper for publishing a batch of messages to an SNS topic. -func (a *API) PublishBatch(ctx aws.Context, topic string, data [][]byte) (*sns.PublishBatchOutput, error) { - mgid := uuid.New().String() - - var entries []*sns.PublishBatchRequestEntry - for idx, d := range data { - entry := &sns.PublishBatchRequestEntry{ - Id: aws.String(strconv.Itoa(idx)), - Message: aws.String(string(d)), - } - - if strings.HasSuffix(topic, ".fifo") { - entry.MessageGroupId = aws.String(mgid) - } - - entries = append(entries, entry) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.PublishBatchWithContext( - ctx, - &sns.PublishBatchInput{ - PublishBatchRequestEntries: entries, - TopicArn: aws.String(topic), - }, - ) - - // if a message fails, then the message ID is used to select the - // original data that was in the message. this data is put in a - // new slice and recursively input into the function. 
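-	// PublishBatch can report per-entry failures in resp.Failed while err
-	// is nil, so the failed entries are checked before the error.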
- if resp.Failed != nil { - var retry [][]byte - for _, f := range resp.Failed { - idx, err := strconv.Atoi(*f.Id) - if err != nil { - return nil, err - } - - retry = append(retry, data[idx]) - } - - if len(retry) > 0 { - return a.PublishBatch(ctx, topic, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("publish_batch: topic %s: %v", topic, err) - } - - return resp, nil -} diff --git a/internal/aws/sns/sns_test.go b/internal/aws/sns/sns_test.go deleted file mode 100644 index 3e2f69d8..00000000 --- a/internal/aws/sns/sns_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package sns - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sns" - "github.com/aws/aws-sdk-go/service/sns/snsiface" -) - -type mockedPublish struct { - snsiface.SNSAPI - Resp sns.PublishOutput -} - -func (m mockedPublish) PublishWithContext(ctx aws.Context, in *sns.PublishInput, opts ...request.Option) (*sns.PublishOutput, error) { - return &m.Resp, nil -} - -func TestPublish(t *testing.T) { - tests := []struct { - resp sns.PublishOutput - expected string - }{ - { - resp: sns.PublishOutput{ - MessageId: aws.String("foo"), - }, - expected: "foo", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedPublish{Resp: test.resp}, - } - - resp, err := a.Publish(ctx, "", []byte("")) - if err != nil { - t.Fatalf("%v", err) - } - - if *resp.MessageId != test.expected { - t.Errorf("expected %+v, got %s", test.expected, *resp.MessageId) - } - } -} - -type mockedPublishBatch struct { - snsiface.SNSAPI - Resp sns.PublishBatchOutput -} - -func (m mockedPublishBatch) PublishBatchWithContext(ctx aws.Context, in *sns.PublishBatchInput, opts ...request.Option) (*sns.PublishBatchOutput, error) { - return &m.Resp, nil -} - -func TestPublishBatch(t *testing.T) { - tests := []struct { - resp sns.PublishBatchOutput - expected []string - }{ - { - resp: sns.PublishBatchOutput{ - Successful: []*sns.PublishBatchResultEntry{ - { - MessageId: aws.String("foo"), - }, - { - MessageId: aws.String("bar"), - }, - { - MessageId: aws.String("baz"), - }, - }, - }, - - expected: []string{"foo", "bar", "baz"}, - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedPublishBatch{Resp: test.resp}, - } - - resp, err := a.PublishBatch(ctx, "", [][]byte{}) - if err != nil { - t.Fatalf("%v", err) - } - - for idx, resp := range resp.Successful { - if *resp.MessageId != test.expected[idx] { - t.Errorf("expected %+v, got %s", test.expected[idx], *resp.MessageId) - } - } - } -} diff --git a/internal/aws/sqs/sqs.go b/internal/aws/sqs/sqs.go deleted file mode 100644 index 5a31975f..00000000 --- a/internal/aws/sqs/sqs.go +++ /dev/null @@ -1,118 +0,0 @@ -package sqs - -import ( - "context" - "fmt" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sqs" - "github.com/aws/aws-sdk-go/service/sqs/sqsiface" - "github.com/aws/aws-xray-sdk-go/xray" - iaws "github.com/brexhq/substation/internal/aws" - "github.com/google/uuid" -) - -// New returns a configured SQS client. -func New(cfg iaws.Config) *sqs.SQS { - conf, sess := iaws.New(cfg) - - c := sqs.New(sess, conf) - if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { - xray.AWS(c.Client) - } - - return c -} - -// API wraps an SQS client interface. -type API struct { - Client sqsiface.SQSAPI -} - -// IsEnabled checks whether a new client has been set. 
-func (a *API) IsEnabled() bool { - return a.Client != nil -} - -// Setup creates an SQS client. -func (a *API) Setup(cfg iaws.Config) { - a.Client = New(cfg) -} - -// SendMessage is a convenience wrapper for sending a message to an SQS queue. -func (a *API) SendMessage(ctx aws.Context, queue string, data []byte) (*sqs.SendMessageOutput, error) { - mgid := uuid.New().String() - - msg := &sqs.SendMessageInput{ - MessageBody: aws.String(string(data)), - QueueUrl: aws.String(queue), - } - - if strings.HasSuffix(queue, ".fifo") { - msg.MessageGroupId = aws.String(mgid) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.SendMessageWithContext(ctx, msg) - if err != nil { - return nil, fmt.Errorf("send_message: queue %s: %v", queue, err) - } - - return resp, nil -} - -// SendMessageBatch is a convenience wrapper for sending multiple messages to an SQS queue. This function becomes recursive for any messages that failed the SendMessage operation. -func (a *API) SendMessageBatch(ctx aws.Context, queue string, data [][]byte) (*sqs.SendMessageBatchOutput, error) { - mgid := uuid.New().String() - - var messages []*sqs.SendMessageBatchRequestEntry - for idx, d := range data { - entry := &sqs.SendMessageBatchRequestEntry{ - Id: aws.String(strconv.Itoa(idx)), - MessageBody: aws.String(string(d)), - } - - if strings.HasSuffix(queue, ".fifo") { - entry.MessageGroupId = aws.String(mgid) - } - - messages = append(messages, entry) - } - - ctx = context.WithoutCancel(ctx) - resp, err := a.Client.SendMessageBatchWithContext( - ctx, - &sqs.SendMessageBatchInput{ - Entries: messages, - QueueUrl: aws.String(queue), - }, - ) - - // if a message fails, then the message ID is used to select the - // original data that was in the message. this data is put in a - // new slice and recursively input into the function. 
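-	// Like PublishBatch, SendMessageBatch reports per-entry failures in
-	// resp.Failed even when err is nil, so failures are checked first.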
- if resp.Failed != nil { - var retry [][]byte - for _, r := range resp.Failed { - idx, err := strconv.Atoi(aws.StringValue(r.Id)) - if err != nil { - return nil, fmt.Errorf("send_message_batch: queue %s: %v", queue, err) - } - - retry = append(retry, data[idx]) - } - - if len(retry) > 0 { - return a.SendMessageBatch(ctx, queue, retry) - } - } - - if err != nil { - return nil, fmt.Errorf("send_message_batch: queue %s: %v", queue, err) - } - - return resp, nil -} diff --git a/internal/aws/sqs/sqs_test.go b/internal/aws/sqs/sqs_test.go deleted file mode 100644 index 297232ae..00000000 --- a/internal/aws/sqs/sqs_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package sqs - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sqs" - "github.com/aws/aws-sdk-go/service/sqs/sqsiface" -) - -type mockedSendMessage struct { - sqsiface.SQSAPI - Resp sqs.SendMessageOutput -} - -func (m mockedSendMessage) SendMessageWithContext(ctx aws.Context, in *sqs.SendMessageInput, opts ...request.Option) (*sqs.SendMessageOutput, error) { - return &m.Resp, nil -} - -func TestSendMessage(t *testing.T) { - tests := []struct { - resp sqs.SendMessageOutput - expected string - }{ - { - resp: sqs.SendMessageOutput{ - MessageId: aws.String("foo"), - }, - expected: "foo", - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedSendMessage{Resp: test.resp}, - } - - resp, err := a.SendMessage(ctx, "", []byte("")) - if err != nil { - t.Fatalf("%v", err) - } - - if *resp.MessageId != test.expected { - t.Errorf("expected %+v, got %s", test.expected, *resp.MessageId) - } - } -} - -type mockedSendMessageBatch struct { - sqsiface.SQSAPI - Resp sqs.SendMessageBatchOutput -} - -func (m mockedSendMessageBatch) SendMessageBatchWithContext(ctx aws.Context, in *sqs.SendMessageBatchInput, opts ...request.Option) (*sqs.SendMessageBatchOutput, error) { - return &m.Resp, nil -} - -func TestSendMessageBatch(t *testing.T) { - tests := []struct { - resp sqs.SendMessageBatchOutput - expected []string - }{ - { - resp: sqs.SendMessageBatchOutput{ - Successful: []*sqs.SendMessageBatchResultEntry{ - { - MessageId: aws.String("foo"), - }, - { - MessageId: aws.String("bar"), - }, - { - MessageId: aws.String("baz"), - }, - }, - }, - expected: []string{"foo", "bar", "baz"}, - }, - } - - ctx := context.TODO() - - for _, test := range tests { - a := API{ - mockedSendMessageBatch{Resp: test.resp}, - } - - resp, err := a.SendMessageBatch(ctx, "", [][]byte{}) - if err != nil { - t.Fatalf("%v", err) - } - - for idx, resp := range resp.Successful { - if *resp.MessageId != test.expected[idx] { - t.Errorf("expected %+v, got %s", test.expected[idx], *resp.MessageId) - } - } - } -} diff --git a/internal/bufio/bufio.go b/internal/bufio/bufio.go index 179761ba..b57d6c4d 100644 --- a/internal/bufio/bufio.go +++ b/internal/bufio/bufio.go @@ -8,7 +8,7 @@ import ( "os" "strconv" - "github.com/brexhq/substation/internal/media" + "github.com/brexhq/substation/v2/internal/media" "github.com/klauspost/compress/snappy" "github.com/klauspost/compress/zstd" ) diff --git a/internal/bufio/example_test.go b/internal/bufio/example_test.go index 106dac1d..cd9a7223 100644 --- a/internal/bufio/example_test.go +++ b/internal/bufio/example_test.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/brexhq/substation/internal/bufio" + "github.com/brexhq/substation/v2/internal/bufio" ) func ExampleNewScanner_setup() { diff --git a/internal/config/config.go 
b/internal/config/config.go index 11df2e45..e0a53e01 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -15,9 +15,32 @@ package config import ( + "context" "encoding/json" + "fmt" + "os" + "strconv" - "github.com/brexhq/substation/config" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-xray-sdk-go/instrumentation/awsv2" + + "github.com/brexhq/substation/v2/config" +) + +var ( + // ErrInvalidFactoryInput is returned when an unsupported input is referenced in any factory function. + ErrInvalidFactoryInput = fmt.Errorf("invalid factory input") + + // ErrMissingRequiredOption is returned when a component does not have the required options to properly run. + ErrMissingRequiredOption = fmt.Errorf("missing required option") + + // ErrInvalidOption is returned when an invalid option is received in a constructor. + ErrInvalidOption = fmt.Errorf("invalid option") ) type Object struct { @@ -30,13 +53,6 @@ type Object struct { BatchKey string `json:"batch_key"` } -type AWS struct { - // Region is the AWS region that the AWS client will use. - Region string `json:"region"` - // RoleARN is the AWS IAM role that the AWS client will assume. - RoleARN string `json:"role_arn"` -} - type Metric struct { // Name is the name of the metric. Name string `json:"name"` @@ -58,9 +74,6 @@ type Retry struct { // Delay is the amount of time to wait before retrying the action. This can be // combined with the Count field to create a backoff strategy. Delay string `json:"delay"` - // ErrorMessages are regular expressions that match error messages and determine - // if the action should be retried. - ErrorMessages []string `json:"error_messages"` } type Batch struct { @@ -82,3 +95,66 @@ func Decode(input, output interface{}) error { } return json.Unmarshal(b, output) } + +type AWS struct { + // ARN is the AWS resource that the action will interact with. + ARN string `json:"arn"` + // AssumeRoleARN is the ARN of the role that the action will assume. + AssumeRoleARN string `json:"assume_role_arn"` +} + +// NewAWS returns a valid AWS SDK v2 configuration. +func NewAWS(ctx context.Context, cfg AWS) (aws.Config, error) { + arnx, _ := arn.Parse(cfg.ARN) // Ignore missing ARN errors. + arny, _ := arn.Parse(cfg.AssumeRoleARN) // Ignore missing ARN errors. + + var region string + if arnx.Region != "" { + region = arnx.Region + } else if arny.Region != "" { + region = arny.Region + } else if v, ok := os.LookupEnv("AWS_REGION"); ok { + region = v + } else if v, ok := os.LookupEnv("AWS_DEFAULT_REGION"); ok { + region = v + } + + var creds aws.CredentialsProvider // nil is a valid default. + if cfg.AssumeRoleARN != "" { + conf, err := awsconfig.LoadDefaultConfig(ctx, + awsconfig.WithRegion(region), + ) + if err != nil { + return aws.Config{}, err + } + + stsSvc := sts.NewFromConfig(conf) + creds = stscreds.NewAssumeRoleProvider(stsSvc, cfg.AssumeRoleARN) + } + + maxRetry := 3 // Matches the standard retryer. 
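+	// AWS_MAX_ATTEMPTS mirrors the environment variable honored by the AWS
+	// SDK and overrides the default retry count.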
+ if v, ok := os.LookupEnv("AWS_MAX_ATTEMPTS"); ok { + m, err := strconv.Atoi(v) + if err != nil { + return aws.Config{}, err + } + + maxRetry = m + } + + conf, err := awsconfig.LoadDefaultConfig(ctx, + awsconfig.WithRegion(region), + awsconfig.WithCredentialsProvider(creds), + awsconfig.WithRetryer(func() aws.Retryer { + return retry.NewStandard(func(o *retry.StandardOptions) { + o.MaxAttempts = maxRetry + }) + }), + ) + + if _, ok := os.LookupEnv("AWS_XRAY_DAEMON_ADDRESS"); ok { + awsv2.AWSV2Instrumentor(&conf.APIOptions) + } + + return conf, err +} diff --git a/internal/errors/errors.go b/internal/errors/errors.go deleted file mode 100644 index b41e0215..00000000 --- a/internal/errors/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -package errors - -import "fmt" - -// ErrInvalidFactoryInput is returned when an unsupported input is referenced in any factory function. -var ErrInvalidFactoryInput = fmt.Errorf("invalid factory input") - -// ErrMissingRequiredOption is returned when a component does not have the required options to properly run. -var ErrMissingRequiredOption = fmt.Errorf("missing required option") - -// ErrInvalidOption is returned when an invalid option is received in a constructor. -var ErrInvalidOption = fmt.Errorf("invalid option") diff --git a/internal/file/example_test.go b/internal/file/example_test.go index eea009e2..9a2e363c 100644 --- a/internal/file/example_test.go +++ b/internal/file/example_test.go @@ -7,7 +7,7 @@ import ( "os" "strings" - "github.com/brexhq/substation/internal/file" + "github.com/brexhq/substation/v2/internal/file" ) func ExampleGet_local() { diff --git a/internal/file/file.go b/internal/file/file.go index d3477fc8..027d5507 100644 --- a/internal/file/file.go +++ b/internal/file/file.go @@ -10,15 +10,17 @@ import ( "strings" "time" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/s3manager" - "github.com/brexhq/substation/internal/http" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/google/uuid" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/http" ) var ( httpClient http.HTTP - s3downloader s3manager.DownloaderAPI + s3downloader *manager.Downloader ) // errEmptyFile is returned when Get is called but finds an empty file. @@ -87,15 +89,27 @@ func Get(ctx context.Context, location string) (string, error) { return dst.Name(), nil } + //nolint: nestif // ignore nesting complexity if strings.HasPrefix(location, "s3://") { - if !s3downloader.IsEnabled() { - s3downloader.Setup(aws.Config{}) + if s3downloader == nil { + awsCfg, err := iconfig.NewAWS(ctx, iconfig.AWS{}) + if err != nil { + return dst.Name(), fmt.Errorf("get %s: %v", location, err) + } + + c := s3.NewFromConfig(awsCfg) + s3downloader = manager.NewDownloader(c) } // "s3://bucket/key" becomes ["bucket" "key"] paths := strings.SplitN(strings.TrimPrefix(location, "s3://"), "/", 2) - size, err := s3downloader.Download(ctx, paths[0], paths[1], dst) + // Download the file from S3. 
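+	// The context is detached so an in-flight download is not interrupted
+	// if the caller's context is canceled mid-transfer.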
+ ctx = context.WithoutCancel(ctx) + size, err := s3downloader.Download(ctx, dst, &s3.GetObjectInput{ + Bucket: &paths[0], + Key: &paths[1], + }) if err != nil { return dst.Name(), fmt.Errorf("get %s: %v", location, err) } diff --git a/internal/kv/aws_dynamodb.go b/internal/kv/aws_dynamodb.go index 106dccb4..4ca22975 100644 --- a/internal/kv/aws_dynamodb.go +++ b/internal/kv/aws_dynamodb.go @@ -2,18 +2,19 @@ package kv import ( "context" + "errors" "fmt" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/brexhq/substation/config" - iaws "github.com/brexhq/substation/internal/aws" - idynamodb "github.com/brexhq/substation/internal/aws/dynamodb" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) // kvAWSDynamoDB is a read-write key-value store that is backed by an AWS DynamoDB table. @@ -23,10 +24,7 @@ import ( // // - Does not support Global Secondary Indexes type kvAWSDynamoDB struct { - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` - // TableName is the DynamoDB table that items are read and written to. - TableName string `json:"table_name"` + AWS iconfig.AWS `json:"aws"` Attributes struct { // PartitionKey is the table's parition key attribute. // @@ -49,7 +47,7 @@ type kvAWSDynamoDB struct { // // This is optional and defaults to false (eventually consistent reads). ConsistentRead bool `json:"consistent_read"` - client idynamodb.API + client *dynamodb.Client } // Create a new AWS DynamoDB KV store. @@ -59,8 +57,8 @@ func newKVAWSDynamoDB(cfg config.Config) (*kvAWSDynamoDB, error) { return nil, err } - if store.TableName == "" { - return nil, fmt.Errorf("kv: aws_dynamodb: table %+v: %v", &store, errors.ErrMissingRequiredOption) + if store.AWS.ARN == "" { + return nil, fmt.Errorf("kv: aws_dynamodb: aws.arn %+v: %v", &store, iconfig.ErrMissingRequiredOption) } return &store, nil @@ -72,42 +70,42 @@ func (store *kvAWSDynamoDB) String() string { // Lock adds an item to the DynamoDB table with a conditional check. func (kv *kvAWSDynamoDB) Lock(ctx context.Context, key string, ttl int64) error { - attr := map[string]interface{}{ + attrEx := expression. + AttributeNotExists(expression.Name(kv.Attributes.PartitionKey)). + Or(expression.Name(kv.Attributes.TTL).LessThanEqual(expression.Value(time.Now().Unix()))) + + m := map[string]interface{}{ kv.Attributes.PartitionKey: key, kv.Attributes.TTL: ttl, } if kv.Attributes.SortKey != "" { - attr[kv.Attributes.SortKey] = "substation:kv_store" - } - - // Since the sort key is optional and static, it is not included in the check. 
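-	// The lock is acquired only when the item does not exist or its TTL
-	// has already expired.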
- exp := "attribute_not_exists(#pk) OR #ttl <= :now" - expAttrNames := map[string]*string{ - "#pk": &kv.Attributes.PartitionKey, - "#ttl": &kv.Attributes.TTL, - } - expAttrVals := map[string]interface{}{ - ":now": time.Now().Unix(), + m[kv.Attributes.SortKey] = "substation:kv_store" } - a, err := dynamodbattribute.MarshalMap(attr) + i, err := attributevalue.MarshalMap(m) if err != nil { return err } - v, err := dynamodbattribute.MarshalMap(expAttrVals) + expr, err := expression.NewBuilder().WithCondition(attrEx).Build() if err != nil { return err } // If the item already exists and the TTL has not expired, then this returns ErrNoLock. The // caller is expected to handle this error and retry the call if necessary. - if _, err := kv.client.PutItemWithCondition(ctx, kv.TableName, a, exp, expAttrNames, v); err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "ConditionalCheckFailedException" { - return ErrNoLock - } + ctx = context.WithoutCancel(ctx) + if _, err := kv.client.PutItem(ctx, &dynamodb.PutItemInput{ + TableName: aws.String(kv.AWS.ARN), + Item: i, + ConditionExpression: expr.Condition(), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + }); err != nil { + var ccf *types.ConditionalCheckFailedException + if errors.As(err, &ccf) { + return ErrNoLock } return err @@ -125,12 +123,16 @@ func (store *kvAWSDynamoDB) Unlock(ctx context.Context, key string) error { m[store.Attributes.SortKey] = "substation:kv_store" } - item, err := dynamodbattribute.MarshalMap(m) + item, err := attributevalue.MarshalMap(m) if err != nil { return err } - if _, err := store.client.DeleteItem(ctx, store.TableName, item); err != nil { + ctx = context.WithoutCancel(ctx) + if _, err := store.client.DeleteItem(ctx, &dynamodb.DeleteItemInput{ + TableName: aws.String(store.AWS.ARN), + Key: item, + }); err != nil { return err } @@ -152,14 +154,23 @@ func (store *kvAWSDynamoDB) Get(ctx context.Context, key string) (interface{}, e m[store.Attributes.SortKey] = "substation:kv_store" } - resp, err := store.client.GetItem(ctx, store.TableName, m, store.ConsistentRead) + item, err := attributevalue.MarshalMap(m) if err != nil { - return "", err + return nil, err + } + + ctx = context.WithoutCancel(ctx) + resp, err := store.client.GetItem(ctx, &dynamodb.GetItemInput{ + TableName: aws.String(store.AWS.ARN), + Key: item, + }) + if err != nil { + return nil, err } if val, found := resp.Item[store.Attributes.Value]; found { var i interface{} - if err := dynamodbattribute.Unmarshal(val, &i); err != nil { + if err := attributevalue.Unmarshal(val, &i); err != nil { return nil, err } @@ -169,7 +180,6 @@ func (store *kvAWSDynamoDB) Get(ctx context.Context, key string) (interface{}, e return nil, nil } -// SetWithTTL adds an item to the DynamoDB table. 
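+// Set adds an item to the DynamoDB table.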
func (store *kvAWSDynamoDB) Set(ctx context.Context, key string, val interface{}) error { m := map[string]interface{}{ store.Attributes.PartitionKey: key, @@ -180,12 +190,16 @@ func (store *kvAWSDynamoDB) Set(ctx context.Context, key string, val interface{} m[store.Attributes.SortKey] = "substation:kv_store" } - record, err := dynamodbattribute.MarshalMap(m) + item, err := attributevalue.MarshalMap(m) if err != nil { return err } - if _, err := store.client.PutItem(ctx, store.TableName, record); err != nil { + ctx = context.WithoutCancel(ctx) + if _, err := store.client.PutItem(ctx, &dynamodb.PutItemInput{ + TableName: aws.String(store.AWS.ARN), + Item: item, + }); err != nil { return err } @@ -195,7 +209,7 @@ func (store *kvAWSDynamoDB) Set(ctx context.Context, key string, val interface{} // SetWithTTL adds an item to the DynamoDB table with a time-to-live (TTL) attribute. func (store *kvAWSDynamoDB) SetWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { if store.Attributes.TTL == "" { - return errors.ErrMissingRequiredOption + return iconfig.ErrMissingRequiredOption } m := map[string]interface{}{ @@ -208,12 +222,16 @@ func (store *kvAWSDynamoDB) SetWithTTL(ctx context.Context, key string, val inte m[store.Attributes.SortKey] = "substation:kv_store" } - record, err := dynamodbattribute.MarshalMap(m) + item, err := attributevalue.MarshalMap(m) if err != nil { return err } - if _, err := store.client.PutItem(ctx, store.TableName, record); err != nil { + ctx = context.WithoutCancel(ctx) + if _, err := store.client.PutItem(ctx, &dynamodb.PutItemInput{ + TableName: aws.String(store.AWS.ARN), + Item: item, + }); err != nil { return err } @@ -225,71 +243,91 @@ func (store *kvAWSDynamoDB) SetWithTTL(ctx context.Context, key string, val inte // updated with the new value. func (store *kvAWSDynamoDB) SetAddWithTTL(ctx context.Context, key string, val interface{}, ttl int64) error { if store.Attributes.Value == "" { - return errors.ErrMissingRequiredOption + return iconfig.ErrMissingRequiredOption } - input := &dynamodb.UpdateItemInput{ - TableName: aws.String(store.TableName), - Key: map[string]*dynamodb.AttributeValue{ - store.Attributes.PartitionKey: { - S: aws.String(key), - }, - }, - ExpressionAttributeNames: map[string]*string{ - "#v": aws.String(store.Attributes.Value), - }, - ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{}, - UpdateExpression: aws.String("ADD #v :value"), - } + // DynamoDB supports string, number, and binary data types for sets, and + // numbers are represented as strings. + var av types.AttributeValue + switch v := val.(type) { + case float64: + av = &types.AttributeValueMemberNS{Value: []string{fmt.Sprintf("%f", v)}} + case []byte: + av = &types.AttributeValueMemberBS{Value: [][]byte{v}} + case string: + av = &types.AttributeValueMemberSS{Value: []string{v}} + case []interface{}: + // The slice of interfaces must be converted depending on the type of all elements. + // Precedence is given to float64, then []byte, and finally string. + var ns []string + var bs [][]byte + + for _, i := range v { + switch i := i.(type) { + case float64: + ns = append(ns, fmt.Sprintf("%f", i)) + case []float64: + for _, n := range v { + ns = append(ns, fmt.Sprintf("%f", n)) + } + case []byte: + bs = append(bs, i) + case [][]byte: + bs = append(bs, i...) 
+ } + } - if store.Attributes.SortKey != "" { - input.Key[store.Attributes.SortKey] = &dynamodb.AttributeValue{ - S: aws.String("substation:kv_store"), + if len(ns) == len(v) { + av = &types.AttributeValueMemberNS{Value: ns} + + break + } else if len(bs) == len(v) { + av = &types.AttributeValueMemberBS{Value: bs} + + break + } + + // If the elements are not uniform, then convert all elements to strings. + var ss []string + for _, i := range v { + ss = append(ss, fmt.Sprintf("%v", i)) } + + av = &types.AttributeValueMemberSS{Value: ss} } // Overwrite the TTL value if the attribute exists. + updateEx := expression.Add(expression.Name(store.Attributes.Value), expression.Value(av)) if store.Attributes.TTL != "" { - input.ExpressionAttributeValues[":ttl"] = &dynamodb.AttributeValue{ - N: aws.String(fmt.Sprintf("%d", ttl)), - } - input.ExpressionAttributeNames["#ttl"] = aws.String(store.Attributes.TTL) + updateEx = updateEx.Set(expression.Name(store.Attributes.TTL), expression.Value(ttl)) + } - // Concatenates the TTL attribute to the UpdateExpression. This produces - // the string " ADD #v :value SET #ttl = :ttl". - input.UpdateExpression = aws.String(fmt.Sprintf("%s SET #ttl = :ttl", *input.UpdateExpression)) + m := map[string]interface{}{ + store.Attributes.PartitionKey: key, } - // DynamoDB supports string, number, and binary data types for sets, and - // numbers are represented as strings. - var av dynamodb.AttributeValue - switch v := val.(type) { - case string: - av.SS = []*string{aws.String(v)} - case []string: - for _, s := range v { - av.SS = append(av.SS, aws.String(s)) - } - case int, float64: - av.NS = []*string{aws.String(fmt.Sprintf("%d", v))} - case []int: - for _, n := range v { - av.NS = append(av.NS, aws.String(fmt.Sprintf("%d", n))) - } - case []float64: - for _, n := range v { - av.NS = append(av.NS, aws.String(fmt.Sprintf("%f", n))) - } - case []byte: - av.BS = [][]byte{v} - case [][]byte: - av.BS = append(av.BS, v...) + if store.Attributes.SortKey != "" { + m[store.Attributes.SortKey] = "substation:kv_store" } - // Referenced in the UpdateExpression ADD operator. - input.ExpressionAttributeValues[":value"] = &av + item, err := attributevalue.MarshalMap(m) + if err != nil { + return err + } - if _, err := store.client.UpdateItem(ctx, input); err != nil { + expr, err := expression.NewBuilder().WithUpdate(updateEx).Build() + if err != nil { + return err + } + + ctx = context.WithoutCancel(ctx) + if _, err := store.client.UpdateItem(ctx, &dynamodb.UpdateItemInput{ + TableName: aws.String(store.AWS.ARN), + Key: item, + UpdateExpression: expr.Update(), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + }); err != nil { return err } @@ -298,26 +336,26 @@ func (store *kvAWSDynamoDB) SetAddWithTTL(ctx context.Context, key string, val i // IsEnabled returns true if the DynamoDB client is ready for use. func (store *kvAWSDynamoDB) IsEnabled() bool { - return store.client.IsEnabled() + return store.client != nil } // Setup creates a new DynamoDB client. func (store *kvAWSDynamoDB) Setup(ctx context.Context) error { - if store.TableName == "" || store.Attributes.PartitionKey == "" { - return errors.ErrMissingRequiredOption + if store.AWS.ARN == "" || store.Attributes.PartitionKey == "" { + return iconfig.ErrMissingRequiredOption } // Avoids unnecessary setup. 
- if store.client.IsEnabled() { + if store.client != nil { return nil } - store.client.Setup(iaws.Config{ - Region: store.AWS.Region, - RoleARN: store.AWS.RoleARN, - MaxRetries: store.Retry.Count, - RetryableErrors: store.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, store.AWS) + if err != nil { + return err + } + + store.client = dynamodb.NewFromConfig(awsCfg) return nil } diff --git a/internal/kv/csv_file.go b/internal/kv/csv_file.go index 79bc0af3..2b4fa69a 100644 --- a/internal/kv/csv_file.go +++ b/internal/kv/csv_file.go @@ -11,10 +11,10 @@ import ( "sync" "unicode/utf8" - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/file" ) // errCSVFileColumnNotFound is returned when the column is not found in the CSV header. @@ -65,12 +65,12 @@ type kvCSVFile struct { // Create a new CSV file KV store. func newKVCSVFile(cfg config.Config) (*kvCSVFile, error) { var store kvCSVFile - if err := _config.Decode(cfg.Settings, &store); err != nil { + if err := iconfig.Decode(cfg.Settings, &store); err != nil { return nil, err } if store.File == "" || store.Column == "" { - return nil, fmt.Errorf("kv: csv: options %+v: %v", &store, errors.ErrMissingRequiredOption) + return nil, fmt.Errorf("kv: csv: options %+v: %v", &store, iconfig.ErrMissingRequiredOption) } return &store, nil diff --git a/internal/kv/example_test.go b/internal/kv/example_test.go index 13806812..f322e981 100644 --- a/internal/kv/example_test.go +++ b/internal/kv/example_test.go @@ -5,8 +5,8 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/kv" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/internal/kv" ) func Example_memory() { diff --git a/internal/kv/json_file.go b/internal/kv/json_file.go index 73d9ef2d..7902bd70 100644 --- a/internal/kv/json_file.go +++ b/internal/kv/json_file.go @@ -8,11 +8,12 @@ import ( "strings" "sync" - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" "github.com/tidwall/gjson" + + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/file" ) // errJSONFileInvalid is returned when the file contains invalid JSON. @@ -35,13 +36,13 @@ type kvJSONFile struct { // Create a new JSON file KV store. 
func newKVJSONFile(cfg config.Config) (*kvJSONFile, error) { var store kvJSONFile - if err := _config.Decode(cfg.Settings, &store); err != nil { + if err := iconfig.Decode(cfg.Settings, &store); err != nil { return nil, err } store.mu = new(sync.Mutex) if store.File == "" { - return nil, fmt.Errorf("kv: json: options %+v: %v", &store, errors.ErrMissingRequiredOption) + return nil, fmt.Errorf("kv: json: options %+v: %v", &store, iconfig.ErrMissingRequiredOption) } return &store, nil diff --git a/internal/kv/kv.go b/internal/kv/kv.go index 717bcc37..10dd14b6 100644 --- a/internal/kv/kv.go +++ b/internal/kv/kv.go @@ -6,8 +6,9 @@ import ( "fmt" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) var ( @@ -77,7 +78,7 @@ func New(cfg config.Config) (Storer, error) { case "text_file": return newKVTextFile(cfg) default: - return nil, fmt.Errorf("kv_store: %s: %v", t, errors.ErrInvalidFactoryInput) + return nil, fmt.Errorf("kv_store: %s: %v", t, iconfig.ErrInvalidFactoryInput) } } @@ -119,7 +120,7 @@ func NewLocker(cfg config.Config) (Locker, error) { case "memory": return newKVMemory(cfg) default: - return nil, fmt.Errorf("kv_store locker: %s: %v", t, errors.ErrInvalidFactoryInput) + return nil, fmt.Errorf("kv_store locker: %s: %v", t, iconfig.ErrInvalidFactoryInput) } } diff --git a/internal/kv/memory.go b/internal/kv/memory.go index b59c524c..dba0ec92 100644 --- a/internal/kv/memory.go +++ b/internal/kv/memory.go @@ -7,8 +7,9 @@ import ( "sync" "time" - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) // kvMemory is a read-write key-value store that is stored in memory. @@ -29,7 +30,7 @@ type kvMemory struct { // Create a new memory KV store. func newKVMemory(cfg config.Config) (*kvMemory, error) { var store kvMemory - if err := _config.Decode(cfg.Settings, &store); err != nil { + if err := iconfig.Decode(cfg.Settings, &store); err != nil { return nil, err } diff --git a/internal/kv/mmdb.go b/internal/kv/mmdb.go index 192f818c..e6f0c72a 100644 --- a/internal/kv/mmdb.go +++ b/internal/kv/mmdb.go @@ -9,11 +9,12 @@ import ( "os" "sync" - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" "github.com/oschwald/maxminddb-golang" + + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/file" ) // errMMDBKeyMustBeAddr is returned when the key used in a Get call is not a valid @@ -37,12 +38,12 @@ type kvMMDB struct { // Create a new MMDB KV store. 
func newKVMMDB(cfg config.Config) (*kvMMDB, error) { var store kvMMDB - if err := _config.Decode(cfg.Settings, &store); err != nil { + if err := iconfig.Decode(cfg.Settings, &store); err != nil { return nil, err } if store.File == "" { - return nil, fmt.Errorf("kv: mmdb: options %+v: %v", &store, errors.ErrMissingRequiredOption) + return nil, fmt.Errorf("kv: mmdb: options %+v: %v", &store, iconfig.ErrMissingRequiredOption) } return &store, nil diff --git a/internal/kv/text_file.go b/internal/kv/text_file.go index bb09fda4..73d86f58 100644 --- a/internal/kv/text_file.go +++ b/internal/kv/text_file.go @@ -7,10 +7,10 @@ import ( "os" "sync" - "github.com/brexhq/substation/config" - _config "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/file" ) // kvTextFile is a read-only key-value store that is derived from a newline delimited @@ -39,12 +39,12 @@ type kvTextFile struct { // Create a new text file KV store. func newKVTextFile(cfg config.Config) (*kvTextFile, error) { var store kvTextFile - if err := _config.Decode(cfg.Settings, &store); err != nil { + if err := iconfig.Decode(cfg.Settings, &store); err != nil { return nil, err } if store.File == "" { - return nil, fmt.Errorf("kv: text_file: options %+v: %v", &store, errors.ErrMissingRequiredOption) + return nil, fmt.Errorf("kv: text_file: options %+v: %v", &store, iconfig.ErrMissingRequiredOption) } return &store, nil diff --git a/internal/media/example_test.go b/internal/media/example_test.go index 7cb63878..7c09e2e7 100644 --- a/internal/media/example_test.go +++ b/internal/media/example_test.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/brexhq/substation/internal/media" + "github.com/brexhq/substation/v2/internal/media" ) func ExampleBytes() { diff --git a/internal/metrics/README.md b/internal/metrics/README.md index 4053cac8..72327447 100644 --- a/internal/metrics/README.md +++ b/internal/metrics/README.md @@ -2,4 +2,4 @@ Contains interfaces and methods for generating application metrics and sending them to external services. Metrics can be generated anywhere in the application and optionally sent to a single external service. The naming convention for metrics names and attributes is PascalCase, also known as upper camel case (e.g. UpperCamelCase). -Information for each metrics generator is available in the [GoDoc](https://pkg.go.dev/github.com/brexhq/substation/internal/metrics). +Information for each metrics generator is available in the [GoDoc](https://pkg.go.dev/github.com/brexhq/substation/v2/internal/metrics). 
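All of these files move to the `/v2` module path, and `internal/kv/kv.go` keeps a single factory (`kv.New`) that switches on `config.Config.Type`. A minimal sketch in the style of `Example_memory` from `internal/kv/example_test.go`; the `internal/` packages are only importable from inside the module, and `Get`/`Set` are assumed from the `Storer` interface (only `Setup`, `IsEnabled`, and `SetAddWithTTL` appear in this diff). The capacity value mirrors the `memory` default in `substation.libsonnet`:

```go
package kv_test

import (
	"context"
	"fmt"

	"github.com/brexhq/substation/v2/config"
	"github.com/brexhq/substation/v2/internal/kv"
)

func Example_memory() {
	// "memory" routes through the same factory switch as "aws_dynamodb",
	// "csv_file", "json_file", "mmdb", and "text_file".
	cfg := config.Config{
		Type: "memory",
		Settings: map[string]interface{}{
			"capacity": 1024,
		},
	}

	store, err := kv.New(cfg)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	if err := store.Setup(ctx); err != nil {
		panic(err)
	}

	// Get and Set are assumed Storer methods; they are not shown in this diff.
	if err := store.Set(ctx, "foo", "bar"); err != nil {
		panic(err)
	}

	v, err := store.Get(ctx, "foo")
	if err != nil {
		panic(err)
	}

	fmt.Println(v)
	// Output: bar
}
```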
diff --git a/internal/metrics/aws_cloudwatch_embedded_metrics.go b/internal/metrics/aws_cloudwatch_embedded_metrics.go index f1c63b86..b94cd98c 100644 --- a/internal/metrics/aws_cloudwatch_embedded_metrics.go +++ b/internal/metrics/aws_cloudwatch_embedded_metrics.go @@ -5,9 +5,11 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" "github.com/tidwall/sjson" + + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type awsCloudWatchEmbeddedMetricsConfig struct{} diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go index ee0006aa..6db623fb 100644 --- a/internal/metrics/metrics.go +++ b/internal/metrics/metrics.go @@ -4,8 +4,9 @@ import ( "context" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) const ( @@ -44,6 +45,6 @@ func New(ctx context.Context, cfg config.Config) (Generator, error) { case "aws_cloudwatch_embedded_metrics": return newAWSCloudWatchEmbeddedMetrics(ctx, cfg) default: - return nil, fmt.Errorf("metrics: new: type %q settings %+v: %v", cfg.Type, cfg.Settings, errors.ErrInvalidFactoryInput) + return nil, fmt.Errorf("metrics: new: type %q settings %+v: %v", cfg.Type, cfg.Settings, iconfig.ErrInvalidFactoryInput) } } diff --git a/internal/secrets/aws_secrets_manager.go b/internal/secrets/aws_secrets_manager.go index 97323807..fb71aa0b 100644 --- a/internal/secrets/aws_secrets_manager.go +++ b/internal/secrets/aws_secrets_manager.go @@ -5,19 +5,17 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/secretsmanager" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type awsSecretsManagerConfig struct { - ID string `json:"id"` - Name string `json:"name"` - TTLOffset string `json:"ttl_offset"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` + ID string `json:"id"` + TTLOffset string `json:"ttl_offset"` + AWS iconfig.AWS `json:"aws"` } func (c *awsSecretsManagerConfig) Decode(in interface{}) error { @@ -26,25 +24,24 @@ func (c *awsSecretsManagerConfig) Decode(in interface{}) error { func (c *awsSecretsManagerConfig) Validate() error { if c.ID == "" { - return fmt.Errorf("id: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("id: %v", iconfig.ErrMissingRequiredOption) } - if c.Name == "" { - return fmt.Errorf("name: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) } return nil } type awsSecretsManager struct { - conf awsSecretsManagerConfig + conf awsSecretsManagerConfig + client *secretsmanager.Client ttl int64 - // client is safe for concurrent access. 
- client secretsmanager.API } -func newAWSSecretsManager(_ context.Context, cfg config.Config) (*awsSecretsManager, error) { +func newAWSSecretsManager(ctx context.Context, cfg config.Config) (*awsSecretsManager, error) { conf := awsSecretsManagerConfig{} if err := conf.Decode(cfg.Settings); err != nil { return nil, fmt.Errorf("secrets: aws_secrets_manager: %v", err) @@ -61,7 +58,7 @@ func newAWSSecretsManager(_ context.Context, cfg config.Config) (*awsSecretsMana dur, err := time.ParseDuration(ttl) if err != nil { - return nil, fmt.Errorf("secrets: environment_variable: %v", err) + return nil, fmt.Errorf("secrets: aws_secrets_manager: %v", err) } c := &awsSecretsManager{ @@ -69,26 +66,29 @@ func newAWSSecretsManager(_ context.Context, cfg config.Config) (*awsSecretsMana ttl: time.Now().Add(dur).Unix(), } - c.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("secrets: aws_secrets_manager: %v", err) + } + + c.client = secretsmanager.NewFromConfig(awsCfg) return c, nil } func (c *awsSecretsManager) Retrieve(ctx context.Context) error { - v, err := c.client.GetSecret(ctx, c.conf.Name) + ctx = context.WithoutCancel(ctx) + v, err := c.client.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{ + SecretId: &c.conf.AWS.ARN, + }) if err != nil { - return fmt.Errorf("secrets: environment_variable: name %s: %v", c.conf.Name, err) + return fmt.Errorf("secrets: aws_secrets_manager: %v", err) } // SetWithTTL isn't used here because the TTL is managed by // transform/utility_secret.go. - if err := cache.Set(ctx, c.conf.ID, v); err != nil { - return fmt.Errorf("secrets: environment_variable: id %s: %v", c.conf.ID, err) + if err := cache.Set(ctx, c.conf.ID, v.SecretString); err != nil { + return fmt.Errorf("secrets: aws_secrets_manager: id %s: %v", c.conf.ID, err) } return nil diff --git a/internal/secrets/environment_variable.go b/internal/secrets/environment_variable.go index b9ec740f..9bcc8ea2 100644 --- a/internal/secrets/environment_variable.go +++ b/internal/secrets/environment_variable.go @@ -6,9 +6,9 @@ import ( "os" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type environmentVariableConfig struct { @@ -23,11 +23,11 @@ func (c *environmentVariableConfig) Decode(in interface{}) error { func (c *environmentVariableConfig) Validate() error { if c.ID == "" { - return fmt.Errorf("id: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("id: %v", iconfig.ErrMissingRequiredOption) } if c.Name == "" { - return fmt.Errorf("name: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("name: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/internal/secrets/secrets.go b/internal/secrets/secrets.go index 866c7c53..1656d89f 100644 --- a/internal/secrets/secrets.go +++ b/internal/secrets/secrets.go @@ -7,9 +7,10 @@ import ( "regexp" "strings" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" + "github.com/brexhq/substation/v2/config" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/kv" ) var ( @@ -35,7 +36,7 @@ func New(ctx 
context.Context, cfg config.Config) (Retriever, error) {
 	case "environment_variable":
 		return newEnvironmentVariable(ctx, cfg)
 	default:
-		return nil, fmt.Errorf("secrets: new: type %q settings %+v: %v", cfg.Type, cfg.Settings, errors.ErrInvalidFactoryInput)
+		return nil, fmt.Errorf("secrets: new: type %q settings %+v: %v", cfg.Type, cfg.Settings, iconfig.ErrInvalidFactoryInput)
 	}
 }
diff --git a/internal/secrets/secrets_test.go b/internal/secrets/secrets_test.go
index 7222c5da..5d02445d 100644
--- a/internal/secrets/secrets_test.go
+++ b/internal/secrets/secrets_test.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"testing"

-	"github.com/brexhq/substation/config"
+	"github.com/brexhq/substation/v2/config"
 )

 func TestCollect(t *testing.T) {
diff --git a/message/message.go b/message/message.go
index 1b316691..328c35f2 100644
--- a/message/message.go
+++ b/message/message.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 	"unicode/utf8"

-	"github.com/brexhq/substation/internal/base64"
+	"github.com/brexhq/substation/v2/internal/base64"
 	"github.com/tidwall/gjson"
 	"github.com/tidwall/sjson"
 )
diff --git a/substation.go b/substation.go
index c5455364..061910c0 100644
--- a/substation.go
+++ b/substation.go
@@ -5,9 +5,9 @@ import (
 	"encoding/json"
 	"fmt"

-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
-	"github.com/brexhq/substation/transform"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+	"github.com/brexhq/substation/v2/transform"
 )

 var errNoTransforms = fmt.Errorf("no transforms configured")
diff --git a/build/config/substation.libsonnet b/substation.libsonnet
similarity index 57%
rename from build/config/substation.libsonnet
rename to substation.libsonnet
index 7de38134..8570f557 100644
--- a/build/config/substation.libsonnet
+++ b/substation.libsonnet
@@ -1,12 +1,56 @@
+local helpers = {
+  // If the input is not an array, then this returns it as an array.
+  make_array(i): if !std.isArray(i) then [i] else i,
+  abbv(settings): std.mergePatch(settings, {
+    object: if std.objectHas(settings, 'object') then $.abbv_obj(settings.object) else if std.objectHas(settings, 'obj') then $.abbv_obj(settings.obj) else null,
+    obj: null,
+  }),
+  abbv_obj(s): {
+    source_key: if std.objectHas(s, 'src') then s.src else if std.objectHas(s, 'source_key') then s.source_key else null,
+    src: null,
+    target_key: if std.objectHas(s, 'trg') then s.trg else if std.objectHas(s, 'target_key') then s.target_key else null,
+    trg: null,
+    batch_key: if std.objectHas(s, 'btch') then s.btch else if std.objectHas(s, 'batch_key') then s.batch_key else null,
+  },
+  id(type, settings): std.join('-', [std.md5(type)[:8], std.md5(std.toString(settings))[:8]]),
+};
+
 {
   // Mirrors interfaces from the condition package.
   cnd: $.condition,
   condition: {
-    // Operators.
-    all(i): { operator: 'all', inspectors: $.helpers.make_array(i) },
-    any(i): { operator: 'any', inspectors: $.helpers.make_array(i) },
-    none(i): { operator: 'none', inspectors: $.helpers.make_array(i) },
-    // Inspectors.
+ all(i): $.condition.meta.all({ conditions: helpers.make_array(i) }), + any(i): $.condition.meta.any({ conditions: helpers.make_array(i) }), + none(i): $.condition.meta.none({ conditions: helpers.make_array(i) }), + meta: { + all(settings={}): { + local default = { + object: $.config.object, + conditions: [], + }, + + type: 'meta_all', + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, + any(settings={}): { + local default = { + object: $.config.object, + conditions: [], + }, + + type: 'meta_any', + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, + none(settings={}): { + local default = { + object: $.config.object, + conditions: [], + }, + + type: 'meta_none', + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, + }, fmt: $.condition.format, format: { json(settings={}): { @@ -19,7 +63,7 @@ }, type: 'format_mime', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, num: $.condition.number, @@ -33,21 +77,21 @@ local default = $.condition.number.default, type: 'number_equal_to', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, lt(settings={}): $.condition.number.less_than(settings=settings), less_than(settings={}): { local default = $.condition.number.default, type: 'number_less_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, gt(settings={}): $.condition.number.greater_than(settings=settings), greater_than(settings={}): { local default = $.condition.number.default, type: 'number_greater_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, bitwise: { and(settings={}): { @@ -57,7 +101,7 @@ }, type: 'number_bitwise_and', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, not(settings={}): { local default = { @@ -65,7 +109,7 @@ }, type: 'number_bitwise_not', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, or(settings={}): { local default = { @@ -74,7 +118,7 @@ }, type: 'number_bitwise_or', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, xor(settings={}): { local default = { @@ -83,7 +127,7 @@ }, type: 'number_bitwise_xor', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, len: $.condition.number.length, @@ -98,55 +142,22 @@ local default = $.condition.number.length.default, type: 'number_length_equal_to', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, gt(settings={}): $.condition.number.length.greater_than(settings=settings), greater_than(settings={}): { local default = $.condition.number.length.default, type: 'number_length_greater_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, lt(settings={}): 
$.condition.number.length.less_than(settings=settings), less_than(settings={}): { local default = $.condition.number.length.default, type: 'number_length_less_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - }, - }, - meta: { - condition(settings={}): { - local default = { condition: null }, - - type: 'meta_condition', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - err(settings={}): { - local default = { - inspector: null, - error_messages: [".*"], - }, - - type: 'meta_err', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - for_each(settings={}): { - local default = { - object: $.config.object, - type: null, - inspector: null, + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, - - type: 'meta_for_each', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - negate(settings={}): { - local default = { inspector: null }, - - type: 'meta_negate', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), }, }, net: $.condition.network, @@ -159,55 +170,55 @@ local default = $.condition.network.ip.default, type: 'network_ip_global_unicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, link_local_multicast(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_link_local_multicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, link_local_unicast(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_link_local_unicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, loopback(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_loopback', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, multicast(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_multicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, private(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_private', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, unicast(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_unicast', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, unspecified(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_unspecified', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, valid(settings={}): { local default = $.condition.network.ip.default, type: 'network_ip_valid', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, }, @@ -222,42 +233,42 @@ local default = $.condition.string.default, type: 'string_contains', - settings: 
std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, eq(settings={}): $.condition.string.equal_to(settings=settings), equal_to(settings={}): { local default = $.condition.string.default, type: 'string_equal_to', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, gt(settings={}): $.condition.string.greater_than(settings=settings), greater_than(settings={}): { local default = $.condition.string.default, type: 'string_greater_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, lt(settings={}): $.condition.string.less_than(settings=settings), less_than(settings={}): { local default = $.condition.string.default, type: 'string_less_than', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, prefix(settings={}): $.condition.string.starts_with(settings=settings), starts_with(settings={}): { local default = $.condition.string.default, type: 'string_starts_with', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, suffix(settings={}): $.condition.string.ends_with(settings=settings), ends_with(settings={}): { local default = $.condition.string.default, type: 'string_ends_with', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, match(settings={}): { local default = { @@ -266,7 +277,7 @@ }, type: 'string_match', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, util: $.condition.utility, @@ -286,23 +297,23 @@ array(settings={}): { local type = 'aggregate_from_array', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, str(settings={}): $.transform.aggregate.from.string(settings=settings), string(settings={}): { local type = 'aggregate_from_string', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), separator: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, to: { @@ -310,25 +321,25 @@ array(settings={}): { local type = 'aggregate_to_array', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, batch: $.config.batch, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, str(settings={}): $.transform.aggregate.to.string(settings=settings), string(settings={}): { local type = 'aggregate_to_string', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, separator: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, }, @@ 
-337,57 +348,53 @@ join(settings={}): { local type = 'array_join', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, separator: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, zip(settings={}): { local type = 'array_zip', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, enrich: { aws: { - dynamodb(settings={}): { - local type = 'enrich_aws_dynamodb', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - aws: $.config.aws, - retry: $.config.retry, - table_name: null, - partition_key: null, - sort_key: null, - key_condition_expression: null, - limit: 1, - scan_index_forward: false, + dynamodb: { + query(settings={}): { + local type = 'enrich_aws_dynamodb_query', + local default = { + id: helpers.id(type, settings), + object: $.config.object, + aws: $.config.aws, + attributes: { partition_key: null, sort_key: null }, + limit: 1, + scan_index_forward: false, + }, + + type: type, + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), }, lambda(settings={}): { local type = 'enrich_aws_lambda', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, aws: $.config.aws, - retry: $.config.retry, - function_name: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, dns: { @@ -397,24 +404,24 @@ }, domain_lookup(settings={}): { local type = 'enrich_dns_domain_lookup', - local default = $.transform.enrich.dns.default { id: $.helpers.id(type, settings) }, + local default = $.transform.enrich.dns.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, ip_lookup(settings={}): { local type = 'enrich_dns_ip_lookup', - local default = $.transform.enrich.dns.default { id: $.helpers.id(type, settings) }, + local default = $.transform.enrich.dns.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, txt_lookup(settings={}): { local type = 'enrich_dns_txt_lookup', - local default = $.transform.enrich.dns.default { id: $.helpers.id(type, settings) }, + local default = $.transform.enrich.dns.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, http: { @@ -426,17 +433,17 @@ }, get(settings={}): { local type = 'enrich_http_get', - local default = $.transform.enrich.http.default { id: $.helpers.id(type, settings) }, + local default = $.transform.enrich.http.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, 
helpers.abbv(settings))), }, post(settings={}): { local type = 'enrich_http_post', - local default = $.transform.enrich.http.default { body_key: null, id: $.helpers.id(type, settings) }, + local default = $.transform.enrich.http.default { body_key: null, id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, kv_store: { @@ -446,35 +453,33 @@ kv_store: null, close_kv_store: false, }, - // Deprecated: Use `item.get` or `iget` instead. - get(settings={}): { - local type = 'enrich_kv_store_get', - local default = $.transform.enrich.kv_store.default { id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, - // Deprecated: Use `item.set` or `iset` instead. - set(settings={}): { - local type = 'enrich_kv_store_set', - local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: $.helpers.id(type, settings) }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, iget: $.transform.enrich.kv_store.item.get, iset: $.transform.enrich.kv_store.item.set, item: { - get: $.transform.enrich.kv_store.get, - set: $.transform.enrich.kv_store.set, + get(settings={}): { + local type = 'enrich_kv_store_get', + local default = $.transform.enrich.kv_store.default { id: helpers.id(type, settings) }, + + type: type, + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, + set(settings={}): { + local type = 'enrich_kv_store_set', + local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: helpers.id(type, settings) }, + + type: type, + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, }, - // In future releases this will also be `set.add`. 
- sadd(settings={}): { - local type = 'enrich_kv_store_set_add', - local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: $.helpers.id(type, settings) }, + sadd: $.transform.enrich.kv_store.set.add, + set: { + add(settings={}): { + local type = 'enrich_kv_store_set_add', + local default = $.transform.enrich.kv_store.default { ttl_key: null, ttl_offset: '0s', id: helpers.id(type, settings) }, - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + type: type, + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, }, }, }, @@ -487,50 +492,50 @@ b64(settings={}): $.transform.format.from.base64(settings=settings), base64(settings={}): { local type = 'format_from_base64', - local default = $.transform.format.default { id: $.helpers.id(type, settings) }, + local default = $.transform.format.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, gz(settings={}): $.transform.format.from.gzip(settings=settings), gzip(settings={}): { local type = 'format_from_gzip', - local default = { id: $.helpers.id(type, settings) }, + local default = { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, pretty_print(settings={}): { local type = 'format_from_pretty_print', - local default = { id: $.helpers.id(type, settings) }, + local default = { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, zip(settings={}): { local type = 'format_from_zip', - local default = { id: $.helpers.id(type, settings) }, + local default = { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - } + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, }, to: { b64(settings={}): $.transform.format.to.base64(settings=settings), base64(settings={}): { local type = 'format_to_base64', - local default = $.transform.format.default { id: $.helpers.id(type, settings) }, + local default = $.transform.format.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, gz(settings={}): $.transform.format.to.gzip(settings=settings), gzip(settings={}): { local type = 'format_to_gzip', - local default = { id: $.helpers.id(type, settings) }, + local default = { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, }, @@ -540,17 +545,17 @@ }, md5(settings={}): { local type = 'hash_md5', - local default = $.transform.hash.default { id: $.helpers.id(type, settings) }, + local default = $.transform.hash.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, sha256(settings={}): { local type = 'hash_sha256', - local default = $.transform.hash.default { id: $.helpers.id(type, settings) }, + local default = 
$.transform.hash.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, num: $.transform.number, @@ -559,25 +564,25 @@ maximum(settings={}): { local type = 'number_maximum', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, value: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, min(settings={}): $.transform.number.minimum(settings=settings), minimum(settings={}): { local type = 'number_minimum', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, value: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, math: { default: { @@ -586,34 +591,34 @@ add(settings={}): $.transform.number.math.addition(settings=settings), addition(settings={}): { local type = 'number_math_addition', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, + local default = $.transform.number.math.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, sub(settings={}): $.transform.number.math.subtraction(settings=settings), subtraction(settings={}): { local type = 'number_math_subtraction', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, + local default = $.transform.number.math.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, mul(settings={}): $.transform.number.math.multiplication(settings=settings), multiplication(settings={}): { local type = 'number_math_multiplication', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, + local default = $.transform.number.math.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, div(settings={}): $.transform.number.math.division(settings=settings), division(settings={}): { local type = 'number_math_division', - local default = $.transform.number.math.default { id: $.helpers.id(type, settings) }, + local default = $.transform.number.math.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, }, @@ -621,34 +626,31 @@ err(settings={}): { local type = 'meta_err', local default = { - id: $.helpers.id(type, settings), - transform: null, + id: helpers.id(type, settings), transforms: null, - error_messages: null, + error_messages: ['.*'], }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, for_each(settings={}): { local type = 'meta_for_each', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, - 
transform: null, transforms: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, kv_store: { lock(settings={}): { local type = 'meta_kv_store_lock', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object { ttl_key: null }, - transform: null, transforms: null, kv_store: null, prefix: null, @@ -656,56 +658,44 @@ }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, metric: { duration(settings={}): { local type = 'meta_metric_duration', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), metric: $.config.metric, - transform: null, transforms: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, - pipe(settings={}): $.transform.meta.pipeline(settings=settings), - pipeline(settings={}): { - local type = 'meta_pipeline', - local default = { - id: $.helpers.id(type, settings), - object: $.config.object, - transforms: null, - }, - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), - }, retry(settings={}): { local type = 'meta_retry', local default = { - id: $.helpers.id(type, settings), - retry: $.config.retry { error_messages: [".*"] }, - condition: null, + id: helpers.id(type, settings), + retry: $.config.retry, transforms: null, + condition: null, + error_messages: ['.*'], }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, switch(settings={}): { local type = 'meta_switch', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), cases: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, net: $.transform.network, @@ -716,25 +706,25 @@ }, registered_domain(settings={}): { local type = 'network_domain_registered_domain', - local default = $.transform.network.domain.default { id: $.helpers.id(type, settings) }, + local default = $.transform.network.domain.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, subdomain(settings={}): { local type = 'network_domain_subdomain', - local default = $.transform.network.domain.default { id: $.helpers.id(type, settings) }, + local default = $.transform.network.domain.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, tld(settings={}): $.transform.network.domain.top_level_domain(settings=settings), top_level_domain(settings={}): { local type = 'network_domain_top_level_domain', - local default = $.transform.network.domain.default { id: $.helpers.id(type, settings) }, + local default = $.transform.network.domain.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, }, @@ -746,89 
+736,107 @@ cp(settings={}): $.transform.object.copy(settings=settings), copy(settings={}): { local type = 'object_copy', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, del(settings={}): $.transform.object.delete(settings=settings), delete(settings={}): { local type = 'object_delete', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, insert(settings={}): { local type = 'object_insert', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, jq(settings={}): { local type = 'object_jq', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), filter: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, to: { bool(settings={}): $.transform.object.to.boolean(settings=settings), boolean(settings={}): { local type = 'object_to_boolean', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, float(settings={}): { local type = 'object_to_float', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, int(settings={}): $.transform.object.to.integer(settings=settings), integer(settings={}): { local type = 'object_to_integer', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, str(settings={}): $.transform.object.to.string(settings=settings), string(settings={}): { local type = 'object_to_string', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, uint(settings={}): $.transform.object.to.unsigned_integer(settings=settings), unsigned_integer(settings={}): { local type = 'object_to_unsigned_integer', - local default = $.transform.object.default { id: $.helpers.id(type, settings) }, + 
local default = $.transform.object.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, }, send: { aws: { - dynamodb(settings={}): { - local type = 'send_aws_dynamodb', + dynamodb: { + put(settings={}): { + local type = 'send_aws_dynamodb_put', + local default = { + id: helpers.id(type, settings), + batch: $.config.batch, + aws: $.config.aws, + auxiliary_transforms: null, + }, + + local s = std.mergePatch(settings, { + auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, + aux_tforms: null, + }), + + type: type, + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), + }, + }, + firehose(settings={}): $.transform.send.aws.data_firehose(settings=settings), + data_firehose(settings={}): { + local type = 'send_aws_data_firehose', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, - auxiliary_transforms: null, aws: $.config.aws, - retry: $.config.retry, - table_name: null, + auxiliary_transforms: null, }, local s = std.mergePatch(settings, { @@ -837,56 +845,32 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, eventbridge(settings={}): { local type = 'send_aws_eventbridge', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, - auxiliary_transforms: null, aws: $.config.aws, - retry: $.config.retry, - arn: null, - description: null, - }, - local s = std.mergePatch(settings, { - auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, - aux_tforms: null, - }), - - type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), - }, - firehose(settings={}): $.transform.send.aws.kinesis_data_firehose(settings=settings), - kinesis_data_firehose(settings={}): { - local type = 'send_aws_kinesis_data_firehose', - local default = { - id: $.helpers.id(type, settings), - batch: $.config.batch, auxiliary_transforms: null, - aws: $.config.aws, - retry: $.config.retry, - stream_name: null, + description: null, }, - local s = std.mergePatch(settings, { auxiliary_transforms: if std.objectHas(settings, 'auxiliary_transforms') then settings.auxiliary_transforms else if std.objectHas(settings, 'aux_tforms') then settings.aux_tforms else null, aux_tforms: null, }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, kinesis_data_stream(settings={}): { local type = 'send_aws_kinesis_data_stream', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, - auxiliary_transforms: null, aws: $.config.aws, - retry: $.config.retry, - stream_name: null, + auxiliary_transforms: null, use_batch_key_as_partition_key: false, enable_record_aggregation: false, }, @@ -897,33 +881,29 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, lambda(settings={}): { local type = 'send_aws_lambda', local default = { - id: 
$.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, - auxiliary_transforms: null, aws: $.config.aws, - retry: $.config.retry, - function_name: null, + auxiliary_transforms: null, }, type: type, - settings: std.mergePatch(default, $.helpers.abbv(settings)), + settings: std.mergePatch(default, helpers.abbv(settings)), }, s3(settings={}): { local type = 'send_aws_s3', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, - auxiliary_transforms: null, aws: $.config.aws, - retry: $.config.retry, - bucket_name: null, - storage_class: 'STANDARD', file_path: $.file_path, + auxiliary_transforms: null, + storage_class: 'STANDARD', }, local s = std.mergePatch(settings, { @@ -932,17 +912,15 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, sns(settings={}): { local type = 'send_aws_sns', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, - auxiliary_transforms: null, aws: $.config.aws, - retry: $.config.retry, - arn: null, + auxiliary_transforms: null, }, local s = std.mergePatch(settings, { @@ -951,17 +929,15 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, sqs(settings={}): { local type = 'send_aws_sqs', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, - auxiliary_transforms: null, aws: $.config.aws, - retry: $.config.retry, - arn: null, + auxiliary_transforms: null, }, local s = std.mergePatch(settings, { @@ -970,13 +946,13 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, }, file(settings={}): { local type = 'send_file', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, auxiliary_transforms: null, file_path: $.file_path, @@ -988,13 +964,13 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, http: { post(settings={}): { local type = 'send_http_post', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, auxiliary_transforms: null, url: null, @@ -1009,13 +985,13 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, }, stdout(settings={}): { local type = 'send_stdout', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, auxiliary_transforms: null, }, @@ -1026,7 +1002,7 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, }, str: $.transform.string, @@ -1034,31 +1010,31 @@ append(settings={}): { local type = 'string_append', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, suffix: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, capture(settings={}): { local type = 'string_capture', local default = { - id: $.helpers.id(type, 
settings), + id: helpers.id(type, settings), object: $.config.object, pattern: null, count: 0, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, repl: $.transform.string.replace, replace(settings={}): { local type = 'string_replace', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, pattern: null, replacement: null, @@ -1071,12 +1047,12 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, split(settings={}): { local type = 'string_split', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, separator: null, }, @@ -1087,7 +1063,7 @@ }), type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(s))), + settings: std.prune(std.mergePatch(default, helpers.abbv(s))), }, to: { default: { @@ -1095,35 +1071,35 @@ }, lower(settings={}): { local type = 'string_to_lower', - local default = $.transform.string.to.default { id: $.helpers.id(type, settings) }, + local default = $.transform.string.to.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, upper(settings={}): { local type = 'string_to_upper', - local default = $.transform.string.to.default { id: $.helpers.id(type, settings) }, + local default = $.transform.string.to.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, snake(settings={}): { local type = 'string_to_snake', - local default = $.transform.string.to.default { id: $.helpers.id(type, settings) }, + local default = $.transform.string.to.default { id: helpers.id(type, settings) }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, uuid(settings={}): { local type = 'string_uuid', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, time: { @@ -1132,80 +1108,80 @@ string(settings={}): { local type = 'time_from_string', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, format: null, - location: null, + location: 'UTC', }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, unix(settings={}): { local type = 'time_from_unix', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, unix_milli(settings={}): { local type = 'time_from_unix_milli', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, 
$.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, now(settings={}): { local type = 'time_now', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, to: { str(settings={}): $.transform.time.to.string(settings=settings), string(settings={}): { local type = 'time_to_string', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, format: null, - location: null, + location: 'UTC', }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, unix(settings={}): { local type = 'time_to_unix', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, unix_milli(settings={}): { local type = 'time_to_unix_milli', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, util: $.transform.utility, @@ -1213,271 +1189,164 @@ control(settings={}): { local type = 'utility_control', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), batch: $.config.batch, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, delay(settings={}): { local type = 'utility_delay', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), duration: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, drop(settings={}): { local type = 'utility_drop', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, err(settings={}): { local type = 'utility_err', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), message: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, metric: { bytes(settings={}): { local type = 'utility_metric_bytes', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), metric: $.config.metric, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, count(settings={}): { local type = 'utility_metric_count', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), metric: $.config.metric, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), 
}, freshness(settings={}): { local type = 'utility_metric_freshness', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), threshold: null, metric: $.config.metric, object: $.config.object, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, secret(settings={}): { local type = 'utility_secret', local default = { - id: $.helpers.id(type, settings), + id: helpers.id(type, settings), secret: null, }, type: type, - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, }, // Mirrors interfaces from the internal/kv_store package. kv_store: { - aws_dynamodb(settings={}): { - local default = { - aws: $.config.aws, - retry: $.config.retry, - table_name: null, - attributes: { partition_key: null, sort_key: null, value: null, ttl: null }, - consistent_read: false, - }, + aws: { + dynamodb(settings={}): { + local default = { + aws: $.config.aws, + attributes: { partition_key: null, sort_key: null, value: null, ttl: null }, + consistent_read: false, + }, - type: 'aws_dynamodb', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + type: 'aws_dynamodb', + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), + }, }, csv_file(settings={}): { local default = { file: null, column: null, delimiter: ',', header: null }, type: 'csv_file', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, json_file(settings=$.defaults.kv_store.json_file.settings): { local default = { file: null, is_lines: false }, type: 'json_file', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, memory(settings={}): { local default = { capacity: 1024 }, type: 'memory', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, mmdb(settings={}): { local default = { file: null }, type: 'mmdb', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, text_file(settings={}): { local default = { file: null }, type: 'text_file', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, - // Mirrors structs from the internal/config package. - config: { - aws: { region: null, role_arn: null }, - batch: { count: 1000, size: 1000 * 1000, duration: '1m' }, - metric: { name: null, attributes: null, destination: null }, - object: { source_key: null, target_key: null, batch_key: null }, - request: { timeout: '1s' }, - retry: { count: 3, delay: '1s', error_messages: null }, - }, - // Mirrors config from the internal/file package. - file_path: { prefix: null, time_format: '2006/01/02', uuid: true, suffix: null }, // Mirrors interfaces from the internal/secrets package. 
secrets: { default: { id: null, ttl: null }, aws: { secrets_manager(settings={}): { local default = { + aws: $.config.aws, id: null, - name: null, ttl_offset: null, - aws: $.config.aws, - retry: $.config.retry, }, type: 'aws_secrets_manager', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, environment_variable(settings={}): { local default = { id: null, name: null, ttl_offset: null }, type: 'environment_variable', - settings: std.prune(std.mergePatch(default, $.helpers.abbv(settings))), + settings: std.prune(std.mergePatch(default, helpers.abbv(settings))), }, }, - // Commonly used condition and transform patterns. - pattern: { - cnd: $.pattern.condition, - condition: { - obj(key): { - object: { source_key: key }, - }, - // Negates any inspector. - negate(inspector): $.condition.meta.negate(settings={ inspector: inspector }), - net: $.pattern.condition.network, - network: { - ip: { - // Checks if an IP address is internal. - // - // Use with the ANY operator to match internal IP addresses. - // Use with the NONE operator to match external IP addresses. - internal(key=null): [ - $.condition.network.ip.link_local_multicast(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.link_local_unicast(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.loopback(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.multicast(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.private(settings=$.pattern.condition.obj(key)), - $.condition.network.ip.unspecified(settings=$.pattern.condition.obj(key)), - ], - }, - }, - num: $.pattern.condition.number, - number: { - len: $.pattern.condition.number.length, - length: { - // Checks if data is equal to zero. - // - // Use with the ANY / ALL operator to match empty data. - // Use with the NONE operator to match non-empty data. - eq_zero(key=null): - $.condition.number.length.equal_to(settings=$.pattern.condition.obj(key) { value: 0 }), - // Checks if data is greater than zero. - // - // Use with the ANY / ALL operator to match non-empty data. - // Use with the NONE operator to match empty data. - gt_zero(key=null): - $.condition.number.length.greater_than(settings=$.pattern.condition.obj(key) { value: 0 }), - }, - }, - }, - tf: $.pattern.transform, - transform: { - // Conditional applies a transform when a single condition is met. If - // the condition does not contain a valid operator, then it is assumed - // to be an ANY operator. - conditional(condition, transform): { - local type = 'meta_switch', - local c = if std.objectHas(condition, 'type') then { operator: 'any', inspectors: [condition] } else condition, - - type: type, - settings: { id: $.helpers.id(type, transform), cases: [{ condition: c, transform: transform }] }, - }, - fmt: $.pattern.transform.format, - format: { - // Creates JSON Lines text from data. Only valid JSON text is included. 
- jsonl: [ - $.pattern.tf.conditional( - condition=$.cnd.meta.negate({ inspector: $.cnd.fmt.json() }), - transform=$.tf.util.drop(), - ), - $.tf.agg.to.string({ separator: '\n' }), - $.tf.str.append({ suffix: '\n' }), - ], - }, - num: $.pattern.transform.number, - number: { - clamp(source_key, target_key, min, max): [ - $.tf.number.maximum({ object: { source_key: source_key, target_key: target_key }, value: min }), - $.tf.number.minimum({ object: { source_key: target_key, target_key: target_key }, value: max }), - ], - }, - }, - }, - // Utility functions that can be used in conditions and transforms. - helpers: { - // If the input is not an array, then this returns it as an array. - make_array(i): if !std.isArray(i) then [i] else i, - obj: $.helpers.object, - object: { - // If key is `foo` and arr is `bar`, then the result is `foo.bar`. - // If key is `foo` and arr is `[bar, baz]`, then the result is `foo.bar.baz`. - append(key, arr): std.join('.', $.helpers.make_array(key) + $.helpers.make_array(arr)), - // If key is `foo`, then the result is `foo.-1`. - append_array(key): key + '.-1', - // If key is `foo` and e is `0`, then the result is `foo.0`. - get_element(key, e=0): std.join('.', [key, if std.isNumber(e) then std.toString(e) else e]), - }, - abbv(settings): std.mergePatch(settings, { - object: if std.objectHas(settings, 'object') then $.helpers.abbv_obj(settings.object) else if std.objectHas(settings, 'obj') then $.helpers.abbv_obj(settings.obj) else null, - obj: null, - }), - abbv_obj(s): { - source_key: if std.objectHas(s, 'src') then s.src else if std.objectHas(s, 'source_key') then s.source_key else null, - src: null, - target_key: if std.objectHas(s, 'trg') then s.trg else if std.objectHas(s, 'target_key') then s.target_key else null, - trg: null, - batch_key: if std.objectHas(s, 'btch') then s.batch else if std.objectHas(s, 'batch_key') then s.batch_key else null, - }, - id(type, settings): std.join('-', [std.md5(type)[:8], std.md5(std.toString(settings))[:8]]), + // Mirrors structs from the internal/config package. + config: { + aws: { arn: null, assume_role_arn: null }, + batch: { count: 1000, size: 1000 * 1000, duration: '1m' }, + metric: { name: null, attributes: null, destination: null }, + object: { source_key: null, target_key: null, batch_key: null }, + request: { timeout: '1s' }, + retry: { count: 3, delay: '1s' }, }, + // Mirrors config from the internal/file package. 
+ file_path: { prefix: null, time_format: '2006/01/02', uuid: true, suffix: null }, } diff --git a/substation_test.go b/substation_test.go index 3573d743..c136bd05 100644 --- a/substation_test.go +++ b/substation_test.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "github.com/brexhq/substation/transform" + "github.com/brexhq/substation/v2" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + "github.com/brexhq/substation/v2/transform" ) func ExampleSubstation() { diff --git a/substation_test.jsonnet b/substation_test.jsonnet new file mode 100644 index 00000000..a13766e4 --- /dev/null +++ b/substation_test.jsonnet @@ -0,0 +1,21 @@ +local sub = import 'substation.libsonnet'; + +local src = 'source'; +local trg = 'target'; + +{ + condition: { + all: sub.condition.all([$.condition.string.contains, $.condition.string.match]), + any: sub.condition.any([$.condition.string.contains, $.condition.string.match]), + none: sub.condition.none([$.condition.string.contains, $.condition.string.match]), + meta: { + all: sub.condition.meta.all({ inspectors: [$.condition.string.contains, $.condition.string.match] }), + any: sub.condition.meta.any({ inspectors: [$.condition.string.contains, $.condition.string.match] }), + none: sub.condition.meta.none({ inspectors: [$.condition.string.contains, $.condition.string.match] }), + }, + string: { + contains: sub.condition.string.contains({ obj: { src: src }, value: 'z' }), + match: sub.condition.string.match({ obj: { src: src }, pattern: 'z' }), + }, + }, +} diff --git a/transform/aggregate.go b/transform/aggregate.go index f9883a28..fbd03d07 100644 --- a/transform/aggregate.go +++ b/transform/aggregate.go @@ -5,8 +5,7 @@ import ( "fmt" "slices" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + iconfig "github.com/brexhq/substation/v2/internal/config" ) type aggregateArrayConfig struct { @@ -42,7 +41,7 @@ func (c *aggregateStrConfig) Decode(in interface{}) error { func (c *aggregateStrConfig) Validate() error { if c.Separator == "" { - return fmt.Errorf("separator: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("separator: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/aggregate_from_array.go b/transform/aggregate_from_array.go index 4097591d..ab27d797 100644 --- a/transform/aggregate_from_array.go +++ b/transform/aggregate_from_array.go @@ -5,9 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" "github.com/tidwall/gjson" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newAggregateFromArray(_ context.Context, cfg config.Config) (*aggregateFromArray, error) { diff --git a/transform/aggregate_from_array_test.go b/transform/aggregate_from_array_test.go index b7b7c931..c244e89c 100644 --- a/transform/aggregate_from_array_test.go +++ b/transform/aggregate_from_array_test.go @@ -6,8 +6,8 @@ import ( "golang.org/x/exp/slices" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &aggregateFromArray{} diff --git a/transform/aggregate_from_string.go b/transform/aggregate_from_string.go index 6a529b00..b6a7164e 100644 --- a/transform/aggregate_from_string.go +++ 
b/transform/aggregate_from_string.go @@ -5,8 +5,8 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newAggregateFromString(_ context.Context, cfg config.Config) (*aggregateFromString, error) { diff --git a/transform/aggregate_from_string_test.go b/transform/aggregate_from_string_test.go index 7adc6ca7..c8545f56 100644 --- a/transform/aggregate_from_string_test.go +++ b/transform/aggregate_from_string_test.go @@ -4,8 +4,8 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" "golang.org/x/exp/slices" ) diff --git a/transform/aggregate_to_array.go b/transform/aggregate_to_array.go index cd6e7546..bf663798 100644 --- a/transform/aggregate_to_array.go +++ b/transform/aggregate_to_array.go @@ -6,9 +6,9 @@ import ( "fmt" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/internal/aggregate" + "github.com/brexhq/substation/v2/message" ) func newAggregateToArray(_ context.Context, cfg config.Config) (*aggregateToArray, error) { @@ -94,7 +94,7 @@ func (tf *aggregateToArray) Transform(ctx context.Context, msg *message.Message) // If data cannot be added after reset, then the batch is misconfigured. tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{outMsg}, nil diff --git a/transform/aggregate_to_array_test.go b/transform/aggregate_to_array_test.go index b9ae79f6..b88d22d7 100644 --- a/transform/aggregate_to_array_test.go +++ b/transform/aggregate_to_array_test.go @@ -6,8 +6,8 @@ import ( "golang.org/x/exp/slices" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &aggregateToArray{} diff --git a/transform/aggregate_to_string.go b/transform/aggregate_to_string.go index 7eb5f489..d3996efd 100644 --- a/transform/aggregate_to_string.go +++ b/transform/aggregate_to_string.go @@ -6,9 +6,9 @@ import ( "fmt" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/internal/aggregate" + "github.com/brexhq/substation/v2/message" ) func newAggregateToString(_ context.Context, cfg config.Config) (*aggregateToString, error) { @@ -84,7 +84,7 @@ func (tf *aggregateToString) Transform(ctx context.Context, msg *message.Message) // If data cannot be added after reset, then the batch is misconfigured.
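// Resetting clears any items buffered for this key; if Add still fails on an empty buffer, the single message exceeds the batch's configured count or size.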
tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{outMsg}, nil diff --git a/transform/aggregate_to_string_test.go b/transform/aggregate_to_string_test.go index f510ad10..3f66b5b9 100644 --- a/transform/aggregate_to_string_test.go +++ b/transform/aggregate_to_string_test.go @@ -4,8 +4,8 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" "golang.org/x/exp/slices" ) diff --git a/transform/array_join.go b/transform/array_join.go index 362b9bfb..464f4044 100644 --- a/transform/array_join.go +++ b/transform/array_join.go @@ -6,10 +6,10 @@ import ( "fmt" "strings" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type arrayJoinConfig struct { @@ -26,11 +26,11 @@ func (c *arrayJoinConfig) Decode(in interface{}) error { func (c *arrayJoinConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/array_join_test.go b/transform/array_join_test.go index 0e6f6f9a..f441acdc 100644 --- a/transform/array_join_test.go +++ b/transform/array_join_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &arrayJoin{} diff --git a/transform/array_zip.go b/transform/array_zip.go index 778d9e72..9504fcca 100644 --- a/transform/array_zip.go +++ b/transform/array_zip.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type arrayZipConfig struct { @@ -22,11 +22,11 @@ func (c *arrayZipConfig) Decode(in interface{}) error { func (c *arrayZipConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/array_zip_test.go b/transform/array_zip_test.go index 0b7eae92..b5dd1f5f 100644 --- a/transform/array_zip_test.go +++ 
b/transform/array_zip_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &arrayZip{} diff --git a/transform/enrich.go b/transform/enrich.go index 64e19f33..4e645d90 100644 --- a/transform/enrich.go +++ b/transform/enrich.go @@ -7,8 +7,7 @@ import ( "io" gohttp "net/http" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + iconfig "github.com/brexhq/substation/v2/internal/config" ) // enrichHTTPInterp is used for interpolating data into URLs. @@ -26,11 +25,11 @@ func (c *enrichDNSConfig) Decode(in interface{}) error { func (c *enrichDNSConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.Request.Timeout == "" { diff --git a/transform/enrich_aws_dynamodb.go b/transform/enrich_aws_dynamodb.go deleted file mode 100644 index 90583e4c..00000000 --- a/transform/enrich_aws_dynamodb.go +++ /dev/null @@ -1,177 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/dynamodb" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -type enrichAWSDynamoDBConfig struct { - // TableName is the DynamoDB table that is queried. - TableName string `json:"table_name"` - // PartitionKey is the DynamoDB partition key. - PartitionKey string `json:"partition_key"` - // SortKey is the DynamoDB sort key. - // - // This is optional and has no default. - SortKey string `json:"sort_key"` - // KeyConditionExpression is the DynamoDB key condition - // expression string (see documentation). - KeyConditionExpression string `json:"key_condition_expression"` - // Limits determines the maximum number of items to evalute. - // - // This is optional and defaults to evaluating all items. - Limit int64 `json:"limit"` - // ScanIndexForward specifies the order of index traversal. - // - // Must be one of: - // - true (traversal is performed in ascending order) - // - false (traversal is performed in descending order) - // - // This is optional and defaults to true. 
- ScanIndexForward bool `json:"scan_index_forward"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *enrichAWSDynamoDBConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *enrichAWSDynamoDBConfig) Validate() error { - if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) - } - - if c.PartitionKey == "" { - return fmt.Errorf("partition_key: %v", errors.ErrMissingRequiredOption) - } - - if c.TableName == "" { - return fmt.Errorf("table_name: %v", errors.ErrMissingRequiredOption) - } - - if c.KeyConditionExpression == "" { - return fmt.Errorf("key_condition_expression: %v", errors.ErrMissingRequiredOption) - } - return nil -} - -func newEnrichAWSDynamoDB(_ context.Context, cfg config.Config) (*enrichAWSDynamoDB, error) { - conf := enrichAWSDynamoDBConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform enrich_aws_dynamodb: %v", err) - } - - if conf.ID == "" { - conf.ID = "enrich_aws_dynamodb" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := enrichAWSDynamoDB{ - conf: conf, - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - return &tf, nil -} - -type enrichAWSDynamoDB struct { - conf enrichAWSDynamoDBConfig - - // client is safe for concurrent access. - client dynamodb.API -} - -func (tf *enrichAWSDynamoDB) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - if msg.IsControl() { - return []*message.Message{msg}, nil - } - - var tmp *message.Message - if tf.conf.Object.SourceKey != "" { - value := msg.GetValue(tf.conf.Object.SourceKey) - tmp = message.New().SetData(value.Bytes()) - } else { - tmp = msg - } - - if !json.Valid(tmp.Data()) { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errMsgInvalidObject) - } - - pk := tmp.GetValue(tf.conf.PartitionKey) - if !pk.Exists() { - return []*message.Message{msg}, nil - } - - sk := tmp.GetValue(tf.conf.SortKey) - value, err := tf.dynamodb(ctx, pk.String(), sk.String()) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // No match. 
- if len(value) == 0 { - return []*message.Message{msg}, nil - } - - if err := msg.SetValue(tf.conf.Object.TargetKey, value); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - return []*message.Message{msg}, nil -} - -func (tf *enrichAWSDynamoDB) dynamodb(ctx context.Context, pk, sk string) ([]map[string]interface{}, error) { - resp, err := tf.client.Query( - ctx, - tf.conf.TableName, - pk, sk, - tf.conf.KeyConditionExpression, - tf.conf.Limit, - tf.conf.ScanIndexForward, - ) - if err != nil { - return nil, err - } - - var items []map[string]interface{} - for _, i := range resp.Items { - var item map[string]interface{} - err = dynamodbattribute.UnmarshalMap(i, &item) - if err != nil { - return nil, err - } - - items = append(items, item) - } - return items, nil -} - -func (tf *enrichAWSDynamoDB) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} diff --git a/transform/enrich_aws_dynamodb_query.go b/transform/enrich_aws_dynamodb_query.go new file mode 100644 index 00000000..5cfffa1e --- /dev/null +++ b/transform/enrich_aws_dynamodb_query.go @@ -0,0 +1,169 @@ +package transform + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" +) + +type enrichAWSDynamoDBQueryConfig struct { + Attributes struct { + // PartitionKey is the table's partition key attribute. + // + // This is required for all tables. + PartitionKey string `json:"partition_key"` + // SortKey is the table's sort (range) key attribute. + // + // This must be used if the table uses a composite primary key schema + // (partition key and sort key). Only string types are supported. + SortKey string `json:"sort_key"` + } `json:"attributes"` + // Limit determines the maximum number of items to evaluate. + // + // This is optional and defaults to evaluating all items. + Limit int32 `json:"limit"` + // ScanIndexForward specifies the order of index traversal. + // + // Must be one of: + // - true (traversal is performed in ascending order) + // - false (traversal is performed in descending order) + // + // This is optional and defaults to true.
+ ScanIndexForward bool `json:"scan_index_forward"` + + ID string `json:"id"` + Object iconfig.Object `json:"object"` + AWS iconfig.AWS `json:"aws"` +} + +func (c *enrichAWSDynamoDBQueryConfig) Decode(in interface{}) error { + return iconfig.Decode(in, c) +} + +func (c *enrichAWSDynamoDBQueryConfig) Validate() error { + if c.Object.TargetKey == "" { + return fmt.Errorf("object.target_key: %v", iconfig.ErrMissingRequiredOption) + } + + if c.Attributes.PartitionKey == "" { + return fmt.Errorf("attributes.partition_key: %v", iconfig.ErrMissingRequiredOption) + } + + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) + } + + return nil +} + +func newEnrichAWSDynamoDBQuery(ctx context.Context, cfg config.Config) (*enrichAWSDynamoDBQuery, error) { + conf := enrichAWSDynamoDBQueryConfig{} + if err := conf.Decode(cfg.Settings); err != nil { + return nil, fmt.Errorf("transform enrich_aws_dynamodb_query: %v", err) + } + + if conf.ID == "" { + conf.ID = "enrich_aws_dynamodb_query" + } + + if err := conf.Validate(); err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf := enrichAWSDynamoDBQuery{ + conf: conf, + } + + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = dynamodb.NewFromConfig(awsCfg) + + return &tf, nil +} + +type enrichAWSDynamoDBQuery struct { + conf enrichAWSDynamoDBQueryConfig + client *dynamodb.Client +} + +func (tf *enrichAWSDynamoDBQuery) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { + if msg.IsControl() { + return []*message.Message{msg}, nil + } + + value := msg.GetValue(tf.conf.Object.SourceKey) + if !value.Exists() { + return []*message.Message{msg}, nil + } + + // This supports one of two states: + // - A single partition key, captured as a string. + // - A composite key (partition key and sort key), captured as an array of two strings. + // + // If the value is an array, we assume it is a composite key. + var keyEx expression.KeyConditionBuilder + if value.IsArray() && len(value.Array()) == 2 && tf.conf.Attributes.SortKey != "" { + keyEx = expression.Key(tf.conf.Attributes.PartitionKey).Equal(expression.Value(value.Array()[0].String())). + And(expression.Key(tf.conf.Attributes.SortKey).Equal(expression.Value(value.Array()[1].String()))) + } else if !value.IsArray() { + keyEx = expression.Key(tf.conf.Attributes.PartitionKey).Equal(expression.Value(value.String())) + } else { // This is invalid, so we return the original message.
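+ // For example, an array value when no sort key is configured, or an array that does not contain exactly two elements.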
+ return []*message.Message{msg}, nil + } + + expr, err := expression.NewBuilder().WithKeyCondition(keyEx).Build() + if err != nil { + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) + } + + ctx = context.WithoutCancel(ctx) + resp, err := tf.client.Query(ctx, &dynamodb.QueryInput{ + TableName: &tf.conf.AWS.ARN, + KeyConditionExpression: expr.KeyCondition(), + ExpressionAttributeNames: expr.Names(), + ExpressionAttributeValues: expr.Values(), + Limit: aws.Int32(tf.conf.Limit), + ScanIndexForward: aws.Bool(tf.conf.ScanIndexForward), + }) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) + } + + var items []map[string]interface{} + for _, i := range resp.Items { + var item map[string]interface{} + if err := attributevalue.UnmarshalMap(i, &item); err != nil { + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) + } + + items = append(items, item) + } + + if len(items) == 0 { + return []*message.Message{msg}, nil + } + + if err := msg.SetValue(tf.conf.Object.TargetKey, items); err != nil { + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) + } + + return []*message.Message{msg}, nil +} + +func (tf *enrichAWSDynamoDBQuery) String() string { + b, _ := json.Marshal(tf.conf) + return string(b) +} diff --git a/transform/enrich_aws_dynamodb_test.go b/transform/enrich_aws_dynamodb_test.go deleted file mode 100644 index 4e563ade..00000000 --- a/transform/enrich_aws_dynamodb_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/brexhq/substation/config" - ddb "github.com/brexhq/substation/internal/aws/dynamodb" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &enrichAWSDynamoDB{} - -type enrichAWSDynamoDBMockedQuery struct { - dynamodbiface.DynamoDBAPI - Resp dynamodb.QueryOutput -} - -func (m enrichAWSDynamoDBMockedQuery) QueryWithContext(ctx aws.Context, input *dynamodb.QueryInput, opts ...request.Option) (*dynamodb.QueryOutput, error) { - return &m.Resp, nil -} - -var enrichAWSDynamoDBTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error - api ddb.API -}{ - { - "success", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "a", - }, - "table_name": "tab", - "partition_key": "PK", - "key_condition_expression": "kce", - }, - }, - []byte(`{"PK":"b"}`), - [][]byte{ - []byte(`{"PK":"b","a":[{"b":"c"}]}`), - }, - nil, - ddb.API{ - Client: enrichAWSDynamoDBMockedQuery{ - Resp: dynamodb.QueryOutput{ - Items: []map[string]*dynamodb.AttributeValue{ - { - "b": { - S: aws.String("c"), - }, - }, - }, - }, - }, - }, - }, -} - -func TestEnrichAWSDynamoDB(t *testing.T) { - ctx := context.TODO() - for _, test := range enrichAWSDynamoDBTests { - tf, err := newEnrichAWSDynamoDB(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - tf.client = test.api - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - } -} - -func benchmarkEnrichAWSDynamoDB(b *testing.B, tf *enrichAWSDynamoDB, data []byte) { - ctx := context.TODO() - for i := 0; i < 
b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkEnrichAWSDynamoDB(b *testing.B) { - ctx := context.TODO() - for _, test := range enrichAWSDynamoDBTests { - b.Run(test.name, - func(b *testing.B) { - tf, err := newEnrichAWSDynamoDB(ctx, test.cfg) - if err != nil { - b.Fatal(err) - } - tf.client = test.api - - benchmarkEnrichAWSDynamoDB(b, tf, test.test) - }, - ) - } -} diff --git a/transform/enrich_aws_lambda.go b/transform/enrich_aws_lambda.go index 85415be6..d48dc502 100644 --- a/transform/enrich_aws_lambda.go +++ b/transform/enrich_aws_lambda.go @@ -5,23 +5,19 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/lambda" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/service/lambda" "github.com/tidwall/gjson" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type enrichAWSLambdaConfig struct { - // FunctionName is the AWS Lambda function to synchronously invoke. - FunctionName string `json:"function_name"` - ID string `json:"id"` Object iconfig.Object `json:"object"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` } func (c *enrichAWSLambdaConfig) Decode(in interface{}) error { @@ -30,21 +26,21 @@ func (c *enrichAWSLambdaConfig) Decode(in interface{}) error { func (c *enrichAWSLambdaConfig) Validate() error { if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } - if c.FunctionName == "" { - return fmt.Errorf("function_name: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) } return nil } -func newEnrichAWSLambda(_ context.Context, cfg config.Config) (*enrichAWSLambda, error) { +func newEnrichAWSLambda(ctx context.Context, cfg config.Config) (*enrichAWSLambda, error) { conf := enrichAWSLambdaConfig{} if err := conf.Decode(cfg.Settings); err != nil { return nil, fmt.Errorf("transform enrich_aws_lambda: %v", err) @@ -62,22 +58,19 @@ func newEnrichAWSLambda(_ context.Context, cfg config.Config) (*enrichAWSLambda, conf: conf, } - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = lambda.NewFromConfig(awsCfg) return &tf, nil } type enrichAWSLambda struct { - conf enrichAWSLambdaConfig - - // client is safe for concurrent access. 
- client lambda.API + conf enrichAWSLambdaConfig + client *lambda.Client } func (tf *enrichAWSLambda) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { @@ -94,7 +87,11 @@ func (tf *enrichAWSLambda) Transform(ctx context.Context, msg *message.Message) return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errMsgInvalidObject) } - resp, err := tf.client.Invoke(ctx, tf.conf.FunctionName, value.Bytes()) + ctx = context.WithoutCancel(ctx) + resp, err := tf.client.Invoke(ctx, &lambda.InvokeInput{ + FunctionName: &tf.conf.AWS.ARN, + Payload: value.Bytes(), + }) if err != nil { return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) } diff --git a/transform/enrich_aws_lambda_test.go b/transform/enrich_aws_lambda_test.go deleted file mode 100644 index a3c343f3..00000000 --- a/transform/enrich_aws_lambda_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package transform - -import ( - "context" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/lambda" - "github.com/aws/aws-sdk-go/service/lambda/lambdaiface" - "github.com/brexhq/substation/config" - lamb "github.com/brexhq/substation/internal/aws/lambda" - "github.com/brexhq/substation/message" -) - -var _ Transformer = &enrichAWSLambda{} - -type enrichAWSLambdaMockedInvoke struct { - lambdaiface.LambdaAPI - Resp lambda.InvokeOutput -} - -func (m enrichAWSLambdaMockedInvoke) InvokeWithContext(ctx aws.Context, input *lambda.InvokeInput, opts ...request.Option) (*lambda.InvokeOutput, error) { - return &m.Resp, nil -} - -var enrichAWSLambdaTests = []struct { - name string - cfg config.Config - test []byte - expected [][]byte - err error - api lamb.API -}{ - { - "success", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "a", - }, - "function_name": "func", - }, - }, - []byte(`{"a":{"b":"c"}}`), - [][]byte{ - []byte(`{"a":{"d":"e"}}`), - }, - nil, - lamb.API{ - Client: enrichAWSLambdaMockedInvoke{ - Resp: lambda.InvokeOutput{ - Payload: []byte(`{"d":"e"}`), - }, - }, - }, - }, -} - -func TestEnrichAWSLambda(t *testing.T) { - ctx := context.TODO() - for _, test := range enrichAWSLambdaTests { - tf, err := newEnrichAWSLambda(ctx, test.cfg) - if err != nil { - t.Fatal(err) - } - tf.client = test.api - - msg := message.New().SetData(test.test) - result, err := tf.Transform(ctx, msg) - if err != nil { - t.Error(err) - } - - var data [][]byte - for _, c := range result { - data = append(data, c.Data()) - } - - if !reflect.DeepEqual(data, test.expected) { - t.Errorf("expected %s, got %s", test.expected, data) - } - } -} - -func benchmarkEnrichAWSLambda(b *testing.B, tf *enrichAWSLambda, data []byte) { - ctx := context.TODO() - for i := 0; i < b.N; i++ { - msg := message.New().SetData(data) - _, _ = tf.Transform(ctx, msg) - } -} - -func BenchmarkEnrichAWSLambda(b *testing.B) { - ctx := context.TODO() - for _, test := range enrichAWSLambdaTests { - b.Run(test.name, - func(b *testing.B) { - tf, err := newEnrichAWSLambda(ctx, test.cfg) - if err != nil { - b.Fatal(err) - } - tf.client = test.api - - benchmarkEnrichAWSLambda(b, tf, test.test) - }, - ) - } -} diff --git a/transform/enrich_dns_domain_lookup.go b/transform/enrich_dns_domain_lookup.go index 0f0f4a9e..3d450fad 100644 --- a/transform/enrich_dns_domain_lookup.go +++ b/transform/enrich_dns_domain_lookup.go @@ -9,8 +9,8 @@ import ( "net" "time" - "github.com/brexhq/substation/config" - 
"github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newEnrichDNSDomainLookup(_ context.Context, cfg config.Config) (*enrichDNSDomainLookup, error) { diff --git a/transform/enrich_dns_ip_lookup.go b/transform/enrich_dns_ip_lookup.go index e41dec2d..6aca6d43 100644 --- a/transform/enrich_dns_ip_lookup.go +++ b/transform/enrich_dns_ip_lookup.go @@ -9,8 +9,8 @@ import ( "net" "time" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newEnrichDNSIPLookup(_ context.Context, cfg config.Config) (*enrichDNSIPLookup, error) { diff --git a/transform/enrich_dns_txt_lookup.go b/transform/enrich_dns_txt_lookup.go index 8720e73d..bb9b893e 100644 --- a/transform/enrich_dns_txt_lookup.go +++ b/transform/enrich_dns_txt_lookup.go @@ -9,8 +9,8 @@ import ( "net" "time" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newEnrichDNSTxtLookup(_ context.Context, cfg config.Config) (*enrichDNSTxtLookup, error) { diff --git a/transform/enrich_http_get.go b/transform/enrich_http_get.go index 47ecc92a..fafb13ec 100644 --- a/transform/enrich_http_get.go +++ b/transform/enrich_http_get.go @@ -8,12 +8,12 @@ import ( "fmt" "strings" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/http" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/http" + "github.com/brexhq/substation/v2/internal/secrets" ) type enrichHTTPGetConfig struct { @@ -39,7 +39,7 @@ func (c *enrichHTTPGetConfig) Decode(in interface{}) error { func (c *enrichHTTPGetConfig) Validate() error { if c.URL == "" { - return fmt.Errorf("url: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("url: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/enrich_http_post.go b/transform/enrich_http_post.go index 28b312e6..84f62fbc 100644 --- a/transform/enrich_http_post.go +++ b/transform/enrich_http_post.go @@ -8,12 +8,12 @@ import ( "fmt" "strings" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/http" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/http" + "github.com/brexhq/substation/v2/internal/secrets" ) type enrichHTTPPostObjectConfig struct { @@ -47,11 +47,11 @@ func (c *enrichHTTPPostConfig) Decode(in interface{}) error { func (c *enrichHTTPPostConfig) Validate() error { if c.URL == "" { - return fmt.Errorf("url: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("url: %v", iconfig.ErrMissingRequiredOption) } if c.Object.BodyKey == "" { - return fmt.Errorf("body_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("body_key: %v", iconfig.ErrMissingRequiredOption) 
} return nil diff --git a/transform/enrich_kv_store_item_get.go b/transform/enrich_kv_store_item_get.go index d6b53741..7247598d 100644 --- a/transform/enrich_kv_store_item_get.go +++ b/transform/enrich_kv_store_item_get.go @@ -7,11 +7,11 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/kv" ) type enrichKVStoreItemGetConfig struct { @@ -37,15 +37,15 @@ func (c *enrichKVStoreItemGetConfig) Decode(in interface{}) error { func (c *enrichKVStoreItemGetConfig) Validate() error { if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.KVStore.Type == "" { - return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("kv_store: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/enrich_kv_store_item_set.go b/transform/enrich_kv_store_item_set.go index 87857c3a..007fbd62 100644 --- a/transform/enrich_kv_store_item_set.go +++ b/transform/enrich_kv_store_item_set.go @@ -8,11 +8,11 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/kv" ) type enrichKVStoreItemSetObjectConfig struct { @@ -60,15 +60,15 @@ func (c *enrichKVStoreItemSetConfig) Decode(in interface{}) error { func (c *enrichKVStoreItemSetConfig) Validate() error { if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.KVStore.Type == "" { - return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("kv_store: %v", iconfig.ErrMissingRequiredOption) } return nil @@ -151,24 +151,24 @@ func (tf *enrichKVStoreItemSet) Transform(ctx context.Context, msg *message.Mess value := msg.GetValue(tf.conf.Object.TTLKey) ttl := truncateTTL(value) + tf.ttl - if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String(), ttl); err != nil { + if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value(), ttl); err != nil { return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) } } else if tf.conf.Object.TTLKey != "" { value := msg.GetValue(tf.conf.Object.TTLKey) ttl := truncateTTL(value) - if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String(), ttl); 
err != nil { + if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value(), ttl); err != nil { return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) } } else if tf.ttl != 0 { ttl := time.Now().Add(time.Duration(tf.ttl) * time.Second).Unix() - if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String(), ttl); err != nil { + if err := tf.kvStore.SetWithTTL(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value(), ttl); err != nil { return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) } } else { - if err := tf.kvStore.Set(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).String()); err != nil { + if err := tf.kvStore.Set(ctx, key, msg.GetValue(tf.conf.Object.TargetKey).Value()); err != nil { return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) } } diff --git a/transform/enrich_kv_store_set_add.go b/transform/enrich_kv_store_set_add.go index 72824b54..3ccea9c4 100644 --- a/transform/enrich_kv_store_set_add.go +++ b/transform/enrich_kv_store_set_add.go @@ -6,11 +6,11 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/kv" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/kv" ) type enrichKVStoreSetAddObjectConfig struct { @@ -58,15 +58,15 @@ func (c *enrichKVStoreSetAddConfig) Decode(in interface{}) error { func (c *enrichKVStoreSetAddConfig) Validate() error { if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.KVStore.Type == "" { - return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("kv_store: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/format.go b/transform/format.go index 01f86ee7..4143e499 100644 --- a/transform/format.go +++ b/transform/format.go @@ -6,8 +6,7 @@ import ( "fmt" "io" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + iconfig "github.com/brexhq/substation/v2/internal/config" ) type formatBase64Config struct { @@ -21,11 +20,11 @@ func (c *formatBase64Config) Decode(in interface{}) error { func (c *formatBase64Config) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/format_from_base64.go b/transform/format_from_base64.go index cf2fc67e..622f4b36 100644 --- a/transform/format_from_base64.go +++ b/transform/format_from_base64.go @@ -6,9 +6,10 @@ import ( "fmt" "unicode/utf8" - "github.com/brexhq/substation/config" - ibase64 "github.com/brexhq/substation/internal/base64" - 
"github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + ibase64 "github.com/brexhq/substation/v2/internal/base64" ) // errFormatFromBase64DecodeBinary is returned when the Base64 transform is configured diff --git a/transform/format_from_base64_test.go b/transform/format_from_base64_test.go index 41ebc0e7..a626abaa 100644 --- a/transform/format_from_base64_test.go +++ b/transform/format_from_base64_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &formatFromBase64{} diff --git a/transform/format_from_gzip.go b/transform/format_from_gzip.go index 6ddbb9a6..b343295c 100644 --- a/transform/format_from_gzip.go +++ b/transform/format_from_gzip.go @@ -5,8 +5,8 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newFormatFromGzip(_ context.Context, cfg config.Config) (*formatFromGzip, error) { diff --git a/transform/format_from_gzip_test.go b/transform/format_from_gzip_test.go index 14260dfa..561f829f 100644 --- a/transform/format_from_gzip_test.go +++ b/transform/format_from_gzip_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &formatFromGzip{} diff --git a/transform/format_from_pretty_print.go b/transform/format_from_pretty_print.go index 72bacc1d..3e9b895d 100644 --- a/transform/format_from_pretty_print.go +++ b/transform/format_from_pretty_print.go @@ -6,9 +6,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) const ( diff --git a/transform/format_from_pretty_print_test.go b/transform/format_from_pretty_print_test.go index 83d0f02f..b21a9427 100644 --- a/transform/format_from_pretty_print_test.go +++ b/transform/format_from_pretty_print_test.go @@ -5,8 +5,8 @@ import ( "context" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &formatFromPrettyPrint{} diff --git a/transform/format_from_zip.go b/transform/format_from_zip.go index 74b01943..9805a8ea 100644 --- a/transform/format_from_zip.go +++ b/transform/format_from_zip.go @@ -7,9 +7,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type formatZipConfig struct { diff --git a/transform/format_from_zip_test.go b/transform/format_from_zip_test.go index 395f3eba..6f0fca19 100644 --- a/transform/format_from_zip_test.go +++ b/transform/format_from_zip_test.go @@ -5,8 +5,8 @@ import ( "slices" "testing" - "github.com/brexhq/substation/config" - 
"github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &formatFromZip{} diff --git a/transform/format_to_base64.go b/transform/format_to_base64.go index adc634d8..0ff5a801 100644 --- a/transform/format_to_base64.go +++ b/transform/format_to_base64.go @@ -5,9 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - ibase64 "github.com/brexhq/substation/internal/base64" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + ibase64 "github.com/brexhq/substation/v2/internal/base64" ) func newFormatToBase64(_ context.Context, cfg config.Config) (*formatToBase64, error) { diff --git a/transform/format_to_base64_test.go b/transform/format_to_base64_test.go index 65f430aa..84f4334c 100644 --- a/transform/format_to_base64_test.go +++ b/transform/format_to_base64_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &formatToBase64{} diff --git a/transform/format_to_gzip.go b/transform/format_to_gzip.go index 543798c3..31232795 100644 --- a/transform/format_to_gzip.go +++ b/transform/format_to_gzip.go @@ -5,8 +5,8 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newFormatToGzip(_ context.Context, cfg config.Config) (*formatToGzip, error) { diff --git a/transform/format_to_gzip_test.go b/transform/format_to_gzip_test.go index 8f590e27..13052877 100644 --- a/transform/format_to_gzip_test.go +++ b/transform/format_to_gzip_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &formatToGzip{} diff --git a/transform/hash.go b/transform/hash.go index 5526e291..a9dd9ac0 100644 --- a/transform/hash.go +++ b/transform/hash.go @@ -3,8 +3,7 @@ package transform import ( "fmt" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + iconfig "github.com/brexhq/substation/v2/internal/config" ) type hashConfig struct { @@ -18,11 +17,11 @@ func (c *hashConfig) Decode(in interface{}) error { func (c *hashConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/hash_md5.go b/transform/hash_md5.go index 3083dbb3..719c2709 100644 --- a/transform/hash_md5.go +++ b/transform/hash_md5.go @@ -6,8 +6,8 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newHashMD5(_ context.Context, cfg config.Config) (*hashMD5, error) { diff --git a/transform/hash_md5_test.go 
diff --git a/transform/hash_md5_test.go b/transform/hash_md5_test.go
index 35118208..561ce005 100644
--- a/transform/hash_md5_test.go
+++ b/transform/hash_md5_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &hashMD5{}
diff --git a/transform/hash_sha256.go b/transform/hash_sha256.go
index f70c860f..1e0c4ba8 100644
--- a/transform/hash_sha256.go
+++ b/transform/hash_sha256.go
@@ -6,8 +6,8 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 func newHashSHA256(_ context.Context, cfg config.Config) (*hashSHA256, error) {
diff --git a/transform/hash_sha256_test.go b/transform/hash_sha256_test.go
index 2cc89677..b28923de 100644
--- a/transform/hash_sha256_test.go
+++ b/transform/hash_sha256_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &hashSHA256{}
diff --git a/transform/meta_err.go b/transform/meta_err.go
index 4e8e2b0b..f0e23b87 100644
--- a/transform/meta_err.go
+++ b/transform/meta_err.go
@@ -6,25 +6,17 @@ import (
 	"fmt"
 	"regexp"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 type metaErrConfig struct {
-	// Transform that is applied with error handling.
-	//
-	// Deprecated: Transform exists for backwards compatibility and will be
-	// removed in a future release. Use Transforms instead.
-	Transform config.Config `json:"transform"`
 	// Transforms that are applied in series with error handling.
 	Transforms []config.Config `json:"transforms"`
-
 	// ErrorMessages are regular expressions that match error messages and determine
 	// if the error should be caught.
-	//
-	// This is optional and defaults to an empty list (all errors are caught).
 	ErrorMessages []string `json:"error_messages"`
 
 	ID string `json:"id"`
@@ -35,8 +27,10 @@ func (c *metaErrConfig) Decode(in interface{}) error {
 }
 
 func (c *metaErrConfig) Validate() error {
-	if c.Transform.Type == "" && len(c.Transforms) == 0 {
-		return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption)
+	for _, t := range c.Transforms {
+		if t.Type == "" {
+			return fmt.Errorf("transform: %v", iconfig.ErrMissingRequiredOption)
+		}
 	}
 
 	return nil
@@ -60,15 +54,6 @@ func newMetaErr(ctx context.Context, cfg config.Config) (*metaErr, error) {
 		conf: conf,
 	}
 
-	if conf.Transform.Type != "" {
-		tfer, err := New(ctx, conf.Transform)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
-		}
-
-		tf.tf = tfer
-	}
-
 	tf.tfs = make([]Transformer, len(conf.Transforms))
 	for i, t := range conf.Transforms {
 		tfer, err := New(ctx, t)
@@ -95,27 +80,13 @@ func newMetaErr(ctx context.Context, cfg config.Config) (*metaErr, error) {
 type metaErr struct {
 	conf metaErrConfig
 
-	tf  Transformer
 	tfs []Transformer
 
 	errorMessages []*regexp.Regexp
 }
 
 func (tf *metaErr) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) {
-	var msgs []*message.Message
-	var err error
-
-	if len(tf.tfs) > 0 {
-		msgs, err = Apply(ctx, tf.tfs, msg)
-	} else {
-		msgs, err = tf.tf.Transform(ctx, msg)
-	}
-
+	msgs, err := Apply(ctx, tf.tfs, msg)
 	if err != nil {
-		// Deprecated: Remove this block in a future release.
-		if len(tf.errorMessages) == 0 {
-			return []*message.Message{msg}, nil
-		}
-
 		for _, e := range tf.errorMessages {
 			if e.MatchString(err.Error()) {
 				return []*message.Message{msg}, nil
diff --git a/transform/meta_err_test.go b/transform/meta_err_test.go
index 945378d0..168c2020 100644
--- a/transform/meta_err_test.go
+++ b/transform/meta_err_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &metaErr{}
@@ -17,62 +17,6 @@ var metaErrTests = []struct {
 	test     []byte
 	expected [][]byte
 }{
-	{
-		"utility_err",
-		config.Config{
-			Settings: map[string]interface{}{
-				"transform": config.Config{
-					Settings: map[string]interface{}{
-						"message": "test error",
-					},
-					Type: "utility_err",
-				},
-			},
-		},
-		[]byte(`{"a":"b"}`),
-		[][]byte{
-			[]byte(`{"a":"b"}`),
-		},
-	},
-	{
-		"utility_err",
-		config.Config{
-			Settings: map[string]interface{}{
-				"transforms": []config.Config{
-					{
-						Settings: map[string]interface{}{
-							"message": "test error",
-						},
-						Type: "utility_err",
-					},
-				},
-			},
-		},
-		[]byte(`{"a":"b"}`),
-		[][]byte{
-			[]byte(`{"a":"b"}`),
-		},
-	},
-	{
-		"error_messages string",
-		config.Config{
-			Settings: map[string]interface{}{
-				"transform": config.Config{
-					Settings: map[string]interface{}{
-						"message": "test error",
-					},
-					Type: "utility_err",
-				},
-				"error_messages": []string{
-					"test error",
-				},
-			},
-		},
-		[]byte(`{"a":"b"}`),
-		[][]byte{
-			[]byte(`{"a":"b"}`),
-		},
-	},
 	{
 		"error_messages string",
 		config.Config{
@@ -95,26 +39,6 @@ var metaErrTests = []struct {
 			[]byte(`{"a":"b"}`),
 		},
 	},
-	{
-		"error_messages regex",
-		config.Config{
-			Settings: map[string]interface{}{
-				"transform": config.Config{
-					Settings: map[string]interface{}{
-						"message": "test error",
-					},
-					Type: "utility_err",
-				},
-				"error_messages": []string{
-					"^test",
-				},
-			},
-		},
-		[]byte(`{"a":"b"}`),
-		[][]byte{
-			[]byte(`{"a":"b"}`),
-		},
-	},
 	{
 		"error_messages regex",
 		config.Config{
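The new shape of `meta_err` follows directly from the remaining test cases: a `transforms` list plus optional `error_messages` patterns. A minimal, illustrative sketch (the exported `transform.New` constructor is taken from this diff; output is not asserted):

```go
package main

import (
	"context"

	"github.com/brexhq/substation/v2/config"
	"github.com/brexhq/substation/v2/message"
	"github.com/brexhq/substation/v2/transform"
)

func main() {
	ctx := context.Background()

	cfg := config.Config{
		Type: "meta_err",
		Settings: map[string]interface{}{
			// Applied in series; errors matching "error_messages" are caught
			// and the original message is passed through unchanged.
			"transforms": []config.Config{
				{Type: "utility_err", Settings: map[string]interface{}{"message": "test error"}},
			},
			"error_messages": []string{"^test"},
		},
	}

	tf, err := transform.New(ctx, cfg)
	if err != nil {
		panic(err)
	}

	_, _ = tf.Transform(ctx, message.New().SetData([]byte(`{"a":"b"}`)))
}
```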
diff --git a/transform/meta_for_each.go b/transform/meta_for_each.go
index d31c2b5e..2a84d35a 100644
--- a/transform/meta_for_each.go
+++ b/transform/meta_for_each.go
@@ -5,18 +5,13 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 type metaForEachConfig struct {
-	// Transform that is applied to each item in the array.
-	//
-	// Deprecated: Transform exists for backwards compatibility and will be
-	// removed in a future release. Use Transforms instead.
-	Transform config.Config `json:"transform"`
 	// Transforms that are applied in series to the data in the array.
 	Transforms []config.Config
@@ -30,15 +25,21 @@ func (c *metaForEachConfig) Decode(in interface{}) error {
 }
 
 func (c *metaForEachConfig) Validate() error {
 	if c.Object.SourceKey == "" {
-		return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	if c.Object.TargetKey == "" {
-		return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption)
+	}
+
+	if len(c.Transforms) == 0 {
+		return fmt.Errorf("transforms: %v", iconfig.ErrMissingRequiredOption)
 	}
 
-	if c.Transform.Type == "" && len(c.Transforms) == 0 {
-		return fmt.Errorf("type: %v", errors.ErrMissingRequiredOption)
+	for _, t := range c.Transforms {
+		if t.Type == "" {
+			return fmt.Errorf("transform: %v", iconfig.ErrMissingRequiredOption)
+		}
 	}
 
 	return nil
@@ -62,14 +63,6 @@ func newMetaForEach(ctx context.Context, cfg config.Config) (*metaForEach, error
 		conf: conf,
 	}
 
-	if conf.Transform.Type != "" {
-		tfer, err := New(ctx, conf.Transform)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
-		}
-		tf.tf = tfer
-	}
-
 	tf.tfs = make([]Transformer, len(conf.Transforms))
 	for i, t := range conf.Transforms {
 		tfer, err := New(ctx, t)
@@ -86,21 +79,12 @@ func newMetaForEach(ctx context.Context, cfg config.Config) (*metaForEach, error
 type metaForEach struct {
 	conf metaForEachConfig
 
-	tf  Transformer
 	tfs []Transformer
 }
 
 func (tf *metaForEach) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) {
-	var msgs []*message.Message
-	var err error
-
 	if msg.IsControl() {
-		if len(tf.tfs) > 0 {
-			msgs, err = Apply(ctx, tf.tfs, msg)
-		} else {
-			msgs, err = tf.tf.Transform(ctx, msg)
-		}
-
+		msgs, err := Apply(ctx, tf.tfs, msg)
 		if err != nil {
 			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 		}
@@ -119,13 +103,8 @@ func (tf *metaForEach) Transform(ctx context.Context, msg *message.Message) ([]*
 	var arr []interface{}
 	for _, res := range value.Array() {
-		tmpMsg := message.New().SetData(res.Bytes())
-		if len(tf.tfs) > 0 {
-			msgs, err = Apply(ctx, tf.tfs, tmpMsg)
-		} else {
-			msgs, err = tf.tf.Transform(ctx, tmpMsg)
-		}
-
+		m := message.New().SetData(res.Bytes()).SetMetadata(msg.Metadata())
+		msgs, err := Apply(ctx, tf.tfs, m)
 		if err != nil {
 			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 		}
"github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &metaForEach{} @@ -17,34 +17,6 @@ var metaForEachTests = []struct { test []byte expected [][]byte }{ - { - "meta_pipeline", - config.Config{ - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "source_key": "a", - "target_key": "b", - }, - "transform": config.Config{ - Type: "meta_pipeline", - Settings: map[string]interface{}{ - "transforms": []config.Config{ - { - Type: "format_from_base64", - }, - { - Type: "format_from_gzip", - }, - }, - }, - }, - }, - }, - []byte(`{"a":["H4sIAMpcy2IA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA","H4sIAI/bzmIA/wXAMQ0AAADCMK1MAv6Pph2qjP92AwAAAA=="]}`), - [][]byte{ - []byte(`{"a":["H4sIAMpcy2IA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA","H4sIAI/bzmIA/wXAMQ0AAADCMK1MAv6Pph2qjP92AwAAAA=="],"b":["foo","bar"]}`), - }, - }, { "format_from_base64", config.Config{ @@ -53,8 +25,10 @@ var metaForEachTests = []struct { "source_key": "secrets", "target_key": "decoded", }, - "transform": config.Config{ - Type: "format_from_base64", + "transforms": []config.Config{ + { + Type: "format_from_base64", + }, }, }, }, @@ -71,10 +45,12 @@ var metaForEachTests = []struct { "source_key": "user_email", "target_key": "user_name", }, - "transform": config.Config{ - Type: "string_capture", - Settings: map[string]interface{}{ - "pattern": "^([^@]*)@.*$", + "transforms": []config.Config{ + { + Type: "string_capture", + Settings: map[string]interface{}{ + "pattern": "^([^@]*)@.*$", + }, }, }, }, @@ -92,8 +68,10 @@ var metaForEachTests = []struct { "source_key": "upcase", "target_key": "downcase", }, - "transform": config.Config{ - Type: "string_to_lower", + "transforms": []config.Config{ + { + Type: "string_to_lower", + }, }, }, }, @@ -110,8 +88,10 @@ var metaForEachTests = []struct { "source_key": "domain", "target_key": "subdomain", }, - "transform": config.Config{ - Type: "network_domain_subdomain", + "transforms": []config.Config{ + { + Type: "network_domain_subdomain", + }, }, }, }, @@ -128,8 +108,10 @@ var metaForEachTests = []struct { "source_key": "a", "target_key": "b", }, - "transform": config.Config{ - Type: "hash_sha256", + "transforms": []config.Config{ + { + Type: "hash_sha256", + }, }, }, }, @@ -146,13 +128,15 @@ var metaForEachTests = []struct { "source_key": "a", "target_key": "b", }, - "transform": config.Config{ - Type: "object_insert", - Settings: map[string]interface{}{ - "object": map[string]interface{}{ - "target_key": "baz", + "transforms": []config.Config{ + { + Type: "object_insert", + Settings: map[string]interface{}{ + "object": map[string]interface{}{ + "target_key": "baz", + }, + "value": "qux", }, - "value": "qux", }, }, }, @@ -170,11 +154,13 @@ var metaForEachTests = []struct { "source_key": "a", "target_key": "b", }, - "transform": config.Config{ - Type: "string_replace", - Settings: map[string]interface{}{ - "pattern": "r", - "replacement": "z", + "transforms": []config.Config{ + { + Type: "string_replace", + Settings: map[string]interface{}{ + "pattern": "r", + "replacement": "z", + }, }, }, }, @@ -192,10 +178,12 @@ var metaForEachTests = []struct { "source_key": "a", "target_key": "b", }, - "transform": config.Config{ - Type: "time_from_string", - Settings: map[string]interface{}{ - "format": "2006-01-02T15:04:05Z", + "transforms": []config.Config{ + { + Type: "time_from_string", + Settings: map[string]interface{}{ + "format": "2006-01-02T15:04:05Z", + }, }, }, }, 
diff --git a/transform/meta_kv_store_lock.go b/transform/meta_kv_store_lock.go
index 44f83b16..5179c1c1 100644
--- a/transform/meta_kv_store_lock.go
+++ b/transform/meta_kv_store_lock.go
@@ -7,11 +7,11 @@ import (
 	"sync"
 	"time"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/internal/kv"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
+	"github.com/brexhq/substation/v2/internal/kv"
 )
 
 type metaKVStoreLockObjectConfig struct {
@@ -42,12 +42,6 @@ type metaKVStoreLockConfig struct {
 	//
 	// This is optional and defaults to using no TTL when setting values into the store.
 	TTLOffset string `json:"ttl_offset"`
-
-	// Transform that is applied after the lock is acquired.
-	//
-	// Deprecated: Transform exists for backwards compatibility and will be
-	// removed in a future release. Use Transforms instead.
-	Transform config.Config `json:"transform"`
 	// Transforms that are applied in series after the lock is acquired.
 	Transforms []config.Config `json:"transforms"`
@@ -61,12 +55,18 @@ func (c *metaKVStoreLockConfig) Decode(in interface{}) error {
 }
 
 func (c *metaKVStoreLockConfig) Validate() error {
-	if c.Transform.Type == "" && len(c.Transforms) == 0 {
-		return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption)
+	if len(c.Transforms) == 0 {
+		return fmt.Errorf("transform: %v", iconfig.ErrMissingRequiredOption)
+	}
+
+	for _, t := range c.Transforms {
+		if t.Type == "" {
+			return fmt.Errorf("transform: %v", iconfig.ErrMissingRequiredOption)
+		}
 	}
 
 	if c.KVStore.Type == "" {
-		return fmt.Errorf("kv_store: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("kv_store: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	return nil
@@ -90,15 +90,6 @@ func newMetaKVStoreLock(ctx context.Context, cfg config.Config) (*metaKVStoreLoc
 		conf: conf,
 	}
 
-	if conf.Transform.Type != "" {
-		tfer, err := New(ctx, conf.Transform)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
-		}
-
-		tf.tf = tfer
-	}
-
 	tf.tfs = make([]Transformer, len(conf.Transforms))
 	for i, t := range conf.Transforms {
 		tfer, err := New(ctx, t)
@@ -136,7 +127,6 @@ func newMetaKVStoreLock(ctx context.Context, cfg config.Config) (*metaKVStoreLoc
 // held, then an error is returned. The lock is applied with a time-to-live (TTL) value, which is
 // used to determine when the lock is automatically released.
 type metaKVStoreLock struct {
-	tf  Transformer
 	tfs []Transformer
 
 	conf metaKVStoreLockConfig
@@ -154,15 +144,7 @@ func (tf *metaKVStoreLock) Transform(ctx context.Context, msg *message.Message)
 	defer tf.mu.Unlock()
 
 	if msg.IsControl() {
-		var msgs []*message.Message
-		var err error
-
-		if len(tf.tfs) > 0 {
-			msgs, err = Apply(ctx, tf.tfs, msg)
-		} else {
-			msgs, err = tf.tf.Transform(ctx, msg)
-		}
-
+		msgs, err := Apply(ctx, tf.tfs, msg)
 		if err != nil {
 			for _, key := range tf.keys {
 				_ = tf.locker.Unlock(ctx, key)
@@ -217,15 +199,7 @@ func (tf *metaKVStoreLock) Transform(ctx context.Context, msg *message.Message)
 
 	tf.keys = append(tf.keys, lockKey)
 
-	var msgs []*message.Message
-	var err error
-
-	if len(tf.tfs) > 0 {
-		msgs, err = Apply(ctx, tf.tfs, msg)
-	} else {
-		msgs, err = tf.tf.Transform(ctx, msg)
-	}
-
+	msgs, err := Apply(ctx, tf.tfs, msg)
 	if err != nil {
 		for _, key := range tf.keys {
 			_ = tf.locker.Unlock(ctx, key)
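`meta_kv_store_lock` keeps its `kv_store` requirement and now also requires a non-empty `transforms` list. A hedged sketch; the `memory` store type, the `meta_kv_store_lock` type string, and the inner `object_copy` transform are illustrative assumptions rather than values shown in this diff:

```go
package main

import (
	"context"

	"github.com/brexhq/substation/v2/config"
	"github.com/brexhq/substation/v2/transform"
)

func main() {
	// Both "kv_store" and "transforms" are required by Validate above;
	// "ttl_offset" is optional and controls when the lock auto-releases.
	cfg := config.Config{
		Type: "meta_kv_store_lock",
		Settings: map[string]interface{}{
			"kv_store": map[string]interface{}{
				"type": "memory", // assumed store type for the sketch
			},
			"ttl_offset": "1m",
			"transforms": []config.Config{
				{Type: "object_copy"},
			},
		},
	}

	if _, err := transform.New(context.Background(), cfg); err != nil {
		panic(err)
	}
}
```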
diff --git a/transform/meta_metric_duration.go b/transform/meta_metric_duration.go
index edd2cea2..7867bf8f 100644
--- a/transform/meta_metric_duration.go
+++ b/transform/meta_metric_duration.go
@@ -6,23 +6,19 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/metrics"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
+	"github.com/brexhq/substation/v2/internal/metrics"
 )
 
 type metaMetricDurationConfig struct {
-	ID     string         `json:"id"`
-	Metric iconfig.Metric `json:"metric"`
-
-	// Transform that has its duration measured.
-	//
-	// Deprecated: Transform exists for backwards compatibility and will be
-	// removed in a future release. Use Transforms instead.
-	Transform config.Config `json:"transform"`
 	// Transforms that have their total duration measured.
 	Transforms []config.Config `json:"transforms"`
+
+	ID     string         `json:"id"`
+	Metric iconfig.Metric `json:"metric"`
 }
 
 func (c *metaMetricDurationConfig) Decode(in interface{}) error {
@@ -50,14 +46,6 @@ func newMetaMetricsDuration(ctx context.Context, cfg config.Config) (*metaMetric
 		metric: m,
 	}
 
-	if conf.Transform.Type != "" {
-		tfer, err := New(ctx, conf.Transform)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
-		}
-		tf.tf = tfer
-	}
-
 	tf.tfs = make([]Transformer, len(conf.Transforms))
 	for i, t := range conf.Transforms {
 		tfer, err := New(ctx, t)
@@ -73,7 +61,6 @@ func newMetaMetricsDuration(ctx context.Context, cfg config.Config) (*metaMetric
 type metaMetricDuration struct {
 	conf metaMetricDurationConfig
 
-	tf  Transformer
 	tfs []Transformer
 
 	// This is measured in nanoseconds.
@@ -91,15 +78,7 @@ func (tf *metaMetricDuration) Transform(ctx context.Context, msg *message.Messag
 		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 	}
 
-	var msgs []*message.Message
-	var err error
-
-	if len(tf.tfs) > 0 {
-		msgs, err = Apply(ctx, tf.tfs, msg)
-	} else {
-		msgs, err = tf.tf.Transform(ctx, msg)
-	}
-
+	msgs, err := Apply(ctx, tf.tfs, msg)
 	if err != nil {
 		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 	}
@@ -112,11 +91,7 @@ func (tf *metaMetricDuration) Transform(ctx context.Context, msg *message.Messag
 		tf.duration += time.Since(start)
 	}()
 
-	if len(tf.tfs) > 0 {
-		return Apply(ctx, tf.tfs, msg)
-	}
-
-	return tf.tf.Transform(ctx, msg)
+	return Apply(ctx, tf.tfs, msg)
 }
 
 func (tf *metaMetricDuration) String() string {
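The same consolidation repeats across every meta transform in this diff: the singular transformer field is dropped, the `Transforms` list is compiled once with `New`, and execution funnels through the exported `Apply` helper. Condensed from the hunks above (this excerpt lives inside package `transform` and is not standalone):

```go
// Build the Transformer list once during construction...
tfs := make([]Transformer, len(conf.Transforms))
for i, t := range conf.Transforms {
	tfer, err := New(ctx, t)
	if err != nil {
		return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
	}
	tfs[i] = tfer
}

// ...then apply it in series wherever the old dual-path branching lived.
msgs, err := Apply(ctx, tfs, msg)
if err != nil {
	return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
}
```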
diff --git a/transform/meta_pipeline.go b/transform/meta_pipeline.go
deleted file mode 100644
index b402d407..00000000
--- a/transform/meta_pipeline.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package transform
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/message"
-)
-
-// errMetaPipelineArrayInput is returned when the transform is configured to process
-// an object, but the input is an array. Array values are not supported by this transform,
-// instead the input should be run through the metaForEach transform (which can encapsulate
-// the pipeline transform).
-var errMetaPipelineArrayInput = fmt.Errorf("input is an array")
-
-type metaPipelineConfig struct {
-	// Transforms that are applied in series to the data.
-	Transforms []config.Config `json:"transforms"`
-
-	ID     string         `json:"id"`
-	Object iconfig.Object `json:"object"`
-}
-
-func (c *metaPipelineConfig) Decode(in interface{}) error {
-	return iconfig.Decode(in, c)
-}
-
-func (c *metaPipelineConfig) Validate() error {
-	if c.Object.SourceKey == "" && c.Object.TargetKey != "" {
-		return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption)
-	}
-
-	if c.Object.SourceKey != "" && c.Object.TargetKey == "" {
-		return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption)
-	}
-
-	if len(c.Transforms) == 0 {
-		return fmt.Errorf("transforms: %v", errors.ErrMissingRequiredOption)
-	}
-
-	return nil
-}
-
-// Deprecated: newMetaPipeline exists for backwards compatibility and will be
-// removed in a future release. Use the Transforms fields on other meta transforms
-// instead.
-func newMetaPipeline(ctx context.Context, cfg config.Config) (*metaPipeline, error) {
-	conf := metaPipelineConfig{}
-	if err := conf.Decode(cfg.Settings); err != nil {
-		return nil, fmt.Errorf("transform meta_pipeline: %v", err)
-	}
-
-	if conf.ID == "" {
-		conf.ID = "meta_pipeline"
-	}
-
-	if err := conf.Validate(); err != nil {
-		return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
-	}
-
-	tf := metaPipeline{
-		conf:     conf,
-		isObject: conf.Object.SourceKey != "" && conf.Object.TargetKey != "",
-	}
-
-	var tform []Transformer
-	for _, c := range conf.Transforms {
-		t, err := New(ctx, c)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
-		}
-
-		tform = append(tform, t)
-	}
-	tf.tf = tform
-
-	return &tf, nil
-}
-
-type metaPipeline struct {
-	conf     metaPipelineConfig
-	isObject bool
-
-	tf []Transformer
-}
-
-func (tf *metaPipeline) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) {
-	if msg.IsControl() {
-		msgs, err := Apply(ctx, tf.tf, msg)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
-		}
-
-		return msgs, nil
-	}
-
-	if !tf.isObject {
-		msgs, err := Apply(ctx, tf.tf, msg)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
-		}
-
-		return msgs, nil
-	}
-
-	value := msg.GetValue(tf.conf.Object.SourceKey)
-	if !value.Exists() {
-		return []*message.Message{msg}, nil
-	}
-
-	if value.IsArray() {
-		return nil, fmt.Errorf("transform %s: key %s: %v", tf.conf.ID, tf.conf.Object.SourceKey, errMetaPipelineArrayInput)
-	}
-
-	res, err := Apply(ctx, tf.tf, message.New().SetData(value.Bytes()))
-	if err != nil {
-		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
-	}
-	var output []*message.Message
-	for _, msg := range res {
-		if err := msg.SetValue(tf.conf.Object.TargetKey, msg.Data()); err != nil {
-			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
-		}
-
-		output = append(output, msg)
-	}
-
-	return output, nil
-}
-
-func (tf *metaPipeline) String() string {
-	b, _ := json.Marshal(tf.conf)
-	return string(b)
-}
diff --git a/transform/meta_pipeline_test.go b/transform/meta_pipeline_test.go
deleted file mode 100644
index 607ad65d..00000000
--- a/transform/meta_pipeline_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package transform
-
-import (
-	"context"
-	"reflect"
-	"testing"
-
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
-)
-
-var _ Transformer = &metaPipeline{}
-
-var metaPipelineTests = []struct {
-	name     string
-	cfg      config.Config
-	test     []byte
-	expected [][]byte
-}{
-	{
-		"object",
-		config.Config{
-			Settings: map[string]interface{}{
-				"object": map[string]interface{}{
-					"source_key": "a",
-					"target_key": "a",
-				},
-				"transforms": []config.Config{
-					{
-						Type:     "format_from_base64",
-						Settings: map[string]interface{}{},
-					},
-					{
-						Type:     "format_from_gzip",
-						Settings: map[string]interface{}{},
-					},
-				},
-			},
-		},
-		[]byte(`{"a":"H4sIAO291GIA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA"}`),
-		[][]byte{
-			[]byte(`{"a":"foo"}`),
-		},
-	},
-	{
-		"data",
-		config.Config{
-			Settings: map[string]interface{}{
-				"transforms": []config.Config{
-					{
-						Type:     "format_from_base64",
-						Settings: map[string]interface{}{},
-					},
-					{
-						Type:     "format_from_gzip",
-						Settings: map[string]interface{}{},
-					},
-				},
-			},
-		},
-		[]byte(`H4sIAO291GIA/wXAIQ0AAACAsLbY93csBiFlc4wDAAAA`),
-		[][]byte{
-			[]byte(`foo`),
-		},
-	},
-}
-
-func TestMetaPipeline(t *testing.T) {
-	ctx := context.TODO()
-	for _, test := range metaPipelineTests {
-		t.Run(test.name, func(t *testing.T) {
-			tf, err := newMetaPipeline(ctx, test.cfg)
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			msg := message.New().SetData(test.test)
-			result, err := tf.Transform(ctx, msg)
-			if err != nil {
-				t.Error(err)
-			}
-
-			var data [][]byte
-			for _, c := range result {
-				data = append(data, c.Data())
-			}
-
-			if !reflect.DeepEqual(data, test.expected) {
-				t.Errorf("expected %s, got %s", test.expected, data)
-			}
-		})
-	}
-}
-
-func benchmarkMetaPipeline(b *testing.B, tf *metaPipeline, data []byte) {
-	ctx := context.TODO()
-	for i := 0; i < b.N; i++ {
-		msg := message.New().SetData(data)
-		_, _ = tf.Transform(ctx, msg)
-	}
-}
-
-func BenchmarkMetaPipeline(b *testing.B) {
-	for _, test := range metaPipelineTests {
-		tf, err := newMetaPipeline(context.TODO(), test.cfg)
-		if err != nil {
-			b.Fatal(err)
-		}
-
-		b.Run(test.name,
-			func(b *testing.B) {
-				benchmarkMetaPipeline(b, tf, test.test)
-			},
-		)
-	}
-}
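With `meta_pipeline` deleted, a former pipeline is just an ordinary series of transforms. A sketch of the equivalent v2 wiring, using the base64-then-gzip chain from the deleted `data` test:

```go
package main

import (
	"context"

	"github.com/brexhq/substation/v2/config"
	"github.com/brexhq/substation/v2/transform"
)

func main() {
	ctx := context.Background()

	// The same series the pipeline used to encapsulate, expressed as a
	// plain transforms list and compiled with transform.New.
	cfgs := []config.Config{
		{Type: "format_from_base64"},
		{Type: "format_from_gzip"},
	}

	tfs := make([]transform.Transformer, len(cfgs))
	for i, c := range cfgs {
		tf, err := transform.New(ctx, c)
		if err != nil {
			panic(err)
		}
		tfs[i] = tf
	}
	_ = tfs // Run in series with transform.Apply(ctx, tfs, msg).
}
```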
diff --git a/transform/meta_retry.go b/transform/meta_retry.go
index 6bd36697..fb918b13 100644
--- a/transform/meta_retry.go
+++ b/transform/meta_retry.go
@@ -7,11 +7,11 @@ import (
 	"regexp"
 	"time"
 
-	"github.com/brexhq/substation/condition"
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/condition"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 // errMetaRetryLimitReached is returned when the configured retry
@@ -21,11 +21,14 @@ var errMetaRetryLimitReached = fmt.Errorf("retry limit reached")
 
 type metaRetryConfig struct {
 	// Transforms that are applied in series, then checked for success
-	// based on the condition or errors.
+	// based on the condition or error messages.
 	Transforms []config.Config `json:"transforms"`
 	// Condition that must be true for the transforms to be considered
-	// a success.
-	Condition condition.Config `json:"condition"`
+	// a success, otherwise the transforms are retried.
+	Condition config.Config `json:"condition"`
+	// ErrorMessages are regular expressions that match error messages
+	// and determine if the transforms should be retried.
+	ErrorMessages []string `json:"error_messages"`
 
 	Retry iconfig.Retry `json:"retry"`
 	ID    string        `json:"id"`
@@ -38,7 +41,7 @@ func (c *metaRetryConfig) Decode(in interface{}) error {
 func (c *metaRetryConfig) Validate() error {
 	for _, t := range c.Transforms {
 		if t.Type == "" {
-			return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption)
+			return fmt.Errorf("transform: %v", iconfig.ErrMissingRequiredOption)
 		}
 	}
 
@@ -59,42 +62,45 @@ func newMetaRetry(ctx context.Context, cfg config.Config) (*metaRetry, error) {
 		return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
 	}
 
-	tforms := make([]Transformer, len(conf.Transforms))
+	tf := metaRetry{
+		conf: conf,
+	}
+
+	tf.transforms = make([]Transformer, len(conf.Transforms))
 	for i, t := range conf.Transforms {
 		tfer, err := New(ctx, t)
 		if err != nil {
 			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
 		}
 
-		tforms[i] = tfer
+		tf.transforms[i] = tfer
 	}
 
-	cnd, err := condition.New(ctx, conf.Condition)
-	if err != nil {
-		return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
+	// If no condition is configured, then the transforms are always
+	// successful.
+	tf.condition = &metaSwitchDefaultInspector{}
+	if conf.Condition.Type != "" {
+		cnd, err := condition.New(ctx, conf.Condition)
+		if err != nil {
+			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
+		}
+		tf.condition = cnd
 	}
 
 	del, err := time.ParseDuration(conf.Retry.Delay)
 	if err != nil {
 		return nil, fmt.Errorf("transform %s: delay: %v", conf.ID, err)
 	}
+	tf.delay = del
 
-	errs := make([]*regexp.Regexp, len(conf.Retry.ErrorMessages))
-	for i, e := range conf.Retry.ErrorMessages {
+	tf.errorMessages = make([]*regexp.Regexp, len(conf.ErrorMessages))
+	for i, e := range conf.ErrorMessages {
 		r, err := regexp.Compile(e)
 		if err != nil {
 			return nil, fmt.Errorf("transform %s: error_messages: %v", conf.ID, err)
 		}
 
-		errs[i] = r
-	}
-
-	tf := metaRetry{
-		conf:          conf,
-		transforms:    tforms,
-		condition:     cnd,
-		delay:         del,
-		errorMessages: errs,
+		tf.errorMessages[i] = r
 	}
 
 	return &tf, nil
@@ -103,7 +109,7 @@ func newMetaRetry(ctx context.Context, cfg config.Config) (*metaRetry, error) {
 type metaRetry struct {
 	conf metaRetryConfig
 
-	condition  condition.Operator
+	condition  condition.Conditioner
 	transforms []Transformer
 	delay      time.Duration
 	errorMessages []*regexp.Regexp
@@ -146,7 +152,7 @@ LOOP:
 			continue
 		}
 
-		ok, err := tf.condition.Operate(ctx, m)
+		ok, err := tf.condition.Condition(ctx, m)
 		if err != nil {
 			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 		}
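`meta_retry` now takes a plain condition config plus top-level `error_messages`, with the delay parsed by `time.ParseDuration`. A hedged sketch; the `meta_retry` type string and the `retry` settings shape are inferred from the code above rather than shown verbatim in this diff:

```go
package main

import (
	"context"

	"github.com/brexhq/substation/v2/config"
	"github.com/brexhq/substation/v2/transform"
)

func main() {
	cfg := config.Config{
		Type: "meta_retry",
		Settings: map[string]interface{}{
			"transforms": []config.Config{
				{Type: "object_copy"},
			},
			// Condition config (type "any" shown); if omitted, the
			// transforms are always considered successful.
			"condition": map[string]interface{}{
				"type": "any",
				"settings": map[string]interface{}{
					"conditions": []map[string]interface{}{
						{
							"type": "string_contains",
							"settings": map[string]interface{}{
								"object": map[string]interface{}{"source_key": "a"},
								"value":  "b",
							},
						},
					},
				},
			},
			// Moved out of the retry settings in v2.
			"error_messages": []string{"retry limit reached"},
			"retry":          map[string]interface{}{"delay": "1s"},
		},
	}

	if _, err := transform.New(context.Background(), cfg); err != nil {
		panic(err)
	}
}
```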
diff --git a/transform/meta_switch.go b/transform/meta_switch.go
index 041e6437..d8919f1d 100644
--- a/transform/meta_switch.go
+++ b/transform/meta_switch.go
@@ -5,22 +5,16 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/brexhq/substation/condition"
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/condition"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 type metaSwitchCaseConfig struct {
 	// Condition that must be true for the transforms to be applied.
-	Condition condition.Config `json:"condition"`
-
-	// Transform that is applied when the condition is true.
-	//
-	// Deprecated: Transform exists for backwards compatibility and will be
-	// removed in a future release. Use Transforms instead.
-	Transform config.Config `json:"transform"`
+	Condition config.Config `json:"condition"`
 	// Transforms that are applied in series when the condition is true.
 	Transforms []config.Config `json:"transforms"`
 }
@@ -40,12 +34,18 @@ func (c *metaSwitchConfig) Decode(in interface{}) error {
 }
 
 func (c *metaSwitchConfig) Validate() error {
 	if len(c.Cases) == 0 {
-		return fmt.Errorf("cases: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("cases: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	for _, c := range c.Cases {
-		if c.Transform.Type == "" && len(c.Transforms) == 0 {
-			return fmt.Errorf("transform: %v", errors.ErrMissingRequiredOption)
+		if len(c.Transforms) == 0 {
+			return fmt.Errorf("transform: %v", iconfig.ErrMissingRequiredOption)
+		}
+
+		for _, t := range c.Transforms {
+			if t.Type == "" {
+				return fmt.Errorf("type: %v", iconfig.ErrMissingRequiredOption)
+			}
		}
 	}
 
@@ -53,11 +53,16 @@ func (c *metaSwitchConfig) Validate() error {
 }
 
 type metaSwitchConditional struct {
-	operator    condition.Operator
-	transformer Transformer
+	condition condition.Conditioner
 
 	transformers []Transformer
 }
 
+type metaSwitchDefaultInspector struct{}
+
+func (i *metaSwitchDefaultInspector) Condition(ctx context.Context, msg *message.Message) (bool, error) {
+	return true, nil
+}
+
 func newMetaSwitch(ctx context.Context, cfg config.Config) (*metaSwitch, error) {
 	conf := metaSwitchConfig{}
 	if err := conf.Decode(cfg.Settings); err != nil {
@@ -76,25 +81,21 @@ func newMetaSwitch(ctx context.Context, cfg config.Config) (*metaSwitch, error)
 	for i, s := range conf.Cases {
 		conditional := metaSwitchConditional{}
 
-		op, err := condition.New(ctx, s.Condition)
-		if err != nil {
-			return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
-		}
-		conditional.operator = op
-
-		if s.Transform.Type != "" {
-			tf, err := New(ctx, s.Transform)
+		// If no condition is configured, then the transforms are always
+		// applied.
+		conditional.condition = &metaSwitchDefaultInspector{}
+		if s.Condition.Type != "" {
+			op, err := condition.New(ctx, s.Condition)
 			if err != nil {
-				return nil, fmt.Errorf("transform meta_switch: %v", err)
+				return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
 			}
-
-			conditional.transformer = tf
+			conditional.condition = op
 		}
 
 		for _, c := range s.Transforms {
 			tf, err := New(ctx, c)
 			if err != nil {
-				return nil, fmt.Errorf("transform meta_switch: %v", err)
+				return nil, fmt.Errorf("transform %s: %v", conf.ID, err)
 			}
 
 			conditional.transformers = append(conditional.transformers, tf)
@@ -121,15 +122,7 @@ func (tf *metaSwitch) Transform(ctx context.Context, msg *message.Message) ([]*m
 	if msg.IsControl() {
 		var messages []*message.Message
 		for _, c := range tf.conditional {
-			var msgs []*message.Message
-			var err error
-
-			if len(c.transformers) > 0 {
-				msgs, err = Apply(ctx, c.transformers, msg)
-			} else {
-				msgs, err = c.transformer.Transform(ctx, msg)
-			}
-
+			msgs, err := Apply(ctx, c.transformers, msg)
 			if err != nil {
 				return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 			}
@@ -153,7 +146,7 @@ func (tf *metaSwitch) Transform(ctx context.Context, msg *message.Message) ([]*m
 	}
 
 	for _, c := range tf.conditional {
-		ok, err := c.operator.Operate(ctx, msg)
+		ok, err := c.condition.Condition(ctx, msg)
 		if err != nil {
 			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 		}
@@ -162,13 +155,7 @@ func (tf *metaSwitch) Transform(ctx context.Context, msg *message.Message) ([]*m
 			continue
 		}
 
-		var msgs []*message.Message
-		if len(c.transformers) > 0 {
-			msgs, err = Apply(ctx, c.transformers, msg)
-		} else {
-			msgs, err = c.transformer.Transform(ctx, msg)
-		}
-
+		msgs, err := Apply(ctx, c.transformers, msg)
 		if err != nil {
 			return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err)
 		}
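The updated tests below give the full v2 `meta_switch` shape: each case pairs an optional condition config with a `transforms` list, and a case without a condition falls through to the default inspector (always true). A compact sketch assembled from those tests:

```go
package main

import (
	"context"

	"github.com/brexhq/substation/v2/config"
	"github.com/brexhq/substation/v2/transform"
)

func main() {
	cfg := config.Config{
		Type: "meta_switch",
		Settings: map[string]interface{}{
			"cases": []map[string]interface{}{
				{
					"condition": map[string]interface{}{
						"type": "string_contains",
						"settings": map[string]interface{}{
							"object": map[string]interface{}{"source_key": "a"},
							"value":  "b",
						},
					},
					"transforms": []map[string]interface{}{
						{
							"type": "object_copy",
							"settings": map[string]interface{}{
								"object": map[string]interface{}{
									"source_key": "a",
									"target_key": "c",
								},
							},
						},
					},
				},
				{
					// No condition: acts as the default (else) case.
					"transforms": []map[string]interface{}{
						{"type": "object_copy"},
					},
				},
			},
		},
	}

	if _, err := transform.New(context.Background(), cfg); err != nil {
		panic(err)
	}
}
```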
diff --git a/transform/meta_switch_test.go b/transform/meta_switch_test.go
index 41c4bb8b..96c24563 100644
--- a/transform/meta_switch_test.go
+++ b/transform/meta_switch_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &metaSwitch{}
@@ -26,25 +26,29 @@ var metaSwitchTests = []struct {
 			"cases": []map[string]interface{}{
 				{
 					"condition": map[string]interface{}{
-						"operator": "any",
-						"inspectors": []map[string]interface{}{
-							{
-								"type": "string_contains",
-								"settings": map[string]interface{}{
-									"object": map[string]interface{}{
-										"source_key": "a",
+						"type": "any",
+						"settings": map[string]interface{}{
+							"conditions": []map[string]interface{}{
+								{
+									"type": "string_contains",
+									"settings": map[string]interface{}{
+										"object": map[string]interface{}{
+											"source_key": "a",
+										},
+										"value": "b",
 									},
-									"value": "b",
 								},
 							},
 						},
 					},
-					"transform": map[string]interface{}{
-						"type": "object_copy",
-						"settings": map[string]interface{}{
-							"object": map[string]interface{}{
-								"source_key": "a",
-								"target_key": "c",
+					"transforms": []map[string]interface{}{
+						{
+							"type": "object_copy",
+							"settings": map[string]interface{}{
+								"object": map[string]interface{}{
+									"source_key": "a",
+									"target_key": "c",
+								},
+							},
 						},
 					},
@@ -64,15 +68,17 @@ var metaSwitchTests = []struct {
 			"cases": []map[string]interface{}{
 				{
 					"condition": map[string]interface{}{
-						"operator": "any",
-						"inspectors": []map[string]interface{}{
-							{
-								"type": "string_contains",
-								"settings": map[string]interface{}{
-									"object": map[string]interface{}{
-										"source_key": "a",
+						"type": "any",
+						"settings": map[string]interface{}{
+							"conditions": []map[string]interface{}{
+								{
+									"type": "string_contains",
+									"settings": map[string]interface{}{
+										"object": map[string]interface{}{
+											"source_key": "a",
+										},
+										"value": "b",
 									},
-									"value": "b",
 								},
 							},
 						},
 					},
@@ -108,66 +114,12 @@ var metaSwitchTests = []struct {
 			"cases": []map[string]interface{}{
 				{
 					"condition": map[string]interface{}{
-						"operator": "any",
-						"inspectors": []map[string]interface{}{
-							{
-								"type": "string_contains",
-								"settings": map[string]interface{}{
-									"object": map[string]interface{}{
-										"source_key": "a",
-									},
-									"value": "c",
-								},
-							},
-						},
-					},
-					"transform": map[string]interface{}{
-						"type": "object_copy",
-						"settings": map[string]interface{}{
-							"object": map[string]interface{}{
-								"source_key": "a",
-								"target_key": "c",
-							},
-						},
-					},
-				},
-				{
-					"transform": map[string]interface{}{
-						"type": "object_copy",
+						"type": "string_contains",
 						"settings": map[string]interface{}{
 							"object": map[string]interface{}{
 								"source_key": "a",
-								"target_key": "x",
-							},
-						},
-					},
-				},
-			},
-		},
-	},
-	[]byte(`{"a":"b"}`),
-	[][]byte{
-		[]byte(`{"a":"b","x":"b"}`),
-	},
-},
-{
-	"if_else",
-	config.Config{
-		Settings: map[string]interface{}{
-			"cases": []map[string]interface{}{
-				{
-					"condition": map[string]interface{}{
-						"operator": "any",
-						"inspectors": []map[string]interface{}{
-							{
-								"type": "string_contains",
-								"settings": map[string]interface{}{
-									"object": map[string]interface{}{
-										"source_key": "a",
-									},
-									"value": "c",
-								},
 							},
+							"value": "c",
 						},
 					},
 					"transforms": []map[string]interface{}{
@@ -189,7 +141,7 @@ var metaSwitchTests = []struct {
 							"settings": map[string]interface{}{
 								"object": map[string]interface{}{
 									"source_key": "a",
-									"target_key": "x",
+									"target_key": "z",
 								},
 							},
 						},
@@ -200,11 +152,9 @@ var metaSwitchTests = []struct {
 	},
 	[]byte(`{"a":"b"}`),
 	[][]byte{
-		[]byte(`{"a":"b","x":"b"}`),
+		[]byte(`{"a":"b","z":"b"}`),
 	},
 },
-// This test simulates an if/else if block by having all conditions
-// fail. The data should be unchanged.
 {
 	"if_else_if",
 	config.Config{
@@ -212,78 +162,17 @@ var metaSwitchTests = []struct {
 		"cases": []map[string]interface{}{
 			{
 				"condition": map[string]interface{}{
-					"operator": "any",
-					"inspectors": []map[string]interface{}{
-						{
-							"type": "string_contains",
-							"settings": map[string]interface{}{
-								"object": map[string]interface{}{
-									"source_key": "a",
-								},
-								"value": "c",
-							},
-						},
-					},
-				},
-				"transform": map[string]interface{}{
-					"type": "object_copy",
+					"type": "any",
 					"settings": map[string]interface{}{
-						"object": map[string]interface{}{
-							"source_key": "a",
-							"target_key": "c",
-						},
-					},
-				},
-			},
-			{
-				"condition": map[string]interface{}{
-					"operator": "any",
-					"inspectors": []map[string]interface{}{
-						{
-							"type": "string_contains",
-							"settings": map[string]interface{}{
-								"object": map[string]interface{}{
-									"source_key": "a",
+						"conditions": []map[string]interface{}{
+							{
+								"type": "string_contains",
+								"settings": map[string]interface{}{
+									"object": map[string]interface{}{
+										"source_key": "a",
+									},
+									"value": "c",
 								},
-								"value": "d",
-							},
-						},
-					},
-				},
-				"transform": map[string]interface{}{
-					"type": "object_copy",
-					"settings": map[string]interface{}{
-						"object": map[string]interface{}{
-							"source_key": "a",
-							"target_key": "d",
-						},
-					},
-				},
-			},
-		},
-	},
-},
-[]byte(`{"a":"b"}`),
-[][]byte{
-	[]byte(`{"a":"b"}`),
-},
-},
-{
-	"if_else_if",
-	config.Config{
-		Settings: map[string]interface{}{
-			"cases": []map[string]interface{}{
-				{
-					"condition": map[string]interface{}{
-						"operator": "any",
-						"inspectors": []map[string]interface{}{
-							{
-								"type": "string_contains",
-								"settings": map[string]interface{}{
-									"object": map[string]interface{}{
-										"source_key": "a",
-									},
-									"value": "c",
 							},
 						},
 					},
@@ -302,15 +191,17 @@ var metaSwitchTests = []struct {
 				},
 				{
 					"condition": map[string]interface{}{
-						"operator": "any",
-						"inspectors": []map[string]interface{}{
-							{
-								"type": "string_contains",
-								"settings": map[string]interface{}{
-									"object": map[string]interface{}{
-										"source_key": "a",
+						"type": "any",
+						"settings": map[string]interface{}{
+							"conditions": []map[string]interface{}{
+								{
+									"type": "string_contains",
+									"settings": map[string]interface{}{
+										"object": map[string]interface{}{
+											"source_key": "a",
+										},
+										"value": "d",
 									},
-									"value": "d",
 								},
 							},
 						},
 					},
diff --git a/transform/network.go b/transform/network.go
index 82ab4e52..31259837 100644
--- a/transform/network.go
+++ b/transform/network.go
@@ -3,8 +3,7 @@ package transform
 import (
 	"fmt"
 
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 type networkDomainConfig struct {
@@ -18,11 +17,11 @@ func (c *networkDomainConfig) Validate() error {
 	if c.Object.SourceKey == "" && c.Object.TargetKey != "" {
-		return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	if c.Object.SourceKey != "" && c.Object.TargetKey == "" {
-		return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	return nil
diff --git a/transform/network_domain_registered_domain.go b/transform/network_domain_registered_domain.go
index da59e94d..9d62f232 100644
--- a/transform/network_domain_registered_domain.go
+++ b/transform/network_domain_registered_domain.go
@@ -7,8 +7,8 @@ import (
 
 	"golang.org/x/net/publicsuffix"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 func newNetworkDomainRegisteredDomain(_ context.Context, cfg config.Config) (*networkDomainRegisteredDomain, error) {
diff --git a/transform/network_domain_registered_domain_test.go b/transform/network_domain_registered_domain_test.go
index 1d076b12..eabfb80a 100644
--- a/transform/network_domain_registered_domain_test.go
+++ b/transform/network_domain_registered_domain_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &networkDomainRegisteredDomain{}
diff --git a/transform/network_domain_subdomain.go b/transform/network_domain_subdomain.go
index cba23bcb..2d7cd015 100644
--- a/transform/network_domain_subdomain.go
+++ b/transform/network_domain_subdomain.go
@@ -8,8 +8,8 @@ import (
 
 	"golang.org/x/net/publicsuffix"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 // errFmtSubdomainNoSubdomain is returned when a domain without a subdomain is
diff --git a/transform/network_domain_subdomain_test.go b/transform/network_domain_subdomain_test.go
index 636bb2f2..4faf5f33 100644
--- a/transform/network_domain_subdomain_test.go
+++ b/transform/network_domain_subdomain_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &networkDomainSubdomain{}
diff --git a/transform/network_domain_top_level_domain.go b/transform/network_domain_top_level_domain.go
index 79acca37..12b761e1 100644
--- a/transform/network_domain_top_level_domain.go
+++ b/transform/network_domain_top_level_domain.go
@@ -7,8 +7,8 @@ import (
 
 	"golang.org/x/net/publicsuffix"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 func newNetworkDomainTopLevelDomain(_ context.Context, cfg config.Config) (*networkDomainTopLevelDomain, error) {
diff --git a/transform/network_domain_top_level_domain_test.go b/transform/network_domain_top_level_domain_test.go
index d0cc7fbe..af61dd50 100644
--- a/transform/network_domain_top_level_domain_test.go
+++ b/transform/network_domain_top_level_domain_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &networkDomainTopLevelDomain{}
diff --git a/transform/number.go b/transform/number.go
index ec02512b..8057f4cd 100644
--- a/transform/number.go
+++ b/transform/number.go
@@ -4,8 +4,7 @@ import (
 	"fmt"
 	"strconv"
 
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 // Use this config for any Number transform that only requires a single value.
@@ -23,11 +22,11 @@ func (c *numberValConfig) Decode(in interface{}) error {
 }
 
 // 0.0 is a valid value and should not be checked.
 func (c *numberValConfig) Validate() error {
 	if c.Object.SourceKey == "" && c.Object.TargetKey != "" {
-		return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	if c.Object.SourceKey != "" && c.Object.TargetKey == "" {
-		return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	return nil
@@ -44,11 +43,11 @@ func (c *numberMathConfig) Decode(in interface{}) error {
 func (c *numberMathConfig) Validate() error {
 	if c.Object.SourceKey == "" && c.Object.TargetKey != "" {
-		return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	if c.Object.SourceKey != "" && c.Object.TargetKey == "" {
-		return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	return nil
diff --git a/transform/number_math_addition.go b/transform/number_math_addition.go
index 02f673ea..63bc2745 100644
--- a/transform/number_math_addition.go
+++ b/transform/number_math_addition.go
@@ -6,9 +6,10 @@ import (
 	"fmt"
 	"strconv"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 func newNumberMathAddition(_ context.Context, cfg config.Config) (*numberMathAddition, error) {
diff --git a/transform/number_math_addition_test.go b/transform/number_math_addition_test.go
index c45a5ed2..1991d5da 100644
--- a/transform/number_math_addition_test.go
+++ b/transform/number_math_addition_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &numberMathAddition{}
diff --git a/transform/number_math_division.go b/transform/number_math_division.go
index 60e2735c..84dd9427 100644
--- a/transform/number_math_division.go
+++ b/transform/number_math_division.go
@@ -6,9 +6,10 @@ import (
 	"fmt"
 	"strconv"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 func newNumberMathDivision(_ context.Context, cfg config.Config) (*numberMathDivision, error) {
diff --git a/transform/number_math_division_test.go b/transform/number_math_division_test.go
index b7163ad1..b2118ba0 100644
--- a/transform/number_math_division_test.go
+++ b/transform/number_math_division_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &numberMathDivision{}
diff --git a/transform/number_math_multiplication.go b/transform/number_math_multiplication.go
index 56d27573..c4530ae8 100644
--- a/transform/number_math_multiplication.go
+++ b/transform/number_math_multiplication.go
@@ -6,9 +6,10 @@ import (
 	"fmt"
 	"strconv"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 func newNumberMathMultiplication(_ context.Context, cfg config.Config) (*numberMathMultiplication, error) {
diff --git a/transform/number_math_multiplication_test.go b/transform/number_math_multiplication_test.go
index 9516e879..f45915e4 100644
--- a/transform/number_math_multiplication_test.go
+++ b/transform/number_math_multiplication_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &numberMathMultiplication{}
diff --git a/transform/number_math_subtraction.go b/transform/number_math_subtraction.go
index cca26e92..f1b64f24 100644
--- a/transform/number_math_subtraction.go
+++ b/transform/number_math_subtraction.go
@@ -6,9 +6,10 @@ import (
 	"fmt"
 	"strconv"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 func newNumberMathSubtraction(_ context.Context, cfg config.Config) (*numberMathSubtraction, error) {
diff --git a/transform/number_math_subtraction_test.go b/transform/number_math_subtraction_test.go
index 83403dc9..91a6ca0b 100644
--- a/transform/number_math_subtraction_test.go
+++ b/transform/number_math_subtraction_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &numberMathSubtraction{}
diff --git a/transform/number_maximum.go b/transform/number_maximum.go
index 45f28ef4..f387395c 100644
--- a/transform/number_maximum.go
+++ b/transform/number_maximum.go
@@ -6,9 +6,10 @@ import (
 	"fmt"
 	"math"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 func newNumberMaximum(_ context.Context, cfg config.Config) (*numberMaximum, error) {
diff --git a/transform/number_maximum_test.go b/transform/number_maximum_test.go
index 1850d557..2bd3dba3 100644
--- a/transform/number_maximum_test.go
+++ b/transform/number_maximum_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &numberMaximum{}
diff --git a/transform/number_minimum.go b/transform/number_minimum.go
index 124f1d93..fc0ae8dd 100644
--- a/transform/number_minimum.go
+++ b/transform/number_minimum.go
@@ -6,9 +6,10 @@ import (
 	"fmt"
 	"math"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/message"
"github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) func newNumberMinimum(_ context.Context, cfg config.Config) (*numberMinimum, error) { diff --git a/transform/number_minimum_test.go b/transform/number_minimum_test.go index d45d649e..7c0965fe 100644 --- a/transform/number_minimum_test.go +++ b/transform/number_minimum_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &numberMinimum{} diff --git a/transform/object_copy.go b/transform/object_copy.go index edfad8c4..a3557a2f 100644 --- a/transform/object_copy.go +++ b/transform/object_copy.go @@ -5,9 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type objectCopyConfig struct { diff --git a/transform/object_copy_test.go b/transform/object_copy_test.go index 37f36e67..34eccd2c 100644 --- a/transform/object_copy_test.go +++ b/transform/object_copy_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &objectCopy{} diff --git a/transform/object_delete.go b/transform/object_delete.go index 12bb042a..2d0f5da7 100644 --- a/transform/object_delete.go +++ b/transform/object_delete.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type objectDeleteConfig struct { @@ -22,7 +22,7 @@ func (c *objectDeleteConfig) Decode(in interface{}) error { func (c *objectDeleteConfig) Validate() error { if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/object_delete_test.go b/transform/object_delete_test.go index 86e43855..9e7e8727 100644 --- a/transform/object_delete_test.go +++ b/transform/object_delete_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &objectDelete{} diff --git a/transform/object_insert.go b/transform/object_insert.go index 21a78303..2a92f97a 100644 --- a/transform/object_insert.go +++ b/transform/object_insert.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type 
 type objectInsertConfig struct {
@@ -25,11 +25,11 @@ func (c *objectInsertConfig) Decode(in interface{}) error {
 
 func (c *objectInsertConfig) Validate() error {
 	if c.Object.TargetKey == "" {
-		return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	if c.Value == nil {
-		return fmt.Errorf("value: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("value: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	return nil
diff --git a/transform/object_insert_test.go b/transform/object_insert_test.go
index 31bd1ad2..d244128b 100644
--- a/transform/object_insert_test.go
+++ b/transform/object_insert_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &objectInsert{}
diff --git a/transform/object_jq.go b/transform/object_jq.go
index 70c68727..255ade54 100644
--- a/transform/object_jq.go
+++ b/transform/object_jq.go
@@ -5,11 +5,12 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/message"
 	"github.com/itchyny/gojq"
+
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 // errObjectJQNoOutputGenerated is returned when jq generates no output.
@@ -28,7 +29,7 @@ func (c *objectJQConfig) Decode(in interface{}) error {
 
 func (c *objectJQConfig) Validate() error {
 	if c.Filter == "" {
-		return fmt.Errorf("query: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("query: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	return nil
diff --git a/transform/object_jq_test.go b/transform/object_jq_test.go
index fcdd5f81..04fded85 100644
--- a/transform/object_jq_test.go
+++ b/transform/object_jq_test.go
@@ -5,8 +5,8 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/brexhq/substation/config"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
 )
 
 var _ Transformer = &objectJQ{}
diff --git a/transform/object_to_boolean.go b/transform/object_to_boolean.go
index e6c84806..75bceb06 100644
--- a/transform/object_to_boolean.go
+++ b/transform/object_to_boolean.go
@@ -5,10 +5,10 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/brexhq/substation/config"
-	iconfig "github.com/brexhq/substation/internal/config"
-	"github.com/brexhq/substation/internal/errors"
-	"github.com/brexhq/substation/message"
+	"github.com/brexhq/substation/v2/config"
+	"github.com/brexhq/substation/v2/message"
+
+	iconfig "github.com/brexhq/substation/v2/internal/config"
 )
 
 type objectToBooleanConfig struct {
@@ -22,11 +22,11 @@ func (c *objectToBooleanConfig) Decode(in interface{}) error {
 
 func (c *objectToBooleanConfig) Validate() error {
 	if c.Object.SourceKey == "" && c.Object.TargetKey != "" {
-		return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	if c.Object.SourceKey != "" && c.Object.TargetKey == "" {
-		return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption)
+		return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption)
 	}
 
 	return nil
b/transform/object_to_boolean_test.go index 8bb4ccb4..e7618601 100644 --- a/transform/object_to_boolean_test.go +++ b/transform/object_to_boolean_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &objectToBoolean{} diff --git a/transform/object_to_float.go b/transform/object_to_float.go index 24d1ac5e..c24c286d 100644 --- a/transform/object_to_float.go +++ b/transform/object_to_float.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type objectToFloatConfig struct { @@ -22,11 +22,11 @@ func (c *objectToFloatConfig) Decode(in interface{}) error { func (c *objectToFloatConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/object_to_float_test.go b/transform/object_to_float_test.go index 42a54f35..c6630514 100644 --- a/transform/object_to_float_test.go +++ b/transform/object_to_float_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &objectToFloat{} diff --git a/transform/object_to_integer.go b/transform/object_to_integer.go index 4cdd4fc7..c3c83725 100644 --- a/transform/object_to_integer.go +++ b/transform/object_to_integer.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type objectToIntegerConfig struct { @@ -22,11 +22,11 @@ func (c *objectToIntegerConfig) Decode(in interface{}) error { func (c *objectToIntegerConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/object_to_integer_test.go b/transform/object_to_integer_test.go index 21af653a..56bd7510 100644 --- a/transform/object_to_integer_test.go +++ b/transform/object_to_integer_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + 
"github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &objectToInteger{} diff --git a/transform/object_to_string.go b/transform/object_to_string.go index 6bf886dd..a1217487 100644 --- a/transform/object_to_string.go +++ b/transform/object_to_string.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type objectToStringConfig struct { @@ -22,11 +22,11 @@ func (c *objectToStringConfig) Decode(in interface{}) error { func (c *objectToStringConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/object_to_string_test.go b/transform/object_to_string_test.go index c7b9b182..45bdfc96 100644 --- a/transform/object_to_string_test.go +++ b/transform/object_to_string_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &objectToString{} diff --git a/transform/object_to_unsigned_integer.go b/transform/object_to_unsigned_integer.go index 90f59889..e9dcc074 100644 --- a/transform/object_to_unsigned_integer.go +++ b/transform/object_to_unsigned_integer.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type objectToUnsignedIntegerConfig struct { @@ -22,11 +22,11 @@ func (c *objectToUnsignedIntegerConfig) Decode(in interface{}) error { func (c *objectToUnsignedIntegerConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/object_to_unsigned_integer_test.go b/transform/object_to_unsigned_integer_test.go index e4709bc2..7e8df15d 100644 --- a/transform/object_to_unsigned_integer_test.go +++ b/transform/object_to_unsigned_integer_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &objectToUnsignedInteger{} diff --git a/transform/send.go b/transform/send.go 
index db5b6739..c910647e 100644 --- a/transform/send.go +++ b/transform/send.go @@ -4,13 +4,13 @@ import ( "context" "fmt" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/message" ) -// errSendBatchMisconfigured is returned when data cannot be successfully added +// errBatchNoMoreData is returned when data cannot be successfully added // to a batch. This is usually due to a misconfiguration, such as a size, count, // or duration limit. -var errSendBatchMisconfigured = fmt.Errorf("data could not be added to batch") +var errBatchNoMoreData = fmt.Errorf("data could not be added to batch") func withTransforms(ctx context.Context, tf []Transformer, items [][]byte) ([][]byte, error) { if tf == nil { diff --git a/transform/send_aws_data_firehose.go b/transform/send_aws_data_firehose.go new file mode 100644 index 00000000..9f524e70 --- /dev/null +++ b/transform/send_aws_data_firehose.go @@ -0,0 +1,209 @@ +package transform + +import ( + "context" + "encoding/json" + "fmt" + "sync" + + "github.com/aws/aws-sdk-go-v2/service/firehose" + "github.com/aws/aws-sdk-go-v2/service/firehose/types" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" +) + +// Records greater than 1000 KiB in size cannot be put into Kinesis Firehose. +const sendAWSDataFirehoseMessageSizeLimit = 1024 * 1000 + +// errSendAWSDataFirehoseRecordSizeLimit is returned when data exceeds the +// Kinesis Firehose record size limit. If this error occurs, +// then drop or reduce the size of the data before attempting to +// send it to Kinesis Firehose. +var errSendAWSDataFirehoseRecordSizeLimit = fmt.Errorf("data exceeded size limit") + +type sendAWSDataFirehoseConfig struct { + // AuxTransforms are applied to batched data before it is sent. + AuxTransforms []config.Config `json:"auxiliary_transforms"` + + ID string `json:"id"` + Object iconfig.Object `json:"object"` + Batch iconfig.Batch `json:"batch"` + AWS iconfig.AWS `json:"aws"` +} + +func (c *sendAWSDataFirehoseConfig) Decode(in interface{}) error { + return iconfig.Decode(in, c) +} + +func (c *sendAWSDataFirehoseConfig) Validate() error { + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) + } + + return nil +} + +func newSendAWSDataFirehose(ctx context.Context, cfg config.Config) (*sendAWSDataFirehose, error) { + conf := sendAWSDataFirehoseConfig{} + if err := conf.Decode(cfg.Settings); err != nil { + return nil, fmt.Errorf("transform send_aws_kinesis_data_firehose: %v", err) + } + + if conf.ID == "" { + conf.ID = "send_aws_kinesis_data_firehose" + } + + if err := conf.Validate(); err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf := sendAWSDataFirehose{ + conf: conf, + } + + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = firehose.NewFromConfig(awsCfg) + + // Data Firehose limits batch operations to 500 records. + count := 500 + if conf.Batch.Count > 0 && conf.Batch.Count <= count { + count = conf.Batch.Count + } + + // Data Firehose limits batch operations to 4 MiB. 
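+	// A user-configured batch size can lower this limit, but never raise it.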
+	size := sendAWSDataFirehoseMessageSizeLimit * 4 +	if conf.Batch.Size > 0 && conf.Batch.Size <= size { +		size = conf.Batch.Size +	} + +	agg, err := aggregate.New(aggregate.Config{ +		Count:    count, +		Size:     size, +		Duration: conf.Batch.Duration, +	}) +	if err != nil { +		return nil, err +	} +	tf.agg = agg + +	if len(conf.AuxTransforms) > 0 { +		tf.tforms = make([]Transformer, len(conf.AuxTransforms)) +		for i, c := range conf.AuxTransforms { +			t, err := New(context.Background(), c) +			if err != nil { +				return nil, fmt.Errorf("transform %s: %v", conf.ID, err) +			} + +			tf.tforms[i] = t +		} +	} + +	return &tf, nil +} + +type sendAWSDataFirehose struct { +	conf   sendAWSDataFirehoseConfig +	client *firehose.Client + +	mu     sync.Mutex +	agg    *aggregate.Aggregate +	tforms []Transformer +} + +func (tf *sendAWSDataFirehose) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { +	tf.mu.Lock() +	defer tf.mu.Unlock() + +	if msg.IsControl() { +		for key := range tf.agg.GetAll() { +			if tf.agg.Count(key) == 0 { +				continue +			} + +			if err := tf.send(ctx, key); err != nil { +				return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) +			} +		} + +		tf.agg.ResetAll() +		return []*message.Message{msg}, nil +	} + +	if len(msg.Data()) > sendAWSDataFirehoseMessageSizeLimit { +		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSDataFirehoseRecordSizeLimit) +	} + +	// If this value does not exist, then all data is batched together. +	key := msg.GetValue(tf.conf.Object.BatchKey).String() +	if ok := tf.agg.Add(key, msg.Data()); ok { +		return []*message.Message{msg}, nil +	} + +	if err := tf.send(ctx, key); err != nil { +		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) +	} + +	// If data cannot be added after reset, then the batch is misconfigured.
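+	// Resetting and retrying the add distinguishes a full batch from limits that are too small for a single message.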
+	tf.agg.Reset(key) +	if ok := tf.agg.Add(key, msg.Data()); !ok { +		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) +	} + +	return []*message.Message{msg}, nil +} + +func (tf *sendAWSDataFirehose) String() string { +	b, _ := json.Marshal(tf.conf) +	return string(b) +} + +func (tf *sendAWSDataFirehose) send(ctx context.Context, key string) error { +	data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) +	if err != nil { +		return err +	} + +	ctx = context.WithoutCancel(ctx) +	return tf.putRecords(ctx, data) +} + +func (tf *sendAWSDataFirehose) putRecords(ctx context.Context, data [][]byte) error { +	var records []types.Record +	for _, d := range data { +		records = append(records, types.Record{ +			Data: d, +		}) +	} + +	resp, err := tf.client.PutRecordBatch(ctx, &firehose.PutRecordBatchInput{ +		DeliveryStreamName: &tf.conf.AWS.ARN, +		Records:            records, +	}) +	if err != nil { +		return err +	} + +	// The error is checked before resp is read because resp is nil when the request fails. +	if resp.FailedPutCount != nil && *resp.FailedPutCount > 0 { +		var retry [][]byte + +		for i, r := range resp.RequestResponses { +			if r.ErrorCode != nil { +				retry = append(retry, data[i]) +			} +		} + +		if len(retry) > 0 { +			return tf.putRecords(ctx, retry) +		} +	} + +	return nil +} diff --git a/transform/send_aws_dynamodb.go b/transform/send_aws_dynamodb_put.go similarity index 53% rename from transform/send_aws_dynamodb.go rename to transform/send_aws_dynamodb_put.go index b29c1098..6ff94b70 100644 --- a/transform/send_aws_dynamodb.go +++ b/transform/send_aws_dynamodb_put.go @@ -3,18 +3,19 @@ package transform import ( "context" "encoding/json" +	"errors" "fmt" "sync" -	"github.com/aws/aws-sdk-go/service/dynamodb" -	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" -	"github.com/brexhq/substation/config" -	"github.com/brexhq/substation/internal/aggregate" -	"github.com/brexhq/substation/internal/aws" -	idynamodb "github.com/brexhq/substation/internal/aws/dynamodb" -	iconfig "github.com/brexhq/substation/internal/config" -	"github.com/brexhq/substation/internal/errors" -	"github.com/brexhq/substation/message" +	"github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" +	"github.com/aws/aws-sdk-go-v2/service/dynamodb" +	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + +	"github.com/brexhq/substation/v2/config" +	"github.com/brexhq/substation/v2/message" + +	"github.com/brexhq/substation/v2/internal/aggregate" +	iconfig "github.com/brexhq/substation/v2/internal/config" ) // Items greater than 400 KB in size cannot be put into DynamoDB. @@ -32,16 +33,13 @@ var errSendAWSDynamoDBItemSizeLimit = fmt.Errorf("data exceeded size limit") var errSendAWSDynamoDBNonObject = fmt.Errorf("input must be object") type sendAWSDynamoDBConfig struct { -	// TableName is the DynamoDB table that items are written to. -	TableName string `json:"table_name"` // AuxTransforms are applied to batched data before it is sent.
AuxTransforms []config.Config `json:"auxiliary_transforms"` ID string `json:"id"` Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` + Batch iconfig.Batch `json:"batch"` } func (c *sendAWSDynamoDBConfig) Decode(in interface{}) error { @@ -49,42 +47,53 @@ func (c *sendAWSDynamoDBConfig) Decode(in interface{}) error { } func (c *sendAWSDynamoDBConfig) Validate() error { - if c.TableName == "" { - return fmt.Errorf("table_name: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) } return nil } -func newSendAWSDynamoDB(_ context.Context, cfg config.Config) (*sendAWSDynamoDB, error) { +func newSendAWSDynamoDBPut(ctx context.Context, cfg config.Config) (*sendAWSDynamoDBPut, error) { conf := sendAWSDynamoDBConfig{} if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_dynamodb: %v", err) + return nil, fmt.Errorf("transform send_aws_dynamodb_put: %v", err) } if conf.ID == "" { - conf.ID = "send_aws_dynamodb" + conf.ID = "send_aws_dynamodb_put" } if err := conf.Validate(); err != nil { return nil, fmt.Errorf("transform %s: %v", conf.ID, err) } - tf := sendAWSDynamoDB{ + tf := sendAWSDynamoDBPut{ conf: conf, } - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = dynamodb.NewFromConfig(awsCfg) + + // DynamoDB limits batch operations to 25 records. + count := 25 + if conf.Batch.Count > 0 && conf.Batch.Count <= count { + count = conf.Batch.Count + } + + // DynamoDB limits batch operations to 16 MiB. + size := 1000 * 1000 * 16 + if conf.Batch.Size > 0 && conf.Batch.Size <= size { + size = conf.Batch.Size + } agg, err := aggregate.New(aggregate.Config{ - // DynamoDB limits batch operations to 25 records and 16 MiB. - Count: 25, - Size: 1000 * 1000 * 16, + Count: count, + Size: size, Duration: conf.Batch.Duration, }) if err != nil { @@ -107,18 +116,16 @@ func newSendAWSDynamoDB(_ context.Context, cfg config.Config) (*sendAWSDynamoDB, return &tf, nil } -type sendAWSDynamoDB struct { - conf sendAWSDynamoDBConfig - - // client is safe for concurrent use. - client idynamodb.API +type sendAWSDynamoDBPut struct { + conf sendAWSDynamoDBConfig + client *dynamodb.Client mu sync.Mutex agg *aggregate.Aggregate tforms []Transformer } -func (tf *sendAWSDynamoDB) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { +func (tf *sendAWSDynamoDBPut) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { tf.mu.Lock() defer tf.mu.Unlock() @@ -158,40 +165,72 @@ func (tf *sendAWSDynamoDB) Transform(ctx context.Context, msg *message.Message) // If data cannot be added after reset, then the batch is misconfgured. 
tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { -		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) +		return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil } -func (tf *sendAWSDynamoDB) String() string { +func (tf *sendAWSDynamoDBPut) String() string { b, _ := json.Marshal(tf.conf) return string(b) } -func (tf *sendAWSDynamoDB) send(ctx context.Context, key string) error { +func (tf *sendAWSDynamoDBPut) send(ctx context.Context, key string) error { data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) if err != nil { return err } -	var items []map[string]*dynamodb.AttributeValue +	var attrs []map[string]types.AttributeValue for _, b := range data { -		m := make(map[string]any) -		for k, v := range bytesToValue(b).Map() { -			m[k] = v.Value() +		var item map[string]interface{} +		if err := json.Unmarshal(b, &item); err != nil { +			return fmt.Errorf("transform %s: %v", tf.conf.ID, err) } -		i, err := dynamodbattribute.MarshalMap(m) +		m, err := attributevalue.MarshalMap(item) if err != nil { -			return err +			return fmt.Errorf("transform %s: %v", tf.conf.ID, err) } -		items = append(items, i) +		attrs = append(attrs, m) } -	if _, err := tf.client.BatchPutItem(ctx, tf.conf.TableName, items); err != nil { -		return err +	ctx = context.WithoutCancel(ctx) +	return tf.putItems(ctx, attrs) +} + +func (tf *sendAWSDynamoDBPut) putItems(ctx context.Context, attrs []map[string]types.AttributeValue) error { +	var items []types.WriteRequest +	for _, attr := range attrs { +		items = append(items, types.WriteRequest{ +			PutRequest: &types.PutRequest{ +				Item: attr, +			}, +		}) +	} + +	resp, err := tf.client.BatchWriteItem(ctx, &dynamodb.BatchWriteItemInput{ +		RequestItems: map[string][]types.WriteRequest{ +			tf.conf.AWS.ARN: items, +		}, +	}) +	if err != nil { +		// resp is nil when the request fails, so a throttled batch is retried in full. +		var e *types.ProvisionedThroughputExceededException +		if errors.As(err, &e) { +			return tf.putItems(ctx, attrs) +		} + +		return fmt.Errorf("transform %s: %v", tf.conf.ID, err) +	} + +	// Items that the service did not process are retried until none remain. +	if unprocessed := resp.UnprocessedItems[tf.conf.AWS.ARN]; len(unprocessed) > 0 { +		var retry []map[string]types.AttributeValue +		for _, item := range unprocessed { +			retry = append(retry, item.PutRequest.Item) +		} + +		return tf.putItems(ctx, retry) } return nil diff --git a/transform/send_aws_eventbridge.go b/transform/send_aws_eventbridge.go index e2b50acb..d8984b53 100644 --- a/transform/send_aws_eventbridge.go +++ b/transform/send_aws_eventbridge.go @@ -9,11 +9,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/eventbridge" "github.com/aws/aws-sdk-go-v2/service/eventbridge/types" -	"github.com/brexhq/substation/config" -	"github.com/brexhq/substation/internal/aggregate" -	"github.com/brexhq/substation/internal/aws" -	iconfig "github.com/brexhq/substation/internal/config" -	"github.com/brexhq/substation/message" +	"github.com/brexhq/substation/v2/config" +	"github.com/brexhq/substation/v2/message" + +	"github.com/brexhq/substation/v2/internal/aggregate" +	iconfig "github.com/brexhq/substation/v2/internal/config" ) // Records greater than 256 KB in size cannot be @@ -27,8 +27,6 @@ const sendAWSEventBridgeMessageSizeLimit = 1024 * 1024 * 256 var errSendAWSEventBridgeMessageSizeLimit = fmt.Errorf("data exceeded size limit") type sendAWSEventBridgeConfig struct { -	// ARN is the EventBridge bus to send messages to. -	ARN string `json:"arn"` // Describes the type of the messages sent to EventBridge. Description string `json:"description"` // AuxTransforms are applied to batched data before it is sent.
@@ -38,7 +36,6 @@ type sendAWSEventBridgeConfig struct { Object iconfig.Object `json:"object"` Batch iconfig.Batch `json:"batch"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` } func (c *sendAWSEventBridgeConfig) Decode(in interface{}) error { @@ -66,13 +63,7 @@ func newSendAWSEventBridge(ctx context.Context, cfg config.Config) (*sendAWSEven conf: conf, } - // Setup the AWS client. - awsCfg, err := aws.NewV2(ctx, aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) if err != nil { return nil, fmt.Errorf("transform %s: %v", conf.ID, err) } @@ -152,7 +143,7 @@ func (tf *sendAWSEventBridge) Transform(ctx context.Context, msg *message.Messag // If data cannot be added after reset, then the batch is misconfgured. tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil } @@ -183,19 +174,17 @@ func (tf *sendAWSEventBridge) send(ctx context.Context, key string) error { } // If empty, this is the default event bus. - if tf.conf.ARN != "" { - entry.EventBusName = &tf.conf.ARN + if tf.conf.AWS.ARN != "" { + entry.EventBusName = &tf.conf.AWS.ARN } entries[i] = entry } ctx = context.WithoutCancel(ctx) - input := &eventbridge.PutEventsInput{ + if _, err = tf.client.PutEvents(ctx, &eventbridge.PutEventsInput{ Entries: entries, - } - - if _, err = tf.client.PutEvents(ctx, input); err != nil { + }); err != nil { return err } diff --git a/transform/send_aws_kinesis_data_firehose.go b/transform/send_aws_kinesis_data_firehose.go deleted file mode 100644 index 3fc6faf1..00000000 --- a/transform/send_aws_kinesis_data_firehose.go +++ /dev/null @@ -1,173 +0,0 @@ -package transform - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/firehose" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" -) - -// Records greater than 1000 KiB in size cannot be put into Kinesis Firehose. -const sendAWSKinesisDataFirehoseMessageSizeLimit = 1024 * 1000 - -// errSendAWSKinesisDataFirehoseRecordSizeLimit is returned when data exceeds the -// Kinesis Firehose record size limit. If this error occurs, -// then drop or reduce the size of the data before attempting to -// send it to Kinesis Firehose. -var errSendAWSKinesisDataFirehoseRecordSizeLimit = fmt.Errorf("data exceeded size limit") - -type sendAWSKinesisDataFirehoseConfig struct { - // StreamName is the Firehose Delivery Stream that records are sent to. - StreamName string `json:"stream_name"` - // AuxTransforms are applied to batched data before it is sent. 
- AuxTransforms []config.Config `json:"auxiliary_transforms"` - - ID string `json:"id"` - Object iconfig.Object `json:"object"` - Batch iconfig.Batch `json:"batch"` - AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` -} - -func (c *sendAWSKinesisDataFirehoseConfig) Decode(in interface{}) error { - return iconfig.Decode(in, c) -} - -func (c *sendAWSKinesisDataFirehoseConfig) Validate() error { - if c.StreamName == "" { - return fmt.Errorf("stream_name: %v", errors.ErrMissingRequiredOption) - } - - return nil -} - -func newSendAWSKinesisDataFirehose(_ context.Context, cfg config.Config) (*sendAWSKinesisDataFirehose, error) { - conf := sendAWSKinesisDataFirehoseConfig{} - if err := conf.Decode(cfg.Settings); err != nil { - return nil, fmt.Errorf("transform send_aws_kinesis_data_firehose: %v", err) - } - - if conf.ID == "" { - conf.ID = "send_aws_kinesis_data_firehose" - } - - if err := conf.Validate(); err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf := sendAWSKinesisDataFirehose{ - conf: conf, - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) - - agg, err := aggregate.New(aggregate.Config{ - // Firehose limits batch operations to 500 records and 4 MiB. - Count: 500, - Size: sendAWSKinesisDataFirehoseMessageSizeLimit * 4, - Duration: conf.Batch.Duration, - }) - if err != nil { - return nil, err - } - tf.agg = agg - - if len(conf.AuxTransforms) > 0 { - tf.tforms = make([]Transformer, len(conf.AuxTransforms)) - for i, c := range conf.AuxTransforms { - t, err := New(context.Background(), c) - if err != nil { - return nil, fmt.Errorf("transform %s: %v", conf.ID, err) - } - - tf.tforms[i] = t - } - } - - return &tf, nil -} - -type sendAWSKinesisDataFirehose struct { - conf sendAWSKinesisDataFirehoseConfig - - // client is safe for concurrent use. - client firehose.API - - mu sync.Mutex - agg *aggregate.Aggregate - tforms []Transformer -} - -func (tf *sendAWSKinesisDataFirehose) Transform(ctx context.Context, msg *message.Message) ([]*message.Message, error) { - tf.mu.Lock() - defer tf.mu.Unlock() - - if msg.IsControl() { - for key := range tf.agg.GetAll() { - if tf.agg.Count(key) == 0 { - continue - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - } - - tf.agg.ResetAll() - return []*message.Message{msg}, nil - } - - if len(msg.Data()) > sendAWSKinesisDataFirehoseMessageSizeLimit { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendAWSKinesisDataFirehoseRecordSizeLimit) - } - - // If this value does not exist, then all data is batched together. - key := msg.GetValue(tf.conf.Object.BatchKey).String() - if ok := tf.agg.Add(key, msg.Data()); ok { - return []*message.Message{msg}, nil - } - - if err := tf.send(ctx, key); err != nil { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, err) - } - - // If data cannot be added after reset, then the batch is misconfgured. 
- tf.agg.Reset(key) - if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) - } - - return []*message.Message{msg}, nil -} - -func (tf *sendAWSKinesisDataFirehose) String() string { - b, _ := json.Marshal(tf.conf) - return string(b) -} - -func (tf *sendAWSKinesisDataFirehose) send(ctx context.Context, key string) error { - data, err := withTransforms(ctx, tf.tforms, tf.agg.Get(key)) - if err != nil { - return err - } - - if _, err := tf.client.PutRecordBatch(ctx, tf.conf.StreamName, data); err != nil { - return err - } - - return nil -} diff --git a/transform/send_aws_kinesis_data_stream.go b/transform/send_aws_kinesis_data_stream.go index 60fcf892..9e23ffa1 100644 --- a/transform/send_aws_kinesis_data_stream.go +++ b/transform/send_aws_kinesis_data_stream.go @@ -2,18 +2,24 @@ package transform import ( "context" + "crypto/md5" "encoding/json" "fmt" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/kinesis" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + rec "github.com/awslabs/kinesis-aggregation/go/v2/records" "github.com/google/uuid" + + //nolint: staticcheck // not ready to switch package + "github.com/golang/protobuf/proto" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" ) // Records greater than 1 MB in size cannot be @@ -27,8 +33,6 @@ const sendAWSKinesisDataStreamMessageSizeLimit = 1000 * 1000 var errSendAWSKinesisDataStreamMessageSizeLimit = fmt.Errorf("data exceeded size limit") type sendAWSKinesisDataStreamConfig struct { - // StreamName is the Kinesis Data Stream that records are sent to. - StreamName string `json:"stream_name"` // UseBatchKeyAsPartitionKey determines if the batch key should be used as the partition key. UseBatchKeyAsPartitionKey bool `json:"use_batch_key_as_partition_key"` // EnableRecordAggregation determines if records should be aggregated. 
@@ -40,7 +44,6 @@ type sendAWSKinesisDataStreamConfig struct { Object iconfig.Object `json:"object"` Batch iconfig.Batch `json:"batch"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` } func (c *sendAWSKinesisDataStreamConfig) Decode(in interface{}) error { @@ -48,14 +51,14 @@ func (c *sendAWSKinesisDataStreamConfig) Decode(in interface{}) error { } func (c *sendAWSKinesisDataStreamConfig) Validate() error { - if c.StreamName == "" { - return fmt.Errorf("stream_name: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) } return nil } -func newSendAWSKinesisDataStream(_ context.Context, cfg config.Config) (*sendAWSKinesisDataStream, error) { +func newSendAWSKinesisDataStream(ctx context.Context, cfg config.Config) (*sendAWSKinesisDataStream, error) { conf := sendAWSKinesisDataStreamConfig{} if err := conf.Decode(cfg.Settings); err != nil { return nil, fmt.Errorf("transform send_aws_kinesis_data_stream: %v", err) @@ -73,10 +76,21 @@ func newSendAWSKinesisDataStream(_ context.Context, cfg config.Config) (*sendAWS conf: conf, } + // Kinesis Data Streams limits batch operations to 500 records. + count := 500 + if conf.Batch.Count > 0 && conf.Batch.Count <= count { + count = conf.Batch.Count + } + + // Kinesis Data Streams limits batch operations to 5MiB. + size := sendAWSKinesisDataStreamMessageSizeLimit * 5 + if conf.Batch.Size > 0 && conf.Batch.Size <= size { + size = conf.Batch.Size + } + agg, err := aggregate.New(aggregate.Config{ - // Kinesis Data Streams limits batch operations to 500 records and 5MiB. - Count: 500, - Size: sendAWSKinesisDataStreamMessageSizeLimit * 5, + Count: count, + Size: size, Duration: conf.Batch.Duration, }) if err != nil { @@ -96,22 +110,19 @@ func newSendAWSKinesisDataStream(_ context.Context, cfg config.Config) (*sendAWS } } - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = kinesis.NewFromConfig(awsCfg) return &tf, nil } type sendAWSKinesisDataStream struct { - conf sendAWSKinesisDataStreamConfig - - // client is safe for concurrent use. - client kinesis.API + conf sendAWSKinesisDataStreamConfig + client *kinesis.Client mu sync.Mutex agg *aggregate.Aggregate @@ -154,7 +165,7 @@ func (tf *sendAWSKinesisDataStream) Transform(ctx context.Context, msg *message. // If data cannot be added after reset, then the batch is misconfgured. tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil @@ -187,7 +198,8 @@ func (tf *sendAWSKinesisDataStream) send(ctx context.Context, key string) error return nil } - if _, err := tf.client.PutRecords(ctx, tf.conf.StreamName, partitionKey, data); err != nil { + ctx = context.WithoutCancel(ctx) + if err := tf.putRecords(ctx, tf.conf.AWS.ARN, partitionKey, data); err != nil { return err } @@ -198,7 +210,7 @@ func (tf *sendAWSKinesisDataStream) aggregateRecords(partitionKey string, data [ var records [][]byte // Aggregation silently drops any data that is between ~0.9999 MB and 1 MB. 
-	agg := &kinesis.Aggregate{} +	agg := &sendAWSKinesisAggregate{} agg.New() for _, d := range data { @@ -218,3 +230,142 @@ func (tf *sendAWSKinesisDataStream) aggregateRecords(partitionKey string, data [ return records } + +func (tf *sendAWSKinesisDataStream) putRecords(ctx context.Context, streamName, partitionKey string, data [][]byte) error { +	var entries []types.PutRecordsRequestEntry +	for _, d := range data { +		entries = append(entries, types.PutRecordsRequestEntry{ +			Data:         d, +			PartitionKey: &partitionKey, +		}) +	} + +	resp, err := tf.client.PutRecords(ctx, &kinesis.PutRecordsInput{ +		Records:   entries, +		StreamARN: &streamName, +	}) +	if err != nil { +		return err +	} + +	if resp.FailedRecordCount != nil && *resp.FailedRecordCount > 0 { +		var retry [][]byte + +		for i, r := range resp.Records { +			if r.ErrorCode != nil { +				retry = append(retry, data[i]) +			} +		} + +		if len(retry) > 0 { +			return tf.putRecords(ctx, streamName, partitionKey, retry) +		} +	} + +	return nil +} + +// sendAWSKinesisAggregate produces a KPL-compliant Kinesis record +type sendAWSKinesisAggregate struct { +	Record       *rec.AggregatedRecord +	Count        int +	Size         int +	PartitionKey string +} + +// New creates a new Kinesis record with default values +// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L167 +func (a *sendAWSKinesisAggregate) New() { +	a.Record = &rec.AggregatedRecord{} +	a.Count = 0 +	a.Size = 0 + +	a.PartitionKey = "" +	a.Record.PartitionKeyTable = make([]string, 0) +} + +func varIntSize(i int) int { +	if i == 0 { +		return 1 +	} + +	var needed int +	for i > 0 { +		needed++ +		i >>= 1 +	} + +	bytes := needed / 7 +	if needed%7 > 0 { +		bytes++ +	} + +	return bytes +} + +func (a *sendAWSKinesisAggregate) calculateRecordSize(data []byte, partitionKey string) int { +	var recordSize int +	// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L344-L349 +	pkSize := 1 + varIntSize(len(partitionKey)) + len(partitionKey) +	recordSize += pkSize +	// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L362-L364 +	pkiSize := 1 + varIntSize(a.Count) +	recordSize += pkiSize +	// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L371-L374 +	dataSize := 1 + varIntSize(len(data)) + len(data) +	recordSize += dataSize +	// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L376-L378 +	recordSize = recordSize + 1 + varIntSize(pkiSize+dataSize) + +	// input record size + current aggregated record size + 4 byte magic header + 16 byte MD5 digest +	return recordSize + a.Record.XXX_Size() + 20 +} + +// Add inserts a Kinesis record into an aggregated Kinesis record +// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L382 +func (a *sendAWSKinesisAggregate) Add(data []byte, partitionKey string) bool { +	// https://docs.aws.amazon.com/streams/latest/dev/key-concepts.html#partition-key +	if len(partitionKey) > 256 { +		partitionKey = partitionKey[0:256] +	} + +	// grab the first partition key in the set of events +	if a.PartitionKey == "" { +		a.PartitionKey = partitionKey +	} + +	// Verify the record size won't exceed the 1 MB limit of the Kinesis service.
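+	// Data that would overflow the limit is rejected rather than added.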
+ // https://docs.aws.amazon.com/streams/latest/dev/service-sizes-and-limits.html + if a.calculateRecordSize(data, partitionKey) > 1024*1024 { + return false + } + + pki := uint64(a.Count) + r := &rec.Record{ + PartitionKeyIndex: &pki, + Data: data, + } + + // Append the data to the aggregated record. + a.Record.Records = append(a.Record.Records, r) + a.Record.PartitionKeyTable = append(a.Record.PartitionKeyTable, partitionKey) + + // Update the record count and size. This is not used in the aggregated record. + a.Count++ + a.Size += a.calculateRecordSize(data, partitionKey) + + return true +} + +// Get returns a KPL-compliant compressed Kinesis record +// https://github.com/awslabs/kinesis-aggregation/blob/398fbd4b430d4bf590431b301d03cbbc94279cef/python/aws_kinesis_agg/aggregator.py#L293 +func (a *sendAWSKinesisAggregate) Get() []byte { + data, _ := proto.Marshal(a.Record) + md5Hash := md5.Sum(data) + + record := []byte("\xf3\x89\x9a\xc2") + record = append(record, data...) + record = append(record, md5Hash[:]...) + + return record +} diff --git a/transform/send_aws_lambda.go b/transform/send_aws_lambda.go index f8d174a3..3ebe24d4 100644 --- a/transform/send_aws_lambda.go +++ b/transform/send_aws_lambda.go @@ -6,13 +6,13 @@ import ( "fmt" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/lambda" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/service/lambda" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" ) // Payloads greater than 256 KB in size cannot be @@ -25,8 +25,6 @@ const sendLambdaPayloadSizeLimit = 1024 * 1024 * 256 var errSendLambdaPayloadSizeLimit = fmt.Errorf("data exceeded size limit") type sendAWSLambdaConfig struct { - // FunctionName is the AWS Lambda function to asynchronously invoke. - FunctionName string `json:"function_name"` // AuxTransforms are applied to batched data before it is sent. 
AuxTransforms []config.Config `json:"auxiliary_transforms"` @@ -34,7 +32,6 @@ type sendAWSLambdaConfig struct { Object iconfig.Object `json:"object"` Batch iconfig.Batch `json:"batch"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` } func (c *sendAWSLambdaConfig) Decode(in interface{}) error { @@ -42,14 +39,14 @@ func (c *sendAWSLambdaConfig) Decode(in interface{}) error { } func (c *sendAWSLambdaConfig) Validate() error { - if c.FunctionName == "" { - return fmt.Errorf("function_name: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("arn: %v", iconfig.ErrMissingRequiredOption) } return nil } -func newSendAWSLambda(_ context.Context, cfg config.Config) (*sendAWSLambda, error) { +func newSendAWSLambda(ctx context.Context, cfg config.Config) (*sendAWSLambda, error) { conf := sendAWSLambdaConfig{} if err := conf.Decode(cfg.Settings); err != nil { return nil, fmt.Errorf("transform send_aws_lambda: %v", err) @@ -64,8 +61,7 @@ func newSendAWSLambda(_ context.Context, cfg config.Config) (*sendAWSLambda, err } tf := sendAWSLambda{ - conf: conf, - function: conf.FunctionName, + conf: conf, } agg, err := aggregate.New(aggregate.Config{ @@ -90,23 +86,19 @@ func newSendAWSLambda(_ context.Context, cfg config.Config) (*sendAWSLambda, err } } - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = lambda.NewFromConfig(awsCfg) return &tf, nil } type sendAWSLambda struct { - conf sendAWSLambdaConfig - function string - - // client is safe for concurrent use. - client lambda.API + conf sendAWSLambdaConfig + client *lambda.Client mu sync.Mutex agg *aggregate.Aggregate @@ -149,7 +141,7 @@ func (tf *sendAWSLambda) Transform(ctx context.Context, msg *message.Message) ([ // If data cannot be added after reset, then the batch is misconfgured. tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil @@ -161,8 +153,13 @@ func (tf *sendAWSLambda) send(ctx context.Context, key string) error { return err } - for _, b := range data { - if _, err := tf.client.InvokeAsync(ctx, tf.function, b); err != nil { + ctx = context.WithoutCancel(ctx) + for _, d := range data { + if _, err := tf.client.Invoke(ctx, &lambda.InvokeInput{ + FunctionName: &tf.conf.AWS.ARN, + Payload: d, + InvocationType: "Event", // Asynchronous invocation. 
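+			// The response payload is ignored; asynchronous invocations only surface request errors.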
+ }); err != nil { return err } } diff --git a/transform/send_aws_s3.go b/transform/send_aws_s3.go index ba92e958..92a7a868 100644 --- a/transform/send_aws_s3.go +++ b/transform/send_aws_s3.go @@ -5,24 +5,23 @@ import ( "encoding/json" "fmt" "os" - "slices" "sync" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go/aws/arn" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/s3manager" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/file" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/file" + "github.com/brexhq/substation/v2/internal/media" ) type sendAWSS3Config struct { - // BucketName is the AWS S3 bucket that data is written to. - BucketName string `json:"bucket_name"` // StorageClass is the storage class of the object. StorageClass string `json:"storage_class"` // FilePath determines how the name of the uploaded object is constructed. @@ -37,7 +36,6 @@ type sendAWSS3Config struct { Object iconfig.Object `json:"object"` Batch iconfig.Batch `json:"batch"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` } func (c *sendAWSS3Config) Decode(in interface{}) error { @@ -45,18 +43,18 @@ func (c *sendAWSS3Config) Decode(in interface{}) error { } func (c *sendAWSS3Config) Validate() error { - if c.BucketName == "" { - return fmt.Errorf("bucket_name: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) } - if !slices.Contains(s3.StorageClass_Values(), c.StorageClass) { - return fmt.Errorf("storage_class: %v", errors.ErrInvalidOption) + if types.StorageClass(c.StorageClass) == "" { + return fmt.Errorf("storage class: %v", iconfig.ErrInvalidOption) } return nil } -func newSendAWSS3(_ context.Context, cfg config.Config) (*sendAWSS3, error) { +func newSendAWSS3(ctx context.Context, cfg config.Config) (*sendAWSS3, error) { conf := sendAWSS3Config{} if err := conf.Decode(cfg.Settings); err != nil { return nil, fmt.Errorf("transform send_aws_s3: %v", err) @@ -66,10 +64,6 @@ func newSendAWSS3(_ context.Context, cfg config.Config) (*sendAWSS3, error) { conf.ID = "send_aws_s3" } - if conf.StorageClass == "" { - conf.StorageClass = "STANDARD" - } - if err := conf.Validate(); err != nil { return nil, fmt.Errorf("transform %s: %v", conf.ID, err) } @@ -78,6 +72,21 @@ func newSendAWSS3(_ context.Context, cfg config.Config) (*sendAWSS3, error) { conf: conf, } + // Extracts the bucket name from the ARN. 
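+	// S3 bucket ARNs carry no account or region fields, so the resource segment is the bucket name.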
+ // The ARN is in the format: arn:aws:s3:::bucket-name + a, err := arn.Parse(conf.AWS.ARN) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.bucket = a.Resource + + if conf.StorageClass == "" { + tf.sclass = types.StorageClassStandard + } else { + tf.sclass = types.StorageClass(conf.StorageClass) + } + agg, err := aggregate.New(aggregate.Config{ Count: conf.Batch.Count, Size: conf.Batch.Size, @@ -100,22 +109,22 @@ func newSendAWSS3(_ context.Context, cfg config.Config) (*sendAWSS3, error) { } } - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + c := s3.NewFromConfig(awsCfg) + tf.client = manager.NewUploader(c) return &tf, nil } type sendAWSS3 struct { - conf sendAWSS3Config - - // client is safe for concurrent use. - client s3manager.UploaderAPI + conf sendAWSS3Config + client *manager.Uploader + bucket string + sclass types.StorageClass mu sync.Mutex agg *aggregate.Aggregate @@ -154,7 +163,7 @@ func (tf *sendAWSS3) Transform(ctx context.Context, msg *message.Message) ([]*me // If data cannot be added after reset, then the batch is misconfgured. tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil @@ -205,7 +214,23 @@ func (tf *sendAWSS3) send(ctx context.Context, key string) error { } defer f.Close() - if _, err := tf.client.Upload(ctx, tf.conf.BucketName, filePath, tf.conf.StorageClass, f); err != nil { + mediaType, err := media.File(f) + if err != nil { + return err + } + + if _, err := f.Seek(0, 0); err != nil { + return err + } + + ctx = context.WithoutCancel(ctx) + if _, err := tf.client.Upload(ctx, &s3.PutObjectInput{ + Bucket: &tf.bucket, + Key: &filePath, + Body: f, + StorageClass: tf.sclass, + ContentType: &mediaType, + }); err != nil { return err } diff --git a/transform/send_aws_sns.go b/transform/send_aws_sns.go index 50ce7668..95a31117 100644 --- a/transform/send_aws_sns.go +++ b/transform/send_aws_sns.go @@ -4,15 +4,20 @@ import ( "context" "encoding/json" "fmt" + "strconv" + "strings" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/sns" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/service/sns" + "github.com/aws/aws-sdk-go-v2/service/sns/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/google/uuid" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" ) // Records greater than 256 KB in size cannot be @@ -25,8 +30,6 @@ const sendAWSSNSMessageSizeLimit = 1024 * 1024 * 256 var errSendAWSSNSMessageSizeLimit = fmt.Errorf("data exceeded size limit") type sendAWSSNSConfig struct { - // ARN is the AWS SNS topic ARN that messages are sent to. - ARN string `json:"arn"` // AuxTransforms are applied to batched data before it is sent. 
AuxTransforms []config.Config `json:"auxiliary_transforms"` @@ -34,7 +37,6 @@ type sendAWSSNSConfig struct { Object iconfig.Object `json:"object"` Batch iconfig.Batch `json:"batch"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` } func (c *sendAWSSNSConfig) Decode(in interface{}) error { @@ -42,14 +44,14 @@ func (c *sendAWSSNSConfig) Decode(in interface{}) error { } func (c *sendAWSSNSConfig) Validate() error { - if c.ARN == "" { - return fmt.Errorf("topic: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) } return nil } -func newSendAWSSNS(_ context.Context, cfg config.Config) (*sendAWSSNS, error) { +func newSendAWSSNS(ctx context.Context, cfg config.Config) (*sendAWSSNS, error) { conf := sendAWSSNSConfig{} if err := conf.Decode(cfg.Settings); err != nil { return nil, fmt.Errorf("transform send_aws_sns: %v", err) @@ -67,19 +69,28 @@ func newSendAWSSNS(_ context.Context, cfg config.Config) (*sendAWSSNS, error) { conf: conf, } - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = sns.NewFromConfig(awsCfg) + + // SNS limits batch operations to 10 messages. + count := 10 + if conf.Batch.Count > 0 && conf.Batch.Count <= count { + count = conf.Batch.Count + } + + // SNS limits batch operations to 256 KB. + size := sendAWSSNSMessageSizeLimit + if conf.Batch.Size > 0 && conf.Batch.Size <= size { + size = conf.Batch.Size + } agg, err := aggregate.New(aggregate.Config{ - // SQS limits batch operations to 10 messages. - Count: 10, - // SNS limits batch operations to 256 KB. - Size: sendAWSSNSMessageSizeLimit, + Count: count, + Size: size, Duration: conf.Batch.Duration, }) if err != nil { @@ -103,10 +114,8 @@ func newSendAWSSNS(_ context.Context, cfg config.Config) (*sendAWSSNS, error) { } type sendAWSSNS struct { - conf sendAWSSNSConfig - - // client is safe for concurrent use. - client sns.API + conf sendAWSSNSConfig + client *sns.Client mu sync.Mutex agg *aggregate.Aggregate @@ -149,7 +158,7 @@ func (tf *sendAWSSNS) Transform(ctx context.Context, msg *message.Message) ([]*m // If data cannot be added after reset, then the batch is misconfgured. 
tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil } @@ -165,9 +174,51 @@ func (tf *sendAWSSNS) send(ctx context.Context, key string) error { return err } - if _, err := tf.client.PublishBatch(ctx, tf.conf.ARN, data); err != nil { + ctx = context.WithoutCancel(ctx) + return tf.sendMessages(ctx, data) +} + +func (tf *sendAWSSNS) sendMessages(ctx context.Context, data [][]byte) error { + mgid := uuid.New().String() + + entries := make([]types.PublishBatchRequestEntry, 0, len(data)) + for idx, d := range data { + entry := types.PublishBatchRequestEntry{ + Id: aws.String(strconv.Itoa(idx)), + Message: aws.String(string(d)), + } + + if strings.HasSuffix(tf.conf.AWS.ARN, ".fifo") { + entry.MessageGroupId = aws.String(mgid) + } + + entries = append(entries, entry) + } + + ctx = context.WithoutCancel(ctx) + resp, err := tf.client.PublishBatch(ctx, &sns.PublishBatchInput{ + PublishBatchRequestEntries: entries, + TopicArn: aws.String(tf.conf.AWS.ARN), + }) + if err != nil { return err } + if resp.Failed != nil { + var retry [][]byte + for _, r := range resp.Failed { + idx, err := strconv.Atoi(aws.StringValue(r.Id)) + if err != nil { + return err + } + + retry = append(retry, data[idx]) + } + + if len(retry) > 0 { + return tf.sendMessages(ctx, retry) + } + } + return nil } diff --git a/transform/send_aws_sqs.go b/transform/send_aws_sqs.go index bcc664c2..092d71fe 100644 --- a/transform/send_aws_sqs.go +++ b/transform/send_aws_sqs.go @@ -4,16 +4,21 @@ import ( "context" "encoding/json" "fmt" + "strconv" "strings" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - "github.com/brexhq/substation/internal/aws" - "github.com/brexhq/substation/internal/aws/sqs" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/google/uuid" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" ) // Records greater than 256 KB in size cannot be @@ -26,8 +31,6 @@ const sendSQSMessageSizeLimit = 1024 * 1024 * 256 var errSendSQSMessageSizeLimit = fmt.Errorf("data exceeded size limit") type sendAWSSQSConfig struct { - // ARN is the AWS SNS topic ARN that messages are sent to. - ARN string `json:"arn"` // AuxTransforms are applied to batched data before it is sent. 
AuxTransforms []config.Config `json:"auxiliary_transforms"` @@ -35,7 +38,6 @@ type sendAWSSQSConfig struct { Object iconfig.Object `json:"object"` Batch iconfig.Batch `json:"batch"` AWS iconfig.AWS `json:"aws"` - Retry iconfig.Retry `json:"retry"` } func (c *sendAWSSQSConfig) Decode(in interface{}) error { @@ -43,14 +45,14 @@ func (c *sendAWSSQSConfig) Decode(in interface{}) error { } func (c *sendAWSSQSConfig) Validate() error { - if c.ARN == "" { - return fmt.Errorf("arn: %v", errors.ErrMissingRequiredOption) + if c.AWS.ARN == "" { + return fmt.Errorf("aws.arn: %v", iconfig.ErrMissingRequiredOption) } return nil } -func newSendAWSSQS(_ context.Context, cfg config.Config) (*sendAWSSQS, error) { +func newSendAWSSQS(ctx context.Context, cfg config.Config) (*sendAWSSQS, error) { conf := sendAWSSQSConfig{} if err := conf.Decode(cfg.Settings); err != nil { return nil, fmt.Errorf("transform send_aws_sqs: %v", err) @@ -64,31 +66,44 @@ func newSendAWSSQS(_ context.Context, cfg config.Config) (*sendAWSSQS, error) { return nil, fmt.Errorf("transform %s: %v", conf.ID, err) } - // arn:aws:sqs:region:account_id:queue_name - arn := strings.Split(conf.ARN, ":") tf := sendAWSSQS{ conf: conf, - queueURL: fmt.Sprintf( - "https://sqs.%s.amazonaws.com/%s/%s", - arn[3], - arn[4], - arn[5], - ), - } - - // Setup the AWS client. - tf.client.Setup(aws.Config{ - Region: conf.AWS.Region, - RoleARN: conf.AWS.RoleARN, - MaxRetries: conf.Retry.Count, - RetryableErrors: conf.Retry.ErrorMessages, - }) + } + + arn, err := arn.Parse(conf.AWS.ARN) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.queueURL = fmt.Sprintf( + "https://sqs.%s.amazonaws.com/%s/%s", + arn.Region, + arn.AccountID, + arn.Resource, + ) + + awsCfg, err := iconfig.NewAWS(ctx, conf.AWS) + if err != nil { + return nil, fmt.Errorf("transform %s: %v", conf.ID, err) + } + + tf.client = sqs.NewFromConfig(awsCfg) + + // SQS limits batch operations to 10 messages. + count := 10 + if conf.Batch.Count > 0 && conf.Batch.Count <= count { + count = conf.Batch.Count + } + + // SQS limits batch operations to 256 KB. + size := sendSQSMessageSizeLimit + if conf.Batch.Size > 0 && conf.Batch.Size <= size { + size = conf.Batch.Size + } agg, err := aggregate.New(aggregate.Config{ - // SQS limits batch operations to 10 messages. - Count: 10, - // SQS limits batch operations to 256 KB. - Size: sendSQSMessageSizeLimit, + Count: count, + Size: size, Duration: conf.Batch.Duration, }) if err != nil { @@ -114,9 +129,7 @@ func newSendAWSSQS(_ context.Context, cfg config.Config) (*sendAWSSQS, error) { type sendAWSSQS struct { conf sendAWSSQSConfig queueURL string - - // client is safe for concurrent use. - client sqs.API + client *sqs.Client mu sync.Mutex agg *aggregate.Aggregate @@ -159,7 +172,7 @@ func (tf *sendAWSSQS) Transform(ctx context.Context, msg *message.Message) ([]*m // If data cannot be added after reset, then the batch is misconfgured. 
tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil } @@ -175,9 +188,50 @@ func (tf *sendAWSSQS) send(ctx context.Context, key string) error { return err } - if _, err := tf.client.SendMessageBatch(ctx, tf.queueURL, data); err != nil { + ctx = context.WithoutCancel(ctx) + return tf.sendMessages(ctx, data) +} + +func (tf *sendAWSSQS) sendMessages(ctx context.Context, data [][]byte) error { + mgid := uuid.New().String() + + entries := make([]types.SendMessageBatchRequestEntry, 0, len(data)) + for idx, d := range data { + entry := types.SendMessageBatchRequestEntry{ + Id: aws.String(strconv.Itoa(idx)), + MessageBody: aws.String(string(d)), + } + + if strings.HasSuffix(tf.queueURL, ".fifo") { + entry.MessageGroupId = aws.String(mgid) + } + + entries = append(entries, entry) + } + + resp, err := tf.client.SendMessageBatch(ctx, &sqs.SendMessageBatchInput{ + Entries: entries, + QueueUrl: aws.String(tf.queueURL), + }) + if err != nil { return err } + if resp.Failed != nil { + var retry [][]byte + for _, r := range resp.Failed { + idx, err := strconv.Atoi(aws.StringValue(r.Id)) + if err != nil { + return err + } + + retry = append(retry, data[idx]) + } + + if len(retry) > 0 { + return tf.sendMessages(ctx, retry) + } + } + return nil } diff --git a/transform/send_file.go b/transform/send_file.go index b027964d..03d94aa0 100644 --- a/transform/send_file.go +++ b/transform/send_file.go @@ -8,11 +8,12 @@ import ( "path/filepath" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/file" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/file" ) type sendFileConfig struct { @@ -120,7 +121,7 @@ func (tf *sendFile) Transform(ctx context.Context, msg *message.Message) ([]*mes // If data cannot be added after reset, then the batch is misconfigured.
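The new `sendMessages` methods for SNS and SQS share the same partial-failure handling: each entry's `Id` is its slice index, so any entry reported in `Failed` can be mapped back to its payload and resent, and for `.fifo` destinations every entry in a batch shares one `MessageGroupId` so relative order is preserved. A minimal standalone sketch of the SQS variant, assuming the AWS SDK for Go v2 helpers (`aws.String`/`aws.ToString`):

```go
package main

import (
	"context"
	"strconv"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
	"github.com/google/uuid"
)

func sendBatch(ctx context.Context, client *sqs.Client, queueURL string, data [][]byte) error {
	// All entries in one batch share a message group ID so a FIFO queue
	// preserves their relative order.
	mgid := uuid.New().String()

	entries := make([]types.SendMessageBatchRequestEntry, 0, len(data))
	for idx, d := range data {
		entry := types.SendMessageBatchRequestEntry{
			Id:          aws.String(strconv.Itoa(idx)), // Id doubles as an index into data
			MessageBody: aws.String(string(d)),
		}
		if strings.HasSuffix(queueURL, ".fifo") {
			entry.MessageGroupId = aws.String(mgid)
		}
		entries = append(entries, entry)
	}

	resp, err := client.SendMessageBatch(ctx, &sqs.SendMessageBatchInput{
		Entries:  entries,
		QueueUrl: aws.String(queueURL),
	})
	if err != nil {
		return err // the entire request failed
	}

	// Resend only the entries that SQS reported as failed.
	var retry [][]byte
	for _, f := range resp.Failed {
		idx, err := strconv.Atoi(aws.ToString(f.Id))
		if err != nil {
			return err
		}
		retry = append(retry, data[idx])
	}
	if len(retry) > 0 {
		return sendBatch(ctx, client, queueURL, retry)
	}
	return nil
}
```

As written, here and above, the retry recurses with no backoff or depth limit, so an entry that is rejected on every attempt keeps retrying until the caller's runtime cuts it off.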
tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil diff --git a/transform/send_http_post.go b/transform/send_http_post.go index 3fe42d5a..c8666ebc 100644 --- a/transform/send_http_post.go +++ b/transform/send_http_post.go @@ -8,13 +8,13 @@ import ( "os" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/http" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/http" + "github.com/brexhq/substation/v2/internal/secrets" ) type sendHTTPPostConfig struct { @@ -38,7 +38,7 @@ func (c *sendHTTPPostConfig) Decode(in interface{}) error { func (c *sendHTTPPostConfig) Validate() error { if c.URL == "" { - return fmt.Errorf("url: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("url: %v", iconfig.ErrMissingRequiredOption) } return nil @@ -135,7 +135,7 @@ func (tf *sendHTTPPost) Transform(ctx context.Context, msg *message.Message) ([] // If data cannot be added after reset, then the batch is misconfigured. tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil diff --git a/transform/send_stdout.go b/transform/send_stdout.go index 2c8f53fd..0b24dd4c 100644 --- a/transform/send_stdout.go +++ b/transform/send_stdout.go @@ -6,10 +6,11 @@ import ( "fmt" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" ) type sendStdoutConfig struct { @@ -104,7 +105,7 @@ func (tf *sendStdout) Transform(ctx context.Context, msg *message.Message) ([]*m // If data cannot be added after reset, then the batch is misconfigured.
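Every send transform's `Transform` method follows the same flush-then-retry shape seen in these hunks: when a message does not fit the current batch, the batch is sent and reset, and only if the message still does not fit an empty batch is `errBatchNoMoreData` returned. A toy sketch of that control flow (the `batch` type below is a stand-in for the internal aggregate package, not its real API):

```go
package main

import (
	"errors"
	"fmt"
)

var errBatchNoMoreData = errors.New("data could not be added to batch")

// batch is a toy fixed-capacity batch.
type batch struct {
	max   int
	items [][]byte
}

func (b *batch) add(d []byte) bool {
	if len(b.items) >= b.max {
		return false
	}
	b.items = append(b.items, d)
	return true
}

func (b *batch) reset() { b.items = nil }

func send(b *batch, d []byte) error {
	if b.add(d) {
		return nil
	}

	// The batch is full: flush it (stand-in for the real send), then retry.
	fmt.Println("flushing", len(b.items), "items")
	b.reset()

	// If data cannot be added after reset, then the batch is misconfigured.
	if !b.add(d) {
		return errBatchNoMoreData
	}
	return nil
}

func main() {
	b := &batch{max: 2}
	for _, d := range [][]byte{[]byte("a"), []byte("b"), []byte("c")} {
		if err := send(b, d); err != nil {
			fmt.Println(err)
		}
	}

	// A zero-capacity batch can never accept data, even after a reset.
	bad := &batch{max: 0}
	fmt.Println(send(bad, []byte("d"))) // data could not be added to batch
}
```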
tf.agg.Reset(key) if ok := tf.agg.Add(key, msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } return []*message.Message{msg}, nil diff --git a/transform/string.go b/transform/string.go index 171e917f..18e26565 100644 --- a/transform/string.go +++ b/transform/string.go @@ -3,8 +3,7 @@ package transform import ( "fmt" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + iconfig "github.com/brexhq/substation/v2/internal/config" ) type strCaseConfig struct { @@ -18,11 +17,11 @@ func (c *strCaseConfig) Decode(in interface{}) error { func (c *strCaseConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/string_append.go b/transform/string_append.go index 1af277c4..74773626 100644 --- a/transform/string_append.go +++ b/transform/string_append.go @@ -5,10 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type stringAppendConfig struct { @@ -25,15 +25,15 @@ func (c *stringAppendConfig) Decode(in interface{}) error { func (c *stringAppendConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.Suffix == "" { - return fmt.Errorf("suffix: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("suffix: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/string_append_test.go b/transform/string_append_test.go index 2c6b124c..7bdbc9aa 100644 --- a/transform/string_append_test.go +++ b/transform/string_append_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &stringAppend{} diff --git a/transform/string_capture.go b/transform/string_capture.go index 4d5d9cfe..6386a762 100644 --- a/transform/string_capture.go +++ b/transform/string_capture.go @@ -7,10 +7,10 @@ import ( "regexp" "strings" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type stringCaptureConfig struct { @@ 
-35,15 +35,15 @@ func (c *stringCaptureConfig) Decode(in interface{}) error { func (c *stringCaptureConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.Pattern == "" { - return fmt.Errorf("pattern: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("pattern: %v", iconfig.ErrMissingRequiredOption) } re, err := regexp.Compile(c.Pattern) diff --git a/transform/string_capture_test.go b/transform/string_capture_test.go index 186b5051..15f5d4bf 100644 --- a/transform/string_capture_test.go +++ b/transform/string_capture_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &stringCapture{} diff --git a/transform/string_replace.go b/transform/string_replace.go index 6d2d3f1b..b3477dd3 100644 --- a/transform/string_replace.go +++ b/transform/string_replace.go @@ -6,10 +6,10 @@ import ( "fmt" "regexp" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type stringReplaceConfig struct { @@ -29,15 +29,15 @@ func (c *stringReplaceConfig) Decode(in interface{}) error { func (c *stringReplaceConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.Pattern == "" { - return fmt.Errorf("old: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("old: %v", iconfig.ErrMissingRequiredOption) } re, err := regexp.Compile(c.Pattern) diff --git a/transform/string_replace_test.go b/transform/string_replace_test.go index 9cbd529d..3839fdd1 100644 --- a/transform/string_replace_test.go +++ b/transform/string_replace_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &stringReplace{} diff --git a/transform/string_split.go b/transform/string_split.go index 330482c2..9ac1fa21 100644 --- a/transform/string_split.go +++ b/transform/string_split.go @@ -7,10 +7,10 @@ import ( "fmt" "strings" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type stringSplitConfig 
struct { @@ -27,15 +27,15 @@ func (c *stringSplitConfig) Decode(in interface{}) error { func (c *stringSplitConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.Separator == "" { - return fmt.Errorf("separator: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("separator: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/string_split_test.go b/transform/string_split_test.go index 99706efe..3ee3cd7c 100644 --- a/transform/string_split_test.go +++ b/transform/string_split_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &stringSplit{} diff --git a/transform/string_to_lower.go b/transform/string_to_lower.go index 19673794..6537e489 100644 --- a/transform/string_to_lower.go +++ b/transform/string_to_lower.go @@ -7,8 +7,8 @@ import ( "fmt" "strings" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringToLower(_ context.Context, cfg config.Config) (*stringToLower, error) { diff --git a/transform/string_to_lower_test.go b/transform/string_to_lower_test.go index 5f3a5fd3..bc854a34 100644 --- a/transform/string_to_lower_test.go +++ b/transform/string_to_lower_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &stringToLower{} diff --git a/transform/string_to_snake.go b/transform/string_to_snake.go index 7683e591..5209929b 100644 --- a/transform/string_to_snake.go +++ b/transform/string_to_snake.go @@ -5,9 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" "github.com/iancoleman/strcase" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringToSnake(_ context.Context, cfg config.Config) (*stringToSnake, error) { diff --git a/transform/string_to_snake_test.go b/transform/string_to_snake_test.go index a4954527..1a529abf 100644 --- a/transform/string_to_snake_test.go +++ b/transform/string_to_snake_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &stringToSnake{} diff --git a/transform/string_to_upper.go b/transform/string_to_upper.go index 983cd1b7..465592fe 100644 --- a/transform/string_to_upper.go +++ b/transform/string_to_upper.go @@ -7,8 +7,8 @@ import ( "fmt" "strings" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newStringToUpper(_ context.Context, cfg config.Config) (*stringToUpper, error) { diff --git 
a/transform/string_to_upper_test.go b/transform/string_to_upper_test.go index 029e1257..55ec95f9 100644 --- a/transform/string_to_upper_test.go +++ b/transform/string_to_upper_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &stringToUpper{} diff --git a/transform/string_uuid.go b/transform/string_uuid.go index 59380bd2..db0cd25c 100644 --- a/transform/string_uuid.go +++ b/transform/string_uuid.go @@ -5,10 +5,12 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" "github.com/google/uuid" + + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type stringUUIDConfig struct { diff --git a/transform/time.go b/transform/time.go index a1a91c0f..4a031d06 100644 --- a/transform/time.go +++ b/transform/time.go @@ -4,8 +4,7 @@ import ( "fmt" "time" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" + iconfig "github.com/brexhq/substation/v2/internal/config" ) const ( @@ -23,11 +22,11 @@ func (c *timeUnixConfig) Decode(in interface{}) error { func (c *timeUnixConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } return nil @@ -47,15 +46,15 @@ func (c *timePatternConfig) Decode(in interface{}) error { func (c *timePatternConfig) Validate() error { if c.Object.SourceKey == "" && c.Object.TargetKey != "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey != "" && c.Object.TargetKey == "" { - return fmt.Errorf("object_target_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_target_key: %v", iconfig.ErrMissingRequiredOption) } if c.Format == "" { - return fmt.Errorf("format: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("format: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/time_from_string.go b/transform/time_from_string.go index 4b3b14e0..08d41d6f 100644 --- a/transform/time_from_string.go +++ b/transform/time_from_string.go @@ -5,8 +5,8 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newTimeFromString(_ context.Context, cfg config.Config) (*timeFromString, error) { diff --git a/transform/time_from_string_test.go b/transform/time_from_string_test.go index e8420ed2..d24f13cf 100644 --- a/transform/time_from_string_test.go +++ b/transform/time_from_string_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = 
&timeFromString{} diff --git a/transform/time_from_unix.go b/transform/time_from_unix.go index 60fb5654..bfbf3bee 100644 --- a/transform/time_from_unix.go +++ b/transform/time_from_unix.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newTimeFromUnix(_ context.Context, cfg config.Config) (*timeFromUnix, error) { diff --git a/transform/time_from_unix_milli.go b/transform/time_from_unix_milli.go index 69191c48..972671d1 100644 --- a/transform/time_from_unix_milli.go +++ b/transform/time_from_unix_milli.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newTimeFromUnixMilli(_ context.Context, cfg config.Config) (*timeFromUnixMilli, error) { diff --git a/transform/time_from_unix_milli_test.go b/transform/time_from_unix_milli_test.go index 6320eb1f..22d38df8 100644 --- a/transform/time_from_unix_milli_test.go +++ b/transform/time_from_unix_milli_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &timeFromUnix{} diff --git a/transform/time_from_unix_test.go b/transform/time_from_unix_test.go index 1e0870bd..b040f353 100644 --- a/transform/time_from_unix_test.go +++ b/transform/time_from_unix_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &timeFromUnix{} diff --git a/transform/time_now.go b/transform/time_now.go index 6eae9579..3c9ae046 100644 --- a/transform/time_now.go +++ b/transform/time_now.go @@ -6,9 +6,10 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type timeNowConfig struct { diff --git a/transform/time_to_string.go b/transform/time_to_string.go index eb2f3d89..3d9f369a 100644 --- a/transform/time_to_string.go +++ b/transform/time_to_string.go @@ -5,8 +5,8 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newTimeToString(_ context.Context, cfg config.Config) (*timeToString, error) { diff --git a/transform/time_to_string_test.go b/transform/time_to_string_test.go index 6588c8bc..437b9451 100644 --- a/transform/time_to_string_test.go +++ b/transform/time_to_string_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &timeToString{} diff --git a/transform/time_to_unix.go b/transform/time_to_unix.go index cd054be2..1c56156f 100644 --- a/transform/time_to_unix.go +++ b/transform/time_to_unix.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - 
"github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newTimeToUnix(_ context.Context, cfg config.Config) (*timeToUnix, error) { diff --git a/transform/time_to_unix_milli.go b/transform/time_to_unix_milli.go index 573b3fa1..f39adab3 100644 --- a/transform/time_to_unix_milli.go +++ b/transform/time_to_unix_milli.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) func newTimeToUnixMilli(_ context.Context, cfg config.Config) (*timeToUnixMilli, error) { diff --git a/transform/time_to_unix_milli_test.go b/transform/time_to_unix_milli_test.go index 3e3b5b46..b3660a99 100644 --- a/transform/time_to_unix_milli_test.go +++ b/transform/time_to_unix_milli_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &timeToUnixMilli{} diff --git a/transform/time_to_unix_test.go b/transform/time_to_unix_test.go index dd33dcc0..6271ffc5 100644 --- a/transform/time_to_unix_test.go +++ b/transform/time_to_unix_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var _ Transformer = &timeToUnix{} diff --git a/transform/transform.go b/transform/transform.go index 650da77c..7aa22479 100644 --- a/transform/transform.go +++ b/transform/transform.go @@ -6,9 +6,10 @@ import ( "fmt" "math" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) var errMsgInvalidObject = fmt.Errorf("message must be JSON object") @@ -40,8 +41,8 @@ func New(ctx context.Context, cfg config.Config) (Transformer, error) { //nolint case "array_zip": return newArrayZip(ctx, cfg) // Enrichment transforms. - case "enrich_aws_dynamodb": - return newEnrichAWSDynamoDB(ctx, cfg) + case "enrich_aws_dynamodb_query": + return newEnrichAWSDynamoDBQuery(ctx, cfg) case "enrich_aws_lambda": return newEnrichAWSLambda(ctx, cfg) case "enrich_dns_ip_lookup": @@ -93,8 +94,6 @@ func New(ctx context.Context, cfg config.Config) (Transformer, error) { //nolint return newMetaKVStoreLock(ctx, cfg) case "meta_metric_duration": return newMetaMetricsDuration(ctx, cfg) - case "meta_pipeline": - return newMetaPipeline(ctx, cfg) case "meta_retry": return newMetaRetry(ctx, cfg) case "meta_switch": @@ -139,12 +138,12 @@ func New(ctx context.Context, cfg config.Config) (Transformer, error) { //nolint case "object_to_unsigned_integer": return newObjectToUnsignedInteger(ctx, cfg) // Send transforms. 
- case "send_aws_dynamodb": - return newSendAWSDynamoDB(ctx, cfg) + case "send_aws_dynamodb_put": + return newSendAWSDynamoDBPut(ctx, cfg) case "send_aws_eventbridge": return newSendAWSEventBridge(ctx, cfg) - case "send_aws_kinesis_data_firehose": - return newSendAWSKinesisDataFirehose(ctx, cfg) + case "send_aws_data_firehose": + return newSendAWSDataFirehose(ctx, cfg) case "send_aws_kinesis_data_stream": return newSendAWSKinesisDataStream(ctx, cfg) case "send_aws_lambda": @@ -211,7 +210,7 @@ func New(ctx context.Context, cfg config.Config) (Transformer, error) { //nolint case "utility_secret": return newUtilitySecret(ctx, cfg) default: - return nil, fmt.Errorf("transform %s: %w", cfg.Type, errors.ErrInvalidFactoryInput) + return nil, fmt.Errorf("transform %s: %w", cfg.Type, iconfig.ErrInvalidFactoryInput) } } diff --git a/transform/transform_example_test.go b/transform/transform_example_test.go index 4f5b45a6..becfce36 100644 --- a/transform/transform_example_test.go +++ b/transform/transform_example_test.go @@ -4,9 +4,9 @@ import ( "context" "fmt" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" - "github.com/brexhq/substation/transform" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + "github.com/brexhq/substation/v2/transform" ) func ExampleTransformer() { diff --git a/transform/transform_test.go b/transform/transform_test.go index 888297fb..fc8d9c13 100644 --- a/transform/transform_test.go +++ b/transform/transform_test.go @@ -5,8 +5,8 @@ import ( "reflect" "testing" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" ) var transformTests = []struct { diff --git a/transform/utility_control.go b/transform/utility_control.go index 4621b972..05c64bd7 100644 --- a/transform/utility_control.go +++ b/transform/utility_control.go @@ -6,10 +6,11 @@ import ( "fmt" "sync" - "github.com/brexhq/substation/config" - "github.com/brexhq/substation/internal/aggregate" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + "github.com/brexhq/substation/v2/internal/aggregate" + iconfig "github.com/brexhq/substation/v2/internal/config" ) type utilityControlConfig struct { @@ -73,11 +74,11 @@ func (tf *utilityControl) Transform(_ context.Context, msg *message.Message) ([] tf.agg.Reset("") if ok := tf.agg.Add("", msg.Data()); !ok { - return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errSendBatchMisconfigured) + return nil, fmt.Errorf("transform %s: %v", tf.conf.ID, errBatchNoMoreData) } ctrl := message.New().AsControl() - return []*message.Message{msg, ctrl}, nil + return []*message.Message{ctrl, msg}, nil } func (tf *utilityControl) String() string { diff --git a/transform/utility_delay.go b/transform/utility_delay.go index 5793600e..88037809 100644 --- a/transform/utility_delay.go +++ b/transform/utility_delay.go @@ -6,10 +6,10 @@ import ( "fmt" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type utilityDelayConfig struct { @@ -25,7 +25,7 @@ func (c *utilityDelayConfig) Decode(in 
interface{}) error { func (c *utilityDelayConfig) Validate() error { if c.Duration == "" { - return fmt.Errorf("duration: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("duration: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/utility_drop.go b/transform/utility_drop.go index 24a888c0..19b18e6b 100644 --- a/transform/utility_drop.go +++ b/transform/utility_drop.go @@ -5,9 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type utilityDropConfig struct { diff --git a/transform/utility_err.go b/transform/utility_err.go index f7e10c11..34d7dae1 100644 --- a/transform/utility_err.go +++ b/transform/utility_err.go @@ -5,9 +5,10 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" ) type utilityErrConfig struct { diff --git a/transform/utility_metric_bytes.go b/transform/utility_metric_bytes.go index 8d91224e..20362865 100644 --- a/transform/utility_metric_bytes.go +++ b/transform/utility_metric_bytes.go @@ -6,10 +6,11 @@ import ( "fmt" "sync/atomic" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/metrics" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/metrics" ) type utilityMetricBytesConfig struct { diff --git a/transform/utility_metric_count.go b/transform/utility_metric_count.go index 1e283a30..b87d882c 100644 --- a/transform/utility_metric_count.go +++ b/transform/utility_metric_count.go @@ -6,10 +6,11 @@ import ( "fmt" "sync/atomic" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/metrics" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/metrics" ) type utilityMetricsCountConfig struct { diff --git a/transform/utility_metric_freshness.go b/transform/utility_metric_freshness.go index 8ac4f088..9bfd4f8c 100644 --- a/transform/utility_metric_freshness.go +++ b/transform/utility_metric_freshness.go @@ -7,11 +7,11 @@ import ( "sync/atomic" "time" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/errors" - "github.com/brexhq/substation/internal/metrics" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/metrics" ) type utilityMetricFreshnessConfig struct { @@ -28,11 +28,11 @@ func (c *utilityMetricFreshnessConfig) Decode(in interface{}) error { func (c *utilityMetricFreshnessConfig) Validate() error { if c.Threshold 
== "" { - return fmt.Errorf("threshold: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("threshold: %v", iconfig.ErrMissingRequiredOption) } if c.Object.SourceKey == "" { - return fmt.Errorf("object_source_key: %v", errors.ErrMissingRequiredOption) + return fmt.Errorf("object_source_key: %v", iconfig.ErrMissingRequiredOption) } return nil diff --git a/transform/utility_secret.go b/transform/utility_secret.go index 60903322..8febae63 100644 --- a/transform/utility_secret.go +++ b/transform/utility_secret.go @@ -5,10 +5,11 @@ import ( "encoding/json" "fmt" - "github.com/brexhq/substation/config" - iconfig "github.com/brexhq/substation/internal/config" - "github.com/brexhq/substation/internal/secrets" - "github.com/brexhq/substation/message" + "github.com/brexhq/substation/v2/config" + "github.com/brexhq/substation/v2/message" + + iconfig "github.com/brexhq/substation/v2/internal/config" + "github.com/brexhq/substation/v2/internal/secrets" ) type utilitySecretConfig struct {