diff --git a/.github/codeowners b/.github/codeowners
new file mode 100644
index 0000000..41a97be
--- /dev/null
+++ b/.github/codeowners
@@ -0,0 +1,2 @@
+* @xDarksome
+* @xav
diff --git a/.github/workflows/dispatch_deploy.yml b/.github/workflows/dispatch_deploy.yml
new file mode 100644
index 0000000..93ebd43
--- /dev/null
+++ b/.github/workflows/dispatch_deploy.yml
@@ -0,0 +1,79 @@
+name: ⚙️ Deploy
+run-name: "Deploy: ${{ github.sha }} ➠ ${{ inputs.version }}${{ (!inputs.deploy-infra && !inputs.deploy-app) && ' 👀 deploy nothing' || ''}}${{ inputs.deploy-infra && ' ❱❱ infra' || '' }}${{ inputs.deploy-app && ' ❱❱ app' || '' }}"
+
+on:
+  workflow_dispatch:
+    inputs:
+      deploy-infra:
+        description: "Deploy Infra"
+        default: true
+        required: true
+        type: boolean
+      deploy-app:
+        description: "Deploy App"
+        default: true
+        required: true
+        type: boolean
+      stage:
+        description: 'Target Environment'
+        type: choice
+        options:
+          - staging
+          - prod
+        default: staging
+        required: true
+      version:
+        description: "Release Version"
+        type: string
+        default: '-current-'
+
+concurrency: deploy
+
+permissions:
+  contents: write
+  checks: write
+  id-token: write
+  packages: write
+
+jobs:
+  get_deployed_version:
+    name: Lookup Version
+    if: ${{ !inputs.deploy-app && inputs.version == '-current-' }}
+    secrets: inherit
+    uses: WalletConnect/ci_workflows/.github/workflows/release-get_deployed_version.yml@0.1.3
+    with:
+      task-name-stage: prod
+      task-name: ${{ vars.TASK_NAME }}
+      aws-region: ${{ vars.AWS_REGION }}
+      aws-role-arn: ${{ vars.AWS_ROLE_PROD }}
+      run-group: ${{ vars.RUN_GROUP }}
+
+  select_version:
+    name: Select Version
+    needs: [ get_deployed_version ]
+    if: ${{ always() && !cancelled() && !failure() }}
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    steps:
+      - name: Select target version
+        id: select_version
+        run: |
+          if [ "${{ inputs.deploy-app }}" != "true" ] && [ "${{ inputs.version }}" == "-current-" ]; then
+            echo "version=${{ needs.get_deployed_version.outputs.version }}" >> "$GITHUB_OUTPUT"
+          else
+            echo "version=${{ inputs.version }}" >> "$GITHUB_OUTPUT"
+          fi
+    outputs:
+      version: ${{ steps.select_version.outputs.version }}
+
+  cd:
+    name: CD
+    uses: ./.github/workflows/sub-cd.yml
+    needs: [ select_version ]
+    if: ${{ always() && !cancelled() && !failure() }}
+    secrets: inherit
+    with:
+      deploy-infra: ${{ inputs.deploy-infra }}
+      deploy-app: ${{ inputs.deploy-app }}
+      deploy-prod: ${{ inputs.stage == 'prod' }}
+      version: ${{ needs.select_version.outputs.version }}
diff --git a/.github/workflows/dispatch_publish.yml b/.github/workflows/dispatch_publish.yml
new file mode 100644
index 0000000..414dea8
--- /dev/null
+++ b/.github/workflows/dispatch_publish.yml
@@ -0,0 +1,52 @@
+name: ⚙️ Publish
+run-name: "Publish: ${{ github.sha }}${{ inputs.deploy-to != 'none' && format(' ❱❱ {0}', inputs.deploy-to) || ''}}"
+
+on:
+  workflow_dispatch:
+    inputs:
+      deploy-to:
+        description: "Deploy published image to"
+        type: choice
+        options:
+          - none
+          - staging
+          - prod
+        default: staging
+        required: true
+
+concurrency: deploy
+
+permissions:
+  contents: write
+  checks: write
+  id-token: write
+  packages: write
+
+jobs:
+  ci:
+    name: CI
+    uses: WalletConnect/ci_workflows/.github/workflows/ci.yml@0.1.3
+    secrets: inherit
+    with:
+      check-infra: false
+      check-app: true
+
+  release:
+    name: Release
+    uses: WalletConnect/ci_workflows/.github/workflows/release.yml@0.1.3
+    secrets: inherit
+    with:
+      infra-changed: false
+      app-changed: true
+
+  cd:
+    name: CD
+    needs: [ release ]
+    if: ${{ inputs.deploy-to == 'staging' || inputs.deploy-to == 'prod' }}
+    secrets: inherit
+    uses: ./.github/workflows/sub-cd.yml
+    with:
+      deploy-infra: false
+      deploy-app: true
+      deploy-prod: ${{ inputs.deploy-to == 'prod' }}
+      version: ${{ needs.release.outputs.version }}
diff --git a/.github/workflows/dispatch_validate.yml b/.github/workflows/dispatch_validate.yml
new file mode 100644
index 0000000..f8ee6a3
--- /dev/null
+++ b/.github/workflows/dispatch_validate.yml
@@ -0,0 +1,57 @@
+name: ⚙️ Validate
+run-name: "Validate: ${{ github.sha }}${{ (!inputs.check-infra && !inputs.check-app) && '👀 validate nothing' || ''}}${{ inputs.check-infra && ' ✓ infra' || '' }}${{ inputs.check-app && ' ✓ app' || '' }}"
+on:
+  workflow_dispatch:
+    inputs:
+      check-infra:
+        description: "Validate Infra"
+        default: true
+        required: true
+        type: boolean
+      check-app:
+        description: "Validate App"
+        default: true
+        required: true
+        type: boolean
+      check-staging:
+        description: "Validate Staging"
+        default: false
+        required: true
+        type: boolean
+      check-prod:
+        description: "Validate Prod"
+        default: false
+        required: true
+        type: boolean
+
+permissions:
+  contents: read
+  checks: write
+  id-token: write
+
+jobs:
+  ci:
+    name: CI
+    uses: WalletConnect/ci_workflows/.github/workflows/ci.yml@0.1.3
+    secrets: inherit
+    with:
+      check-infra: ${{ inputs.check-infra }}
+      check-app: ${{ inputs.check-app }}
+
+  validate-staging:
+    name: Validate - Staging
+    if: ${{ inputs.check-staging }}
+    uses: ./.github/workflows/sub-validate.yml
+    secrets: inherit
+    with:
+      stage: staging
+      stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.com
+
+  validate-prod:
+    name: Validate - Prod
+    if: ${{ inputs.check-prod }}
+    uses: ./.github/workflows/sub-validate.yml
+    secrets: inherit
+    with:
+      stage: prod
+      stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com
diff --git a/.github/workflows/event_intake.yml b/.github/workflows/event_intake.yml
new file mode 100644
index 0000000..36ff490
--- /dev/null
+++ b/.github/workflows/event_intake.yml
@@ -0,0 +1,47 @@
+# This workflow moves issues to the Project board when they receive the "accepted" label
+# When WalletConnect Org members create issues they are automatically "accepted".
+# Otherwise, they need to manually receive that label during intake.
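+# (For reference: the `auto-promote` job below applies the label automatically
+# for issues opened by Core Team members, and the `add-to-project` job reacts
+# to the resulting `labeled` event to file the issue on the board.)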
+name: ⚡ Intake
+
+on:
+  issues:
+    types: [ opened, labeled ]
+
+jobs:
+  add-to-project:
+    name: Add issue to board
+    if: github.event_name == 'issues' && github.event.action == 'labeled' && github.event.label.name == 'accepted'
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    steps:
+      - uses: actions/add-to-project@v0.1.0
+        with:
+          project-url: https://github.com/orgs/WalletConnect/projects/20
+          github-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }}
+          labeled: accepted
+          label-operator: OR
+
+  auto-promote:
+    name: auto-promote
+    if: github.event.action == 'opened'
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    steps:
+      - name: Check Core Team membership
+        uses: tspascoal/get-user-teams-membership@v1
+        id: is-core-team
+        with:
+          username: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }}
+          team: "Core Team"
+          GITHUB_TOKEN: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }}
+      - name: Print result
+        env:
+          CREATOR: ${{ github.event_name != 'pull_request' && github.event.issue.user.login || github.event.sender.login }}
+          IS_TEAM_MEMBER: ${{ steps.is-core-team.outputs.isTeamMember }}
+        run: echo "$CREATOR (Core Team Member $IS_TEAM_MEMBER) created this issue/PR"
+      - name: Label issues
+        if: ${{ steps.is-core-team.outputs.isTeamMember == 'true' }}
+        uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90
+        with:
+          add-labels: "accepted"
+          repo-token: ${{ secrets.ASSIGN_TO_PROJECT_GITHUB_TOKEN }}
diff --git a/.github/workflows/event_pr.yml b/.github/workflows/event_pr.yml
new file mode 100644
index 0000000..0536d10
--- /dev/null
+++ b/.github/workflows/event_pr.yml
@@ -0,0 +1,64 @@
+name: ⚡ Pull-Request
+run-name: 'PR / ${{ github.event.pull_request.title }}'
+
+on:
+  pull_request:
+    types:
+      - opened      # A pull request was created.
+      - reopened    # A closed pull request was reopened.
+      - edited      # A pull request's title, body, or labels are edited.
+      - synchronize # A pull request's branch was synchronized with its base branch.
+      - unlocked    # Conversation on a pull request was unlocked.
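+      # NOTE: `synchronize` re-runs the checks on every push to the PR branch;
+      # combined with the `concurrency` group below (cancel-in-progress), a new
+      # push cancels any still-running checks for the same PR number.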
+
+concurrency:
+  group: pr-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+  id-token: write
+  issues: read
+  pull-requests: write
+
+jobs:
+  check_pr:
+    name: Check PR
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    permissions:
+      statuses: write
+    steps:
+      - name: Check PR Title
+        uses: aslafy-z/conventional-pr-title-action@v3
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  paths-filter:
+    name: Paths Filter
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    steps:
+      - uses: actions/checkout@v3
+      - uses: WalletConnect/actions/github/paths-filter/@2.2.1
+        id: filter
+    outputs:
+      infra: ${{ steps.filter.outputs.infra }}
+      app: ${{ steps.filter.outputs.app }}
+
+  ci:
+    name: CI
+    needs: [ paths-filter ]
+    uses: WalletConnect/ci_workflows/.github/workflows/ci.yml@0.1.3
+    secrets: inherit
+    with:
+      check-app: ${{ needs.paths-filter.outputs.app == 'true' }}
+      check-infra: ${{ needs.paths-filter.outputs.infra == 'true' }}
+
+  merge_check:
+    name: Merge Check
+    needs: [ check_pr, ci ]
+    if: ${{ always() && !cancelled() && !failure() }}
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    steps:
+      - run: echo "CI is successful"
diff --git a/.github/workflows/event_release.yml b/.github/workflows/event_release.yml
new file mode 100644
index 0000000..958c5b1
--- /dev/null
+++ b/.github/workflows/event_release.yml
@@ -0,0 +1,64 @@
+name: ⚡ Release
+run-name: 'Release / ${{ github.event.head_commit.message }}'
+
+on:
+  push:
+    branches:
+      - main
+      - master
+    paths-ignore:
+      - '.github/**'
+      - 'docs/**'
+      - 'Cargo.toml'
+      - 'Cargo.lock'
+      - 'README.md'
+      - 'CHANGELOG.md'
+      - 'LICENSE'
+      - 'justfile'
+      - 'rustfmt.toml'
+      - '.editorconfig'
+      - '.pre-commit-config.yaml'
+      - '.terraformignore'
+      - '.env.example'
+
+concurrency: deploy
+
+permissions:
+  contents: write
+  id-token: write
+  packages: write
+  checks: write
+
+jobs:
+  paths_filter:
+    name: Paths Filter
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    steps:
+      - uses: actions/checkout@v3
+      - uses: WalletConnect/actions/github/paths-filter/@2.2.1
+        id: filter
+    outputs:
+      infra: ${{ steps.filter.outputs.infra }}
+      app: ${{ steps.filter.outputs.app }}
+
+  release:
+    name: Release
+    needs: [ paths_filter ]
+    uses: WalletConnect/ci_workflows/.github/workflows/release.yml@0.1.3
+    secrets: inherit
+    with:
+      task-name: ${{ vars.TASK_NAME }}
+      infra-changed: ${{ needs.paths_filter.outputs.infra == 'true' }}
+      app-changed: ${{ needs.paths_filter.outputs.app == 'true' }}
+
+  cd:
+    name: CD
+    needs: [ paths_filter, release ]
+    secrets: inherit
+    uses: ./.github/workflows/sub-cd.yml
+    with:
+      deploy-infra: ${{ needs.paths_filter.outputs.infra == 'true' }}
+      deploy-app: ${{ needs.paths_filter.outputs.app == 'true' }}
+      deploy-prod: true
+      version: ${{ needs.release.outputs.version }}
diff --git a/.github/workflows/sub-cd.yml b/.github/workflows/sub-cd.yml
new file mode 100644
index 0000000..740edae
--- /dev/null
+++ b/.github/workflows/sub-cd.yml
@@ -0,0 +1,79 @@
+name: ❖ CD
+
+on:
+  workflow_call:
+    inputs:
+      deploy-infra:
+        description: "Deploy infrastructure"
+        type: boolean
+        default: true
+      deploy-app:
+        description: "Deploy app"
+        type: boolean
+        default: true
+      deploy-prod:
+        description: "Deploy to production after successful deployment to staging"
+        type: boolean
+        default: false
+      version:
+        description: "The release version"
+        type: string
+        required: true
+
+concurrency: cd
+
+permissions:
+  contents: write
+  checks: write
+  id-token: write
+
+jobs:
+  cd-staging:
+    name: Staging
+    secrets: inherit
+    uses: WalletConnect/ci_workflows/.github/workflows/cd.yml@0.1.3
+    with:
+      deploy-infra: ${{ inputs.deploy-infra }}
+      deploy-app: ${{ inputs.deploy-app && !inputs.deploy-infra }}
+      version: ${{ inputs.version }}
+      task-name: ${{ vars.TASK_NAME }}
+      stage: staging
+      stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health
+      tf-variables: |
+        ofac_blocked_countries: ${{ vars.OFAC_BLOCKED_ZONES }}
+      aws-role-arn: ${{ vars.AWS_ROLE_STAGING }}
+
+  validate-staging:
+    name: Validate Staging
+    needs: [ cd-staging ]
+    uses: ./.github/workflows/sub-validate.yml
+    secrets: inherit
+    with:
+      stage: staging
+      stage-url: https://staging.${{ vars.SUBDOMAIN_NAME }}.walletconnect.com
+
+  cd-prod:
+    name: Prod
+    needs: [ validate-staging ]
+    if: ${{ inputs.deploy-prod }}
+    secrets: inherit
+    uses: WalletConnect/ci_workflows/.github/workflows/cd.yml@0.1.3
+    with:
+      deploy-infra: ${{ inputs.deploy-infra }}
+      deploy-app: ${{ inputs.deploy-app && !inputs.deploy-infra }}
+      version: ${{ inputs.version }}
+      task-name: ${{ vars.TASK_NAME }}
+      stage: prod
+      stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com/health
+      tf-variables: |
+        ofac_blocked_countries: ${{ vars.OFAC_BLOCKED_ZONES }}
+      aws-role-arn: ${{ vars.AWS_ROLE_PROD }}
+
+  validate-prod:
+    name: Validate Prod
+    needs: [ cd-prod ]
+    uses: ./.github/workflows/sub-validate.yml
+    secrets: inherit
+    with:
+      stage: prod
+      stage-url: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com
diff --git a/.github/workflows/sub-validate.yml b/.github/workflows/sub-validate.yml
new file mode 100644
index 0000000..2798bcd
--- /dev/null
+++ b/.github/workflows/sub-validate.yml
@@ -0,0 +1,85 @@
+name: ❖ Validate
+
+on:
+  workflow_call:
+    inputs:
+      stage:
+        description: 'the environment to validate'
+        type: string
+        default: 'staging'
+      stage-url:
+        description: 'the URL of the environment'
+        type: string
+        default: https://${{ vars.SUBDOMAIN_NAME }}.walletconnect.com
+      rust-toolchain:
+        description: 'The Rust version to use'
+        type: string
+        default: ${{ vars.RUST_VERSION }}
+
+permissions:
+  contents: read
+  checks: write
+  id-token: write
+
+jobs:
+  health-check:
+    name: Health Check - ${{ inputs.stage }}
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    environment:
+      name: ${{ inputs.stage }}
+      url: ${{ inputs.stage-url }}
+    steps:
+      - name: health-check
+        run: curl "${{ inputs.stage-url }}"
+
+  integration-tests:
+    name: Integration Tests - ${{ inputs.stage }}
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    environment:
+      name: ${{ inputs.stage }}
+      url: ${{ inputs.stage-url }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Install Rust ${{ inputs.rust-toolchain }}
+        uses: WalletConnect/actions-rs/toolchain@1.0.0
+        with:
+          toolchain: ${{ inputs.rust-toolchain }}
+          profile: 'default'
+          override: true
+
+      - name: "Run Integration Tests"
+        uses: WalletConnect/actions-rs/cargo@1.0.0
+        env:
+          PROJECT_ID: ${{ secrets.PROJECT_ID }}
+          RPC_URL: ${{ inputs.stage-url }}
+        with:
+          command: test
+          args: --test integration
+
+  integration-tests-ts:
+    name: TS Integration Tests - ${{ inputs.stage }}
+    runs-on:
+      group: ${{ vars.RUN_GROUP }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Setup Node
+        uses: actions/setup-node@v3
+        with:
+          node-version: 18.x
+          cache: 'yarn'
+          cache-dependency-path: '**/yarn.lock'
+
+      - name: Yarn Install
+        run: yarn install
+
+      - name: Yarn Integration Tests
+        run: yarn integration
+        env:
+          PROJECT_ID: ${{ secrets.PROJECT_ID }}
+          RPC_URL: ${{ inputs.stage-url }}
diff --git a/.gitignore b/.gitignore
index d0e8a39..d63f40f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,13 +1,83 @@
-/target
+#---------------------------------------
+# General
+
+.DS_Store
+.AppleDouble
+.LSOverride
+[Dd]esktop.ini
+
+#---------------------------------------
+# Environment
+
+.direnv
+.envrc
+.actrc
 .env
+#---------------------------------------
 # Editors
-.idea
-.vscode
-# Terraform
-terraform/.terraform*
+
+# JetBrains
+.idea/
+out/
+.fleet
+*.iws
+
+# VSCode
+.vscode/
+.history/
+*.code-workspace
+#---------------------------------------
+# Rust/Cargo
+
+# Generated by Cargo, will have compiled files and executables
+debug/
+target/
+
+# Backup files generated by rustfmt
+**/*.rs.bk
+
+# MSVC Windows builds of rustc generate these, which store debugging information
+*.pdb
+
+#---------------------------------------
 # Integration
+
 node_modules
 *.log
+
+
+#---------------------------------------
+# Terraform
+
+# Local .terraform directories
+**/.terraform/*
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+
+# Exclude all .tfvars files, which are likely to contain sensitive data, such as
+# password, private keys, and other secrets. These should not be part of version
+# control as they are data points which are potentially sensitive and subject
+# to change depending on the environment.
+*.tfvars
+*.tfvars.json
+
+# Ignore override files as they are usually used to override resources locally and so are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Include override files you do wish to add to version control using negated pattern
+#
+# !example_override.tf
+
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+*tfplan*
+
+# Ignore CLI configuration files
+.terraformrc
+terraform.rc
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..b7368fb
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "terraform/monitoring/grafonnet-lib"]
+	path = terraform/monitoring/grafonnet-lib
+	url = git@github.com:WalletConnect/grafonnet-lib.git
diff --git a/justfile b/justfile
index d6d6f54..1794866 100644
--- a/justfile
+++ b/justfile
@@ -1,175 +1,550 @@
-binary-crate := "."
-
-export JUST_ROOT := justfile_directory()
+binary-crate := "."
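+# The variables below are plain `just` variables; recipe bodies interpolate them
+# with `{{ ... }}` (e.g. `{{ color-cmd }}cargo{{ nocolor }}` wraps a name in ANSI colors).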
+tf-dir := "terraform"
+app-name := "verify"
+
+nocolor := '\033[0m'
+black := '\033[0;30m'
+red := '\033[0;31m'
+green := '\033[0;32m'
+brown := '\033[0;33m'
+blue := '\033[0;34m'
+purple := '\033[0;35m'
+cyan := '\033[0;36m'
+light-gray := '\033[0;37m'
+dark-gray := '\033[1;30m'
+light-red := '\033[1;31m'
+light-green := '\033[1;32m'
+yellow := '\033[1;33m'
+light-blue := '\033[1;34m'
+light-purple := '\033[1;35m'
+light-cyan := '\033[1;36m'
+white := '\033[1;37m'
+
+color-cmd := brown
+color-arg := cyan
+color-val := green
+color-hint := brown
+color-desc := blue
+color-service := light-green
+
+
+export JUST_ROOT := justfile_directory()
 
 # Default to listing recipes
 _default:
-    @just --list --list-prefix ' > '
+    @just --list --unsorted
+
+alias build := cargo-build
+alias run := cargo-run
+alias test := cargo-test
+alias clean := cargo-clean
+alias check := cargo-check
+alias clippy := cargo-clippy
+alias udeps := cargo-udeps
+alias checkfmt := cargo-checkfmt
+
+alias tfsec := tf-tfsec
+alias tflint := tf-tflint
+
+deploy-dev:
+    #!/bin/bash
+    set -euo pipefail
+
+    accountId="$(aws sts get-caller-identity | jq -r .Account)"
+    # Read the default region from the variables of the `tf-dir` defined above
+    region="$(cat {{ tf-dir }}/variables.tf | grep -A 2 region | grep default | sed -nr 's/.+default = "(.+)"/\1/p')"
+
+    imageRepo="$accountId.dkr.ecr.$region.amazonaws.com/{{ app-name }}"
+    aws ecr get-login-password --region $region | docker login --username AWS --password-stdin "$imageRepo"
+    docker build . -t "$imageRepo" --build-arg=release=true --platform=linux/amd64 ${BUILD_ARGS:-}
+    # Image ID looks like "sha256:<hex>"; strip the algorithm prefix
+    sha="$(docker inspect --format='{{ "{{ .Id }}" }}' "$imageRepo" | cut -d: -f2)"
+    tag="$imageRepo:$sha"
+    docker tag "$imageRepo" "$tag"
+    docker push "$tag"
+
+    cd {{ tf-dir }}
+    terraform workspace select dev
+    TF_VAR_ecr_app_version="$sha" terraform apply -auto-approve
+
+################################################################################
+# Meta recipes
+
+# Format the project code
+fmt target='all': (_check-string-in-set target "all,rust,tf")
+    #!/bin/bash
+    set -euo pipefail
+    [[ '{{ target }}' == 'all' || '{{ target }}' == 'rust' ]] && { just cargo-fmt; }
+    [[ '{{ target }}' == 'all' || '{{ target }}' == 'tf' ]] && { just tf-fmt; }
+
+# Update project documentation
+docs target='all': (_check-string-in-set target "all,rust,tf")
+    #!/bin/bash
+    set -euo pipefail
+    [[ '{{ target }}' == 'all' || '{{ target }}' == 'rust' ]] && { just cargo-build-docs; }
+    [[ '{{ target }}' == 'all' || '{{ target }}' == 'tf' ]] && { just tf-docs; }
+
+# Run linting and tests
+amigood: lint cargo-test-default cargo-test-all
 
-# Open project documentation in your local browser
-docs: (_build-docs "open" "nodeps")
+################################################################################
+# Linting recipes
+
+# Lint the project for quality issues
+lint target='all': (_check-string-in-set target "all,rust,tf")
+    #!/bin/bash
+    set -euo pipefail
+    [[ '{{ target }}' == 'all' || '{{ target }}' == 'rust' ]] && { just lint-rust; }
+    [[ '{{ target }}' == 'all' || '{{ target }}' == 'tf' ]] && { just lint-tf; }
+
+
+# Lint the rust project for any quality issues
+lint-rust: cargo-check cargo-clippy cargo-udeps cargo-checkfmt
+
+# Lint the terraform project for any quality issues
+lint-tf: tf-checkfmt tf-validate tf-tfsec tf-tflint
+
+################################################################################
+# Rust recipes
+
+# Run a Cargo command, choose target from open-docs, build-docs, fmt, build, run, test, clean, check, clippy, udeps, checkfmt
+cargo target='' sub-target='': (_check-string-in-set target "open-docs,build-docs,fmt,build,run,test,clean,check,clippy,udeps,checkfmt" "allow_empty")
+    #!/bin/bash
+    set -euo pipefail
+
+    [[ '{{ target }}' == 'help' || '{{ target }}' == 'h' || '{{ target }}' == '' ]] && {
+        printf "Available {{ color-cmd }}cargo{{ nocolor }} targets:\n"
+        printf "  open-docs             {{ color-desc }}# Open rust project documentation in your local browser{{ nocolor }}\n"
+        printf "  build-docs            {{ color-desc }}# Build rust project documentation{{ nocolor }}\n"
+        printf "  fmt                   {{ color-desc }}# Format the application code{{ nocolor }}\n"
+        printf "  build                 {{ color-desc }}# Build service for development{{ nocolor }}\n"
+        printf "  run                   {{ color-desc }}# Run the service{{ nocolor }}\n"
+        printf "  test target='default' {{ color-desc }}# Run project tests, choose target from default, all, doc{{ nocolor }}\n"
+        printf "  clean                 {{ color-desc }}# Clean build artifacts{{ nocolor }}\n"
+        printf "  check                 {{ color-desc }}# Fast check rust project for errors{{ nocolor }}\n"
+        printf "  clippy                {{ color-desc }}# Check rust project with clippy{{ nocolor }}\n"
+        printf "  udeps                 {{ color-desc }}# Check unused dependencies{{ nocolor }}\n"
+        printf "  checkfmt              {{ color-desc }}# Check the rust code formatting{{ nocolor }}\n"
+
+        exit 0
+    }
+
+    [[ '{{ target }}' == 'open-docs' ]] && { just cargo-open-docs; }
+    [[ '{{ target }}' == 'build-docs' ]] && { just cargo-build-docs; }
+    [[ '{{ target }}' == 'fmt' ]] && { just cargo-fmt; }
+    [[ '{{ target }}' == 'build' ]] && { just cargo-build; }
+    [[ '{{ target }}' == 'run' ]] && { just cargo-run; }
+    [[ '{{ target }}' == 'test' ]] && { just cargo-test {{ sub-target }}; }
+    [[ '{{ target }}' == 'clean' ]] && { just cargo-clean; }
+    [[ '{{ target }}' == 'check' ]] && { just cargo-check; }
+    [[ '{{ target }}' == 'clippy' ]] && { just cargo-clippy; }
+    [[ '{{ target }}' == 'udeps' ]] && { just cargo-udeps; }
+    [[ '{{ target }}' == 'checkfmt' ]] && { just cargo-checkfmt; }
+
+# Open rust project documentation in your local browser
+cargo-open-docs: (_cargo-build-docs "open" "nodeps")
     @echo '==> Opening documentation in system browser'
 
-# Fast check project for errors
-check:
-    @echo '==> Checking project for compile errors'
-    cargo check
+# Build rust project documentation
+cargo-build-docs: (_cargo-build-docs "" "nodeps")
+
+@_cargo-build-docs $open="" $nodeps="": _check-cmd-cargo
+    echo "==> Building project documentation @$JUST_ROOT/target/doc"
+    cargo doc --all-features --document-private-items ${nodeps:+--no-deps} ${open:+--open}
+
+# Format the application code
+@cargo-fmt: _check-cmd-cargo-fmt
+    printf '==> Running {{ color-cmd }}rustfmt{{ nocolor }}\n'
+    cargo +nightly fmt
 
 # Build service for development
-build:
+cargo-build: _check-cmd-cargo
     @echo '==> Building project'
     cargo build
 
-# Build project documentation
-build-docs: (_build-docs "" "nodeps")
-
 # Run the service
-run: build
+cargo-run: _check-cmd-cargo cargo-build
     @echo '==> Running project (ctrl+c to exit)'
     cargo run
 
-# Run project test suite, skipping storage tests
-test:
-    @echo '==> Testing project (default)'
+# Run project tests, choose target from default, all, doc
+cargo-test target='default': (_check-string-in-set target "default,all,doc")
+    #!/bin/bash
+    set -euo pipefail
+    [[ "{{ target }}" == 'default' ]] && { just cargo-test-default; }
+    [[ "{{ target }}" == 'all' ]] && { just cargo-test-all; }
+    [[ "{{ target }}" == 'doc' ]] && { just cargo-test-doc; }
+
+# Run project default tests
+cargo-test-default: _check-cmd-cargo
+    @printf '==> Testing project ({{ light-green }}default{{ nocolor }})\n'
     cargo test
 
-# Run project test suite, including storage tests (requires storage docker services to be running)
-test-all:
-    @echo '==> Testing project (all features)'
+# Run project tests with all features activated
+cargo-test-all: _check-cmd-cargo
+    @printf '==> Testing project ({{ light-green }}all features{{ nocolor }})\n'
     cargo test --all-features
 
-# Run test from project documentation
-test-doc:
-    @echo '==> Testing project docs'
+# Run tests from project documentation
+cargo-test-doc: _check-cmd-cargo
+    @printf '==> Testing project ({{ light-green }}docs{{ nocolor }})\n'
    cargo test --doc
 
 # Clean build artifacts
-clean:
-    @echo '==> Cleaning project target/*'
+cargo-clean: _check-cmd-cargo
+    @printf '==> Cleaning project target/*\n'
     cargo clean
 
-# Build docker image
-build-docker:
-    @echo '=> Build rs-relay docker image'
-    docker-compose -f ./ops/docker-compose.relay.yml -f ./ops/docker-compose.storage.yml build rs-relay-srv1
-
-# Start relay & storage services on docker
-run-docker:
-    @echo '==> Start services on docker'
-    @echo '==> Use run rs-relay app on docker with "cargo-watch"'
-    @echo '==> for more details check https://crates.io/crates/cargo-watch'
-    docker-compose -f ./ops/docker-compose.relay.yml -f ./ops/docker-compose.storage.yml up -d
-
-# Stop relay & storage services on docker
-stop-docker:
-    @echo '==> Stop services on docker'
-    docker-compose -f ./ops/docker-compose.relay.yml -f ./ops/docker-compose.storage.yml down
-
-# Clean up docker relay & storage services
-clean-docker:
-    @echo '==> Clean services on docker'
-    docker-compose -f ./ops/docker-compose.relay.yml -f ./ops/docker-compose.storage.yml stop
-    docker-compose -f ./ops/docker-compose.relay.yml -f ./ops/docker-compose.storage.yml rm -f
-
-# Start storage services on docker
-run-storage-docker:
-    @echo '==> Start storage services on docker'
+# Fast check project for errors
+cargo-check: _check-cmd-cargo
+    @printf '==> Checking project for compile errors\n'
+    cargo check
+
+# Check rust project with clippy
+cargo-clippy: _check-cmd-cargo-clippy
+    @printf '==> Running {{ color-cmd }}clippy{{ nocolor }}\n'
+    cargo +nightly clippy --all-features --tests -- -D clippy::all
+
+# Check unused dependencies
+cargo-udeps: _check-cmd-cargo-udeps
+    @printf '==> Running {{ color-cmd }}udeps{{ nocolor }}\n'
+    cargo +nightly udeps
+
+# Check the rust code formatting
+cargo-checkfmt: _check-cmd-cargo-fmt
+    @printf '==> Running {{ color-cmd }}rustfmt{{ nocolor }} --check\n'
+    cargo +nightly fmt --check
+
+################################################################################
+# Docker recipes
+
+# Run a docker command, choose target from build, run, stop, clean, ps, test
+docker target='' sub-target='': (_check-string-in-set target "build,run,stop,clean,ps,test" "allow_empty")
+    #!/bin/bash
+    set -euo pipefail
+
+    [[ '{{ target }}' == 'help' || '{{ target }}' == 'h' || '{{ target }}' == '' ]] && {
+        printf "Available {{ color-cmd }}docker{{ nocolor }} targets:\n"
+        printf "  build            {{ color-desc }}# Build the application docker image{{ nocolor }}\n"
+        printf "  run {{ color-arg }}target{{ nocolor }}={{ color-val }}''{{ nocolor }}   {{ color-desc }}# Run docker services{{ nocolor }}\n"
+        printf "  stop {{ color-arg }}target{{ nocolor }}={{ color-val }}''{{ nocolor }}  {{ color-desc }}# Stop docker services{{ nocolor }}\n"
+        printf "  clean {{ color-arg }}target{{ nocolor }}={{ color-val }}''{{ nocolor }} {{ color-desc }}# Stop and clean docker services{{ nocolor }}\n"
+        printf "  ps               {{ color-desc }}# List running docker services{{ nocolor }}\n"
+        printf "  test             {{ color-desc }}# Run project test suite on docker containers{{ nocolor }}\n"
+
+        exit 0
+    }
+
+    [[ '{{ target }}' == 'build' ]] && { just docker-build; }
+    [[ '{{ target }}' == 'run' ]] && { just docker-run {{ sub-target }}; }
+    [[ '{{ target }}' == 'stop' ]] && { just docker-stop {{ sub-target }}; }
+    [[ '{{ target }}' == 'clean' ]] && { just docker-clean {{ sub-target }}; }
+    [[ '{{ target }}' == 'ps' ]] && { just docker-ps; }
+    [[ '{{ target }}' == 'test' ]] && { just docker-test; }
+
+# Build the application docker image
+docker-build: _check-cmd-docker-compose
+    @printf '=> Build {{ color-cmd }}application server{{ nocolor }} docker image\n'
+    docker-compose -f ./ops/docker-compose.server.yml -f ./ops/docker-compose.storage.yml -f ./ops/docker-compose.ot.yml build {{ app-name }}
+
+# Run docker services, you can specify which services to run by passing a comma separated list of targets
+docker-run target='': (_check-set-in-set target "all,server,storage,ot" "allow_empty")
+    #!/bin/bash
+    set -euo pipefail
+
+    [[ '{{ target }}' == 'help' || '{{ target }}' == 'h' || '{{ target }}' == '' ]] && {
+        printf "Available {{ color-cmd }}run{{ nocolor }} targets:\n"
+        printf "  server  {{ color-desc }}# Run the Application Server docker container{{ nocolor }}\n"
+        printf "  storage {{ color-desc }}# Run Storage Services docker containers{{ nocolor }}\n"
+        printf "  ot      {{ color-desc }}# Run OpenTelemetry docker container{{ nocolor }}\n"
+
+        exit 0
+    }
+
+    IFS=',' read -ra items <<< "{{ target }}"
+    for item in "${items[@]}"; do
+        [[ "$item" == 'all' || "$item" == 'ot' ]] && { just docker-run-ot; }
+        [[ "$item" == 'all' || "$item" == 'storage' ]] && { just docker-run-storage; }
+        [[ "$item" == 'all' || "$item" == 'server' ]] && { just docker-run-server; }
+    done
+
+# Run the application server docker container
+docker-run-server: _check-cmd-docker-compose
+    @printf '==> Start {{ color-service }}Application Server{{ nocolor }} docker container\n'
+    docker-compose -f ./ops/docker-compose.server.yml up -d
+
+# Run storage services docker containers
+docker-run-storage: _check-cmd-docker-compose
+    @printf '==> Start {{ color-service }}Storage Services{{ nocolor }} docker containers\n'
     docker-compose -f ./ops/docker-compose.storage.yml up -d
 
-# Stop relay & storage services on docker
-stop-storage-docker:
-    @echo '==> Stop storage services on docker'
+# Run OpenTelemetry docker container
+docker-run-ot: _check-cmd-docker-compose
+    @printf '==> Start {{ color-service }}OpenTelemetry{{ nocolor }} docker container\n'
+    docker-compose -f ./ops/docker-compose.ot.yml up -d
+
+# Stop docker services, you can specify which services to stop by passing a comma separated list of targets
+docker-stop target='': (_check-set-in-set target "all,server,storage,ot" "allow_empty")
+    #!/bin/bash
+    set -euo pipefail
+
+    [[ '{{ target }}' == 'help' || '{{ target }}' == 'h' || '{{ target }}' == '' ]] && {
+        printf "Available {{ color-cmd }}stop{{ nocolor }} targets:\n"
+        printf "  server  {{ color-desc }}# Stop the application server docker container{{ nocolor }}\n"
+        printf "  storage {{ color-desc }}# Stop the storage services docker containers{{ nocolor }}\n"
+        printf "  ot      {{ color-desc }}# Stop the OpenTelemetry docker container{{ nocolor }}\n"
+
+        exit 0
+    }
+
+    IFS=',' read -ra items <<< "{{ target }}"
+    for item in "${items[@]}"; do
+        [[ "$item" == 'all' || "$item" == 'server' ]] && { just docker-stop-server; }
+        [[ "$item" == 'all' || "$item" == 'storage' ]] && { just docker-stop-storage; }
+        [[ "$item" == 'all' || "$item" == 'ot' ]] && { just docker-stop-ot; }
+    done
+
+# Stop the application server docker container
+docker-stop-server: _check-cmd-docker-compose
+    @printf '==> Stop {{ color-service }}application server{{ nocolor }} docker container\n'
+    docker-compose -f ./ops/docker-compose.server.yml down
+
+# Stop storage services docker containers
+docker-stop-storage: _check-cmd-docker-compose
+    @printf '==> Stop {{ color-service }}storage services{{ nocolor }} docker containers\n'
     docker-compose -f ./ops/docker-compose.storage.yml down
 
-# Clean up docker storage services
-clean-storage-docker:
-    @echo '==> Clean storage services on docker'
+# Stop OpenTelemetry docker container
+docker-stop-ot: _check-cmd-docker-compose
+    @printf '==> Stop {{ color-cmd }}OpenTelemetry{{ nocolor }} docker container\n'
+    docker-compose -f ./ops/docker-compose.ot.yml down
+
+# Stop and clean docker services, you can specify which services to clean by passing a comma separated list of targets
+docker-clean target='': (_check-set-in-set target "all,server,storage,ot" "allow_empty")
+    #!/bin/bash
+    set -euo pipefail
+
+    [[ '{{ target }}' == 'help' || '{{ target }}' == 'h' || '{{ target }}' == '' ]] && {
+        printf "Available {{ color-cmd }}clean{{ nocolor }} targets:\n"
+        printf "  server  {{ color-desc }}# Stop and clean the application server docker container{{ nocolor }}\n"
+        printf "  storage {{ color-desc }}# Stop and clean the storage services docker containers{{ nocolor }}\n"
+        printf "  ot      {{ color-desc }}# Stop and clean the OpenTelemetry docker container{{ nocolor }}\n"
+
+        exit 0
+    }
+
+    IFS=',' read -ra items <<< "{{ target }}"
+    for item in "${items[@]}"; do
+        [[ "$item" == 'all' || "$item" == 'server' ]] && { just docker-clean-server; }
+        [[ "$item" == 'all' || "$item" == 'storage' ]] && { just docker-clean-storage; }
+        [[ "$item" == 'all' || "$item" == 'ot' ]] && { just docker-clean-ot; }
+    done
+
+# Stop and clean the application server docker container
+docker-clean-server: _check-cmd-docker-compose
+    @printf '==> Clean {{ color-cmd }}application server{{ nocolor }} docker container\n'
+    docker-compose -f ./ops/docker-compose.server.yml stop
+    docker-compose -f ./ops/docker-compose.server.yml rm -f
+
+# Stop and clean storage services docker containers
+docker-clean-storage: _check-cmd-docker-compose
+    @printf '==> Clean {{ color-cmd }}storage services{{ nocolor }} docker containers\n'
     docker-compose -f ./ops/docker-compose.storage.yml stop
     docker-compose -f ./ops/docker-compose.storage.yml rm -f
 
-# List services running on docker
-ps-docker:
-    @echo '==> List services on docker'
-    docker-compose -f ./ops/docker-compose.relay.yml -f ./ops/docker-compose.storage.yml ps
+# Stop and clean OpenTelemetry docker container
+docker-clean-ot: _check-cmd-docker-compose
+    @printf '==> Clean {{ color-cmd }}OpenTelemetry{{ nocolor }} docker container\n'
+    docker-compose -f ./ops/docker-compose.ot.yml stop
+    docker-compose -f ./ops/docker-compose.ot.yml rm -f
 
-# Run project test suite on docker containers
-test-docker:
-    @echo '==> Run tests on docker container'
-    docker-compose -f ./ops/docker-compose.storage.yml -f ./ops/docker-compose.test.yml run --rm rs-relay-test
+# List running docker services
+docker-ps: _check-cmd-docker-compose
+    @printf '==> List running docker services\n'
+    docker-compose -f ./ops/docker-compose.server.yml -f ./ops/docker-compose.storage.yml -f ./ops/docker-compose.ot.yml ps
 
-run-jaeger:
-    @echo '==> Run opentelemetry jaeger docker container'
-    docker run --rm -p4317:4317 -p16686:16686 jaegertracing/all-in-one:latest
+# Run project test suite on docker containers
+docker-test: _check-cmd-docker-compose
+    @printf '==> Run tests on docker container\n'
+    docker-compose -f ./ops/docker-compose.storage.yml -f ./ops/docker-compose.test.yml -f ./ops/docker-compose.ot.yml run --rm {{ app-name }}-test
 
-# Bumps the binary version to the given version
-bump-version to: (_bump-cargo-version to binary-crate + "/Cargo.toml")
+################################################################################
+# Terraform recipes
 
-# Lint the project for any quality issues
-lint: check fmt clippy commit-check
+# Run a Terraform command, choose target from fmt, checkfmt, validate, tfsec, tflint, init, plan, apply, docs, clean
+tf target='': (_check-string-in-set target "fmt,checkfmt,validate,tfsec,tflint,init,plan,apply,docs,clean" "allow_empty")
+    #!/bin/bash
+    set -euo pipefail
 
-# Run project linter
-clippy:
+    [[ '{{ target }}' == 'help' || '{{ target }}' == 'h' || '{{ target }}' == '' ]] && {
+        printf "Available {{ color-cmd }}Terraform{{ nocolor }} targets:\n"
+        printf "  fmt      {{ color-desc }}# Format the terraform code{{ nocolor }}\n"
+        printf "  checkfmt {{ color-desc }}# Check Terraform formatting{{ nocolor }}\n"
+        printf "  validate {{ color-desc }}# Run Terraform validation{{ nocolor }}\n"
+        printf "  tfsec    {{ color-desc }}# Check Terraform configuration for potential security issues{{ nocolor }}\n"
+        printf "  tflint   {{ color-desc }}# Run Terraform linter{{ nocolor }}\n"
+        printf "  init     {{ color-desc }}# Init Terraform project{{ nocolor }}\n"
+        printf "  plan     {{ color-desc }}# Perform a Terraform plan on the current workspace{{ nocolor }}\n"
+        printf "  apply    {{ color-desc }}# Perform a Terraform apply on the current workspace{{ nocolor }}\n"
+        printf "  docs     {{ color-desc }}# Update the Terraform documentation{{ nocolor }}\n"
+        printf "  clean    {{ color-desc }}# Clean the Terraform environment{{ nocolor }}\n"
+
+        exit 0
+    }
+
+    [[ '{{ target }}' == 'fmt' ]] && { just tf-fmt; }
+    [[ '{{ target }}' == 'checkfmt' ]] && { just tf-checkfmt; }
+    [[ '{{ target }}' == 'validate' ]] && { just tf-validate; }
+    [[ '{{ target }}' == 'tfsec' ]] && { just tf-tfsec; }
+    [[ '{{ target }}' == 'tflint' ]] && { just tf-tflint; }
+    [[ '{{ target }}' == 'init' ]] && { just tf-init; }
+    [[ '{{ target }}' == 'plan' ]] && { just tf-plan; }
+    [[ '{{ target }}' == 'apply' ]] && { just tf-apply; }
+    [[ '{{ target }}' == 'docs' ]] && { just tf-docs; }
+    [[ '{{ target }}' == 'clean' ]] && { just tf-clean; }
+
+# Format the terraform code
+@tf-fmt: _check-cmd-terraform
+    printf '==> Running {{ color-cmd }}terraform fmt{{ nocolor }}\n'
+    cd {{ tf-dir }}; terraform fmt -recursive
+
+# Check Terraform formatting
+@tf-checkfmt: _check-cmd-terraform
+    printf '==> Running {{ color-cmd }}terraform fmt -check{{ nocolor }}\n'
+    cd {{ tf-dir }}; terraform fmt -check -recursive
+
+# Run Terraform validation
+@tf-validate: _check-cmd-terraform
+    printf '==> Running {{ color-cmd }}terraform validate{{ nocolor }}\n'
+    cd {{ tf-dir }}; terraform validate
+
+# Check Terraform configuration for potential security issues
+@tf-tfsec: _check-cmd-tfsec
+    printf '==> Running {{ color-cmd }}tfsec{{ nocolor }}\n'
+    cd {{ tf-dir }}; tfsec
+
+# Run Terraform linter
+@tf-tflint: _check-cmd-tflint
+    printf '==> Running {{ color-cmd }}tflint{{ nocolor }}\n'
+    cd {{ tf-dir }}; tflint --recursive
+
+# Init Terraform project
+@tf-init: _check-cmd-terraform
+    printf '==> Running {{ color-cmd }}terraform init{{ nocolor }}\n'
+    cd {{ tf-dir }}; terraform init
+
+# Perform a Terraform plan on the current workspace
+@tf-plan: _check-cmd-terraform
+    printf '==> Running {{ color-cmd }}terraform plan{{ nocolor }}\n'
+    cd {{ tf-dir }}; terraform plan
+
+# Perform a Terraform apply on the current workspace
+@tf-apply: _check-cmd-terraform
+    printf '==> Running {{ color-cmd }}terraform apply{{ nocolor }}\n'
+    cd {{ tf-dir }}; terraform apply
+
+# Update the Terraform documentation
+@tf-docs: _check-cmd-tfdocs
+    printf '==> Running {{ color-cmd }}terraform-docs{{ nocolor }}\n'
+    cd {{ tf-dir }}; terraform-docs .
+
+# Clean the Terraform environment
+@tf-clean:
+    printf '==> Clean Terraform environment\n'
+    cd {{ tf-dir }}; rm -rf .terraform/ .terraform.lock.hcl
+
+################################################################################
+# Helper recipes
+
+_check-cmd-cargo: (_check-cmd 'cargo' 'To install see https://doc.rust-lang.org/cargo/getting-started/installation.html for details')
+_check-cmd-cargo-fmt: (_check-cmd 'cargo-fmt' 'To install run ' + color-hint + '`rustup component add rustfmt`' + nocolor + ', see https://github.com/rust-lang/rustfmt for details')
+_check-cmd-cargo-clippy: (_check-cmd 'cargo-clippy' 'To install run ' + color-hint + '`rustup component add clippy`' + nocolor + ', see https://github.com/rust-lang/rust-clippy for details')
+_check-cmd-cargo-udeps: (_check-cmd 'cargo-udeps' 'To install run ' + color-hint + '`cargo install cargo-udeps --locked`' + nocolor + ', see https://github.com/est31/cargo-udeps for details')
+_check-cmd-docker-compose: (_check-cmd 'docker-compose' 'To install see https://docs.docker.com/compose/install')
+_check-cmd-terraform: (_check-cmd 'terraform' 'To install see https://developer.hashicorp.com/terraform/downloads')
+_check-cmd-tfsec: (_check-cmd 'tfsec' 'To install see https://github.com/aquasecurity/tfsec#installation')
+_check-cmd-tflint: (_check-cmd 'tflint' 'To install see https://github.com/terraform-linters/tflint#installation')
+_check-cmd-tfdocs: (_check-cmd 'terraform-docs' 'To install see https://terraform-docs.io/user-guide/installation/')
+
+[no-exit-message]
+_check-cmd cmd install:
     #!/bin/bash
     set -euo pipefail
-    if command -v cargo-clippy >/dev/null; then
-        echo '==> Running clippy'
-        cargo clippy --all-features --tests -- -D clippy::all -W clippy::style
-    else
-        echo '==> clippy not found in PATH, skipping'
-        echo '    ^^^^^^ To install `rustup component add clippy`, see https://github.com/rust-lang/rust-clippy for details'
+    cmd="{{ cmd }}"
+    numChars=${#cmd}
+    underline=$(printf '%*s' "$numChars" | tr ' ' '^')
+
+    if ! command -v {{ cmd }} >/dev/null; then
+        printf '==> {{ color-cmd }}{{ cmd }}{{ nocolor }} not found in PATH\n'
+        printf '    %s {{ install }}\n' "$underline"
+        exit 1
     fi
 
-# Run code formatting check
-fmt:
+[no-exit-message]
+_check-string-in-set target set options='':
     #!/bin/bash
     set -euo pipefail
-    if command -v cargo-fmt >/dev/null; then
-        echo '==> Running rustfmt'
-        cargo +nightly fmt -- --check
-    else
-        echo '==> rustfmt not found in PATH, skipping'
-        echo '    ^^^^^^ To install `rustup component add rustfmt`, see https://github.com/rust-lang/rustfmt for details'
+    target="{{ target }}"
+    set="{{ set }}"
+    options="{{ options }}"
+
+    if ! [[ -z "$target" && "$options" == "allow_empty" ]]; then
[[ -z "$target" && "$options" == "allow_empty" ]]; then + # Convert the set into an array + IFS=',' read -ra setArray <<< "$set" + + # Check if target is in the setArray + found=false + for item in "${setArray[@]}"; do + if [[ "$item" == "$target" ]]; then + found=true + break + fi + done + + if [[ "$found" != true ]]; then + printf "{{red }}$target{{ nocolor }} is not a valid target, accepted values are {{ brown }}[${set}]{{ nocolor }}\n" + exit 1 + fi fi - if command -v terraform -version >/dev/null; then - echo '==> Running terraform fmt' - terraform -chdir=terraform fmt -recursive - else - echo '==> terraform not found in PATH, skipping' - echo ' ^^^^^^^^^ To install see https://developer.hashicorp.com/terraform/downloads' - fi - -# Run commit checker -commit-check: +[no-exit-message] +_check-set-in-set set1 set2 options='': #!/bin/bash set -euo pipefail - if command -v cog >/dev/null; then - echo '==> Running cog check' - cog check --from-latest-tag - else - echo '==> cog not found in PATH, skipping' - echo ' ^^^ To install `cargo install --locked cocogitto`, see https://github.com/cocogitto/cocogitto for details' + set1="{{ set1 }}" + set2="{{ set2 }}" + options="{{ options }}" + + # Exit with status 0 if the first set is empty and empty strings are allowed + if [[ -z "$set1" && "$options" == "allow_empty" ]]; then + exit 0 fi -# Update documentation with any changes detected -update-docs: (_regenerate-metrics "docs/Metrics.md") - -# Build project documentation -_build-docs $open="" $nodeps="": - @echo "==> Building project documentation @$JUST_ROOT/target/doc" - @cargo doc --all-features --document-private-items ${nodeps:+--no-deps} ${open:+--open} - -# Update the metrics documentation with current metrics -_regenerate-metrics file temp=`mktemp`: build - @echo '==> Regenerating metrics to @{{file}}' - @cd scripts && ./metrics-apply.awk <(./metrics-fetch.sh | ./metrics-doc.pl | ./metrics-format.pl) < $JUST_ROOT/{{file}} > {{temp}} - @mv -f {{temp}} {{file}} - -# Bump the version field of a given Cargo.toml file -_bump-cargo-version version file temp=`mktemp`: - @echo '==> Bumping {{file}} version to {{version}}' - @perl -spe 'if (/^version/) { s/("[\w.]+")/"$version"/ }' -- -version={{version}} < {{file}} > {{temp}} - @mv -f {{temp}} {{file}} + # Convert both sets into arrays + IFS=',' read -ra setArray1 <<< "$set1" + IFS=',' read -ra setArray2 <<< "$set2" + + # Function to check if an item is in the second set + is_in_set() { + local e match="$1" + for e in "${setArray2[@]}"; do [[ "$e" == "$match" ]] && return 0; done + return 1 + } + + # Check each item in the first set + all_found=true + for item in "${setArray1[@]}"; do + if [[ -n "$item" || "$options" == "allow_empty" ]]; then + if ! 
is_in_set "$item"; then + all_found=false + break + fi + fi + done + + if [[ "$all_found" != true ]]; then + printf "[{{ red }}$set1{{ nocolor }}] contains invalid targets, accepted values are {{ brown }}[${set2}]{{ nocolor }}\n" + exit 1 + fi diff --git a/ops/docker-compose.ot.yml b/ops/docker-compose.ot.yml new file mode 100644 index 0000000..b76506a --- /dev/null +++ b/ops/docker-compose.ot.yml @@ -0,0 +1,24 @@ +version: '3.9' + +services: + jaeger: + image: jaegertracing/all-in-one:latest + networks: + - walletconnect-server + ports: + - 4317:4317 + - 16686:16686 + volumes: + - server:/jaeger + environment: + - COLLECTOR_OTLP_ENABLED=true + +networks: + walletconnect-server: + ipam: + driver: default + config: + - subnet: 172.10.1.0/16 + +volumes: + server: diff --git a/ops/docker-compose.storage.yml b/ops/docker-compose.storage.yml new file mode 100644 index 0000000..0463765 --- /dev/null +++ b/ops/docker-compose.storage.yml @@ -0,0 +1,148 @@ +version: '3.9' + +services: + activemq: + build: ./ActiveMQ + networks: + - walletconnect-rs-relay + ports: + - 5672:5672 + - 8086:8161 + volumes: + - rs-relay-test-data-storage:/activemq + environment: + - ACTIVEMQ_USERNAME=admin + - ACTIVEMQ_PASSWORD=admin + - ACTIVEMQ_WEBADMIN_USERNAME=admin + - ACTIVEMQ_WEBADMIN_PASSWORD=admin + + mongo: + image: mongo:4 + networks: + - walletconnect-rs-relay + ports: + - 27017:27017 + volumes: + - rs-relay-test-data-storage:/mongo + healthcheck: + test: + [ + "CMD", + "mongo", + "--eval", + "'db.runCommand(\"ping\").ok'", + "localhost:27017/test", + "--quiet" + ] + interval: 5s + timeout: 5s + retries: 5 + environment: + - MONGO_INITDB_ROOT_USERNAME=admin + - MONGO_INITDB_ROOT_PASSWORD=admin + - MONGO_INITDB_DATABASE=relay + + mongo-express: + image: mongo-express + networks: + - walletconnect-rs-relay + ports: + - 8085:8081 + depends_on: + mongo: + condition: service_healthy + environment: + - ME_CONFIG_MONGODB_ADMINUSERNAME=admin + - ME_CONFIG_MONGODB_ADMINPASSWORD=admin + - ME_CONFIG_MONGODB_URL="mongodb://admin:admin@mongo:27017" + + redis: + image: redis:6-alpine + networks: + - walletconnect-rs-relay + ports: + - 6379:6379 + volumes: + - rs-relay-test-data-storage:/redis + healthcheck: + test: [ "CMD", "redis-cli", "ping" ] + interval: 5s + timeout: 5s + retries: 5 + + redisinsight: + image: redislabs/redisinsight:latest + networks: + - walletconnect-rs-relay + ports: + - 8001:8001 + + minio: + image: minio/minio + networks: + - walletconnect-rs-relay + ports: + - "9000:9000" + - "9090:9090" + volumes: + - rs-relay-test-data-storage:/minio + environment: + - "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" + - "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + command: server /data --console-address ":9090" + + createbuckets: + image: minio/mc + depends_on: + - minio + networks: + - walletconnect-rs-relay + entrypoint: > + /bin/sh -c " + /usr/bin/mc config host add myminio http://minio:9000 AKIAIOSFODNN7EXAMPLE wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY; + /usr/bin/mc mb myminio/datalake; + /usr/bin/mc anonymous set upload myminio/datalake; + /usr/bin/mc anonymous set download myminio/datalake; + /usr/bin/mc anonymous set public myminio/datalake; + exit 0; + " + + jaeger: + image: jaegertracing/all-in-one:latest + networks: + - walletconnect-rs-relay + ports: + - 4317:4317 + - 16686:16686 + volumes: + - rs-relay-test-data-storage:/jaeger + environment: + - COLLECTOR_OTLP_ENABLED=true + # aws-otel-collector: + # image: amazon/aws-otel-collector:latest + # command: --config=/otel-config.yaml + # 
+  #   networks:
+  #     - walletconnect-rs-relay
+  #   ports:
+  #     - 4318:4317
+  #   volumes:
+  #     - ./otel/config.yaml:/otel-config.yaml
+  #   healthcheck:
+  #     test: [ "CMD", "/healthcheck" ]
+  #     interval: 5s
+  #     timeout: 5s
+  #     retries: 5
+  #   environment:
+  #     - AWS_ACCESS_KEY_ID
+  #     - AWS_SECRET_ACCESS_KEY
+  #     - AWS_REGION
+
+networks:
+  walletconnect-rs-relay:
+    ipam:
+      driver: default
+      config:
+        - subnet: 172.10.1.0/16
+
+volumes:
+  rs-relay-test-data-storage:
diff --git a/terraform/.terraform-docs.yml b/terraform/.terraform-docs.yml
new file mode 100644
index 0000000..f112e8b
--- /dev/null
+++ b/terraform/.terraform-docs.yml
@@ -0,0 +1,40 @@
+formatter: 'markdown table'
+
+recursive:
+  enabled: true
+  path: .
+
+output:
+  file: README.md
+  mode: inject
+  template: |-
+    <!-- BEGIN_TF_DOCS -->
+    {{ .Content }}
+    <!-- END_TF_DOCS -->
+
+content: |
+  {{ .Header }}
+  {{ .Requirements }}
+  {{ .Providers }}
+  {{ .Modules }}
+
+  ## Inputs
+  {{- $hideInputs := list "namespace" "region" "stage" "name" "delimiter" "attributes" "tags" "regex_replace_chars" "id_length_limit" "label_key_case" "label_value_case" "label_order" }}
+  {{- $filteredInputs := list -}}
+  {{- range .Module.Inputs -}}
+    {{- if not (has .Name $hideInputs) -}}
+      {{- $filteredInputs = append $filteredInputs . -}}
+    {{- end -}}
+  {{- end -}}
+  {{ if not $filteredInputs }}
+
+  No inputs.
+  {{ else }}
+  | Name | Description | Type | Default | Required |
+  |------|-------------|------|---------|:--------:|
+  {{- range $filteredInputs }}
+  | {{ anchorNameMarkdown "input" .Name }} | {{ tostring .Description | sanitizeMarkdownTbl }} | {{ printf " " }}{{ tostring .Type | sanitizeMarkdownTbl }} | {{ printf " " }}{{ .GetValue | sanitizeMarkdownTbl }} | {{ printf " " }}{{ ternary .Required "yes" "no" }} |
+  {{- end }}
+  {{- end }}
+  {{ .Outputs }}
+  {{/** End of file fixer */}}
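+
+# Usage sketch (assumes terraform-docs is installed; the justfile's `tf-docs`
+# recipe wraps the same call):
+#
+#   cd terraform && terraform-docs .
+#
+# This renders the `content` template above for the root module and, because
+# `recursive.enabled` is true, for each submodule, injecting the output into the
+# corresponding README.md between the BEGIN_TF_DOCS / END_TF_DOCS markers.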
diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl
new file mode 100644
index 0000000..4655c55
--- /dev/null
+++ b/terraform/.terraform.lock.hcl
@@ -0,0 +1,89 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
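+#
+# (To refresh these pins after changing a provider constraint, re-run
+# `terraform init -upgrade`; `terraform providers lock` can add hashes for
+# additional platforms. Both are standard Terraform CLI commands.)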
+
+provider "registry.terraform.io/alxrem/jsonnet" {
+ version = "2.2.0"
+ constraints = "~> 2.2.0"
+ hashes = [
+ "h1:618oQ4FUqJKIihf/Tmxl3Tu9MsiuUpHYwn5BH79SJ0Y=",
+ "zh:36d073bffcbdc47a3e3d5b19f5c511f38e4075026b467d98395d27436aeb0234",
+ "zh:3e252ca26d6a6dbad61a10d3a9231daf0cf565d418efbd651d4e67afe1d6f500",
+ "zh:3e275f0ff014e7d32b3cc7d655b14a1ba82781757f65830d4e5b6349a82d1062",
+ "zh:42ddeed65087338ec73724e5b211157a804d9ab9ef6913cbb48e362d30c6b5c0",
+ "zh:5034cd7aaa3f27d914813eb3fb9c344a4670f3226476123379d9ec95d8a5381f",
+ "zh:6a0650d9f4302f0b6107612b149ea55c22eb9d19a1483e08dacb2ba22b5be5d3",
+ "zh:97e9f0b32e33d33d109b5e751342a6ba5949165c23d8a88dd147a6b082fee109",
+ "zh:a10faaf69352ee9ecb9a68a7b4ceea647f6a10840ecdf0b1a4edd59fe755d600",
+ "zh:c6bb0612a6eb489b74aa74dc5ff740b601bbdf2335a29f87b571c19cd232a62d",
+ "zh:d061a59d5c11e6e7b167d0cf6795cdf7de199c926fe4cc96edd434de71374c61",
+ "zh:da49f78a7ec1f598c2e4b5c3f84c2785fdb210ef61cfd70ae6d48b03143a416b",
+ "zh:e5bb54e1737c196dc6f8a585251f51fdd717fdc24a480816e1b86958693b993b",
+ "zh:f10ef2044905b08d9ed7c74a8b778f41ce48e86afd62c4119ab54a80810b795a",
+ "zh:f787d511f302d5714cb6349fae5f44053c14ebf6cb0435c733e7c822c929fe36",
+ ]
+}
+
+provider "registry.terraform.io/grafana/grafana" {
+ version = "2.10.0"
+ constraints = ">= 2.1.0, ~> 2.1"
+ hashes = [
+ "h1:coquRbqr1W9eLDGjkfya6zA7Zlirs9/23419URZoNSE=",
+ "zh:0e1132bc5c0b572f44af2b9336627447d8e6e2be3e4178c24b574bcd3b61f442",
+ "zh:32ecadcd77f1bdb4ccb456adecd126d28f82f3fcecee51366378c2a40289a558",
+ "zh:603580330d884b00b379c5407acd1f0d6b87fde36efeda52ab00132cb9347c1f",
+ "zh:61f40b67c4b3c021847e7163f0e96907089bdcd454c05f9625b6b7324ad2a908",
+ "zh:93d9d97998a210512acb1f11aea4dac2c02cefbcdb7f706bc1580bb977cb0a51",
+ "zh:9a4fac8681e2d0374edfb8f4df9876c112cf07a737f896f79b296cf5b531ac32",
+ "zh:bba533e6e0a16869090a55ba232945e79ce482ea370900ec93e8e843eeedc212",
+ "zh:c3c68199be5b98d2eca410d559b66543ad828d30e16968453e4fe4eac687137e",
+ "zh:c71358c30f3d02ee587253e4b152f159d20ee01865b39084d5b2ee00eb681fed",
+ "zh:d131827d98a1365b9e00ef7562f27d65ef98a15550e390141573bec512f171f7",
+ "zh:d5a7541508ba46d35e1bd5f21bea03a7dd536b0eab585be89f8b8d9662b3eb78",
+ "zh:eaa1f64856995aa1da61ae1b202e74253d473bb7486c8d16424f007e7fa9d0ef",
+ "zh:f71a4aa023d3f43e19ebcec7f6856a64235d12cf400c24c139945833d07c6b25",
+ "zh:faa014f97f8515653a596c76b9455f762c2a4ae637a3753786612c623d5571af",
+ ]
+}
+
+provider "registry.terraform.io/hashicorp/aws" {
+ version = "5.34.0"
+ constraints = ">= 4.9.0, >= 4.50.0, >= 5.0.0, >= 5.7.0, ~> 5.7, >= 5.20.0"
+ hashes = [
+ "h1:1Y1JgV1z99QqAK06+atyfNqreZxyGZKbm4mZO4VhhT8=",
+ "zh:01bb20ae12b8c66f0cacec4f417a5d6741f018009f3a66077008e67cce127aa4",
+ "zh:3b0c9bdbbf846beef2c9573fc27898ceb71b69cf9d2f4b1dd2d0c2b539eab114",
+ "zh:5226ecb9c21c2f6fbf1d662ac82459ffcd4ad058a9ea9c6200750a21a80ca009",
+ "zh:6021b905d9b3cd3d7892eb04d405c6fa20112718de1d6ef7b9f1db0b0c97721a",
+ "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+ "zh:9e61b8e0ccf923979cd2dc1f1140dbcb02f92248578e10c1996f560b6306317c",
+ "zh:ad6bf62cdcf531f2f92f6416822918b7ba2af298e4a0065c6baf44991fda982d",
+ "zh:b698b041ef38837753bbe5265dddbc70b76e8b8b34c5c10876e6aab0eb5eaf63",
+ "zh:bb799843c534f6a3f072a99d93a3b53ff97c58a96742be15518adf8127706784",
+ "zh:cebee0d942c37cd3b21e9050457cceb26d0a6ea886b855dab64bb67d78f863d1",
+ "zh:e061fdd1cb99e7c81fb4485b41ae000c6792d38f73f9f50aed0d3d5c2ce6dcfb",
+ "zh:eeb4943f82734946362696928336357cd1d36164907ae5905da0316a67e275e1",
+ "zh:ef09b6ad475efa9300327a30cbbe4373d817261c8e41e5b7391750b16ef4547d",
+ "zh:f01aab3881cd90b3f56da7c2a75f83da37fd03cc615fc5600a44056a7e0f9af7",
+ "zh:fcd0f724ebc4b56a499eb6c0fc602de609af18a0d578befa2f7a8df155c55550",
+ ]
+}
+
+provider "registry.terraform.io/hashicorp/random" {
+ version = "3.5.1"
+ constraints = "3.5.1"
+ hashes = [
+ "h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
+ "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
+ "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
+ "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
+ "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
+ "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
+ "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+ "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
+ "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
+ "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
+ "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
+ "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
+ "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
+ ]
+}
diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl
new file mode 100644
index 0000000..6432d3b
--- /dev/null
+++ b/terraform/.tflint.hcl
@@ -0,0 +1,19 @@
+config {
+ format = "default"
+ module = true
+}
+
+plugin "terraform" {
+ enabled = true
+ preset = "all"
+}
+
+plugin "aws" {
+ enabled = true
+ version = "0.18.0"
+ source = "github.com/terraform-linters/tflint-ruleset-aws"
+}
+
+rule "terraform_workspace_remote" {
+ enabled = false
+}
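+
+# Usage sketch: the justfile's `tf-tflint` recipe runs `tflint --recursive` from
+# the terraform/ directory, which picks up this config. Run `tflint --init` once
+# beforehand to download the pinned AWS ruleset plugin declared above.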
diff --git a/terraform/README.md b/terraform/README.md
new file mode 100644
index 0000000..8af30e9
--- /dev/null
+++ b/terraform/README.md
@@ -0,0 +1,56 @@
+# Terraform Infrastructure
+
+You need to be authenticated to Terraform Cloud to manage the infrastructure
+from your computer.
+
+To authenticate, run `terraform login` and follow the instructions.
+
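+A typical session afterwards looks like this (a sketch; the `dev` workspace name
+comes from the justfile's `deploy-dev` recipe, substitute your target stage):
+
+```bash
+cd terraform                    # the tf-dir used by the justfile recipes
+terraform init                  # or: just tf init
+terraform workspace select dev  # pick the workspace to operate on
+terraform plan                  # or: just tf plan
+```
+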
+<!-- BEGIN_TF_DOCS -->
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.0 |
+| [aws](#requirement\_aws) | >= 5.7 |
+| [grafana](#requirement\_grafana) | >= 2.1 |
+| [random](#requirement\_random) | 3.5.1 |
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | 5.30.0 |
+| [random](#provider\_random) | 3.5.1 |
+| [terraform](#provider\_terraform) | n/a |
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 0.3.2 |
+
+## Inputs
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [app\_autoscaling\_desired\_count](#input\_app\_autoscaling\_desired\_count) | The desired number of tasks to run | number | 1 | no |
+| [app\_autoscaling\_max\_capacity](#input\_app\_autoscaling\_max\_capacity) | The maximum number of tasks to run when autoscaling | number | 1 | no |
+| [app\_autoscaling\_min\_capacity](#input\_app\_autoscaling\_min\_capacity) | The minimum number of tasks to run when autoscaling | number | 1 | no |
+| [geoip\_db\_key](#input\_geoip\_db\_key) | The name to the GeoIP database | string | n/a | yes |
+| [grafana\_auth](#input\_grafana\_auth) | The API Token for the Grafana instance | string | "" | no |
+| [image\_version](#input\_image\_version) | The ECS tag of the image to deploy | string | n/a | yes |
+| [infura\_project\_id](#input\_infura\_project\_id) | The project ID for Infura | string | n/a | yes |
+| [log\_level](#input\_log\_level) | Defines logging level for the application | string | n/a | yes |
+| [notification\_channels](#input\_notification\_channels) | The notification channels to send alerts to | list(any) | [] | no |
+| [ofac\_blocked\_countries](#input\_ofac\_blocked\_countries) | The list of countries to block | string | "" | no |
+| [pokt\_project\_id](#input\_pokt\_project\_id) | The project ID for POKT | string | n/a | yes |
+| [project\_cache\_ttl](#input\_project\_cache\_ttl) | The TTL for project data cache | number | 300 | no |
+| [registry\_api\_auth\_token](#input\_registry\_api\_auth\_token) | The auth token for the registry API | string | n/a | yes |
+| [registry\_api\_endpoint](#input\_registry\_api\_endpoint) | The endpoint of the registry API | string | n/a | yes |
+| [webhook\_cloudwatch\_p2](#input\_webhook\_cloudwatch\_p2) | The webhook to send CloudWatch P2 alerts to | string | "" | no |
+| [webhook\_prometheus\_p2](#input\_webhook\_prometheus\_p2) | The webhook to send Prometheus P2 alerts to | string | "" | no |
+| [zerion\_api\_key](#input\_zerion\_api\_key) | The API key for Zerion | string | n/a | yes |
+## Outputs
+
+No outputs.
+
+
+
diff --git a/terraform/alerting/README.md b/terraform/alerting/README.md
new file mode 100644
index 0000000..d2909be
--- /dev/null
+++ b/terraform/alerting/README.md
@@ -0,0 +1,43 @@
+# `alerting` module
+
+This module configures the CloudWatch alarms and webhook forwarding.
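+
+A minimal usage sketch, assuming the module is instantiated from the root
+module and wired to the `ecs` module's outputs (the Redis cluster ID value
+below is illustrative):
+
+```hcl
+module "alerting" {
+  source  = "./alerting"
+  context = module.this.context
+
+  webhook_cloudwatch_p2 = var.webhook_cloudwatch_p2
+  webhook_prometheus_p2 = var.webhook_prometheus_p2
+
+  ecs_cluster_name = module.ecs.ecs_cluster_name
+  ecs_service_name = module.ecs.ecs_service_name
+  redis_cluster_id = "verify-server-cache" # illustrative
+}
+```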
+
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | ~> 1.0 |
+| [aws](#requirement\_aws) | ~> 5.7 |
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | ~> 5.7 |
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [cloudwatch](#module\_cloudwatch) | app.terraform.io/wallet-connect/cloudwatch-constants/aws | 1.0.0 |
+| [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 0.3.2 |
+
+## Inputs
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [context](#input\_context) | Single object for setting entire context at once. | any | n/a | yes |
+| [ecs\_cluster\_name](#input\_ecs\_cluster\_name) | The name of the ECS cluster running the application | string | n/a | yes |
+| [ecs\_cpu\_threshold](#input\_ecs\_cpu\_threshold) | The ECS CPU utilization alarm threshold in percent | number | 80 | no |
+| [ecs\_memory\_threshold](#input\_ecs\_memory\_threshold) | The ECS memory utilization alarm threshold in percent | number | 80 | no |
+| [ecs\_service\_name](#input\_ecs\_service\_name) | The name of the ECS service running the application | string | n/a | yes |
+| [redis\_cluster\_id](#input\_redis\_cluster\_id) | The Redis cluster ID | string | n/a | yes |
+| [redis\_cpu\_threshold](#input\_redis\_cpu\_threshold) | The Redis CPU utilization alarm threshold in percent | number | 80 | no |
+| [redis\_memory\_threshold](#input\_redis\_memory\_threshold) | The Redis available memory alarm threshold in GiB | number | 3 | no |
+| [webhook\_cloudwatch\_p2](#input\_webhook\_cloudwatch\_p2) | The URL of the webhook to be called on CloudWatch P2 alarms | string | n/a | yes |
+| [webhook\_prometheus\_p2](#input\_webhook\_prometheus\_p2) | The URL of the webhook to be called on Prometheus P2 alarms | string | n/a | yes |
+## Outputs
+
+No outputs.
+
+
+
diff --git a/terraform/alerting/alarms_ecs.tf b/terraform/alerting/alarms_ecs.tf
new file mode 100644
index 0000000..0317297
--- /dev/null
+++ b/terraform/alerting/alarms_ecs.tf
@@ -0,0 +1,45 @@
+resource "aws_cloudwatch_metric_alarm" "ecs_cpu_utilization" {
+ alarm_name = "${local.alarm_prefix} - ECS CPU Utilization"
+ alarm_description = "${local.alarm_prefix} - ECS CPU utilization is high (over ${var.ecs_cpu_threshold}%)"
+
+ namespace = module.cloudwatch.namespaces.ECS
+ dimensions = {
+ ClusterName = var.ecs_cluster_name
+ ServiceName = var.ecs_service_name
+ }
+ metric_name = module.cloudwatch.metrics.ECS.CPUUtilization
+
+ evaluation_periods = local.evaluation_periods
+ period = local.period
+
+ statistic = module.cloudwatch.statistics.Average
+ comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold
+ threshold = var.ecs_cpu_threshold
+ treat_missing_data = "breaching"
+
+ alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+ insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+}
+
+resource "aws_cloudwatch_metric_alarm" "ecs_mem_utilization" {
+ alarm_name = "${local.alarm_prefix} - ECS Memory Utilization"
+ alarm_description = "${local.alarm_prefix} - ECS Memory utilization is high (over ${var.ecs_memory_threshold}%)"
+
+ namespace = module.cloudwatch.namespaces.ECS
+ dimensions = {
+ ClusterName = var.ecs_cluster_name
+ ServiceName = var.ecs_service_name
+ }
+ metric_name = module.cloudwatch.metrics.ECS.MemoryUtilization
+
+ evaluation_periods = local.evaluation_periods
+ period = local.period
+
+ statistic = module.cloudwatch.statistics.Average
+ comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold
+ threshold = var.ecs_memory_threshold
+ treat_missing_data = "breaching"
+
+ alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+ insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+}
diff --git a/terraform/alerting/alarms_redis.tf b/terraform/alerting/alarms_redis.tf
new file mode 100644
index 0000000..89b4ebe
--- /dev/null
+++ b/terraform/alerting/alarms_redis.tf
@@ -0,0 +1,43 @@
+resource "aws_cloudwatch_metric_alarm" "redis_cpu_utilization" {
+ alarm_name = "${local.alarm_prefix} - Redis CPU Utilization"
+ alarm_description = "${local.alarm_prefix} - Redis CPU utilization is high (over ${var.redis_cpu_threshold}%)"
+
+ namespace = module.cloudwatch.namespaces.ElastiCache
+ dimensions = {
+ CacheClusterId = var.redis_cluster_id
+ }
+ metric_name = module.cloudwatch.metrics.ElastiCache.CPUUtilization
+
+ evaluation_periods = local.evaluation_periods
+ period = local.period
+
+ statistic = module.cloudwatch.statistics.Average
+ comparison_operator = module.cloudwatch.operators.GreaterThanOrEqualToThreshold
+ threshold = var.redis_cpu_threshold
+ treat_missing_data = "breaching"
+
+ alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+ insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+}
+
+resource "aws_cloudwatch_metric_alarm" "redis_available_memory" {
+ alarm_name = "${local.alarm_prefix} - Redis Available Memory"
+ alarm_description = "${local.alarm_prefix} - Redis available memory is low (less than ${var.redis_memory_threshold}GiB)"
+
+ namespace = module.cloudwatch.namespaces.ElastiCache
+ dimensions = {
+ CacheClusterId = var.redis_cluster_id
+ }
+ metric_name = module.cloudwatch.metrics.ElastiCache.FreeableMemory
+
+ evaluation_periods = local.evaluation_periods
+ period = local.period
+
+ statistic = module.cloudwatch.statistics.Average
+ comparison_operator = module.cloudwatch.operators.LessThanOrEqualToThreshold
+ threshold = var.redis_memory_threshold * pow(1024, 3) # convert GiB to bytes
+ treat_missing_data = "breaching"
+
+ alarm_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+ insufficient_data_actions = [aws_sns_topic.cloudwatch_webhook.arn]
+}
diff --git a/terraform/alerting/context.tf b/terraform/alerting/context.tf
new file mode 100644
index 0000000..da1c290
--- /dev/null
+++ b/terraform/alerting/context.tf
@@ -0,0 +1,179 @@
+module "this" {
+ source = "app.terraform.io/wallet-connect/label/null"
+ version = "0.3.2"
+
+ namespace = var.namespace
+ region = var.region
+ stage = var.stage
+ name = var.name
+ delimiter = var.delimiter
+ attributes = var.attributes
+ tags = var.tags
+ label_order = var.label_order
+ regex_replace_chars = var.regex_replace_chars
+ id_length_limit = var.id_length_limit
+ label_key_case = var.label_key_case
+ label_value_case = var.label_value_case
+
+ context = var.context
+}
+
+################################################################################
+# Copy contents of label/variables.tf here
+
+#tflint-ignore: terraform_standard_module_structure
+variable "context" {
+ type = any
+ description = <<-EOT
+ Single object for setting entire context at once.
+ See description of individual variables for details.
+ Leave string and numeric variables as `null` to use default value.
+ Individual variable settings (non-null) override settings in context object,
+ except for attributes and tags, which are merged.
+ EOT
+
+ validation {
+ condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+
+ validation {
+ condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "namespace" {
+ type = string
+ default = null
+ description = "ID element. Usually the organization name, i.e. 'walletconnect' to help ensure generated IDs are globally unique."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "region" {
+ type = string
+ default = null
+ description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "stage" {
+ type = string
+ default = null
+ description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "name" {
+ type = string
+ default = null
+ description = <<-EOT
+ ID element. Usually the component name.
+ This is the only ID element not also included as a `tag`.
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "delimiter" {
+ type = string
+ default = null
+ description = <<-EOT
+ Delimiter to be used between ID elements.
+ Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "attributes" {
+ type = list(string)
+ default = []
+ description = <<-EOT
+ ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
+ in the order they appear in the list. New attributes are appended to the
+ end of the list. The elements of the list are joined by the `delimiter`
+ and treated as a single ID element.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "tags" {
+ type = map(string)
+ default = {}
+ description = "Additional tags."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_order" {
+ type = list(string)
+ default = null
+ description = <<-EOT
+ The order in which the labels (ID elements) appear in the `id`.
+ Defaults to ["namespace", "region", "stage", "name", "attributes"].
+ You can omit any of the 5 labels, but at least one must be present.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "regex_replace_chars" {
+ type = string
+ default = null
+ description = <<-EOT
+ Terraform regular expression (regex) string.
+ Characters matching the regex will be removed from the ID elements.
+ If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "id_length_limit" {
+ type = number
+ default = null
+ description = <<-EOT
+ Limit `id` to this many characters (minimum 6).
+ Set to `0` for unlimited length.
+ Set to `null` to keep the existing setting, which defaults to `0`.
+ Does not affect `id_full`.
+ EOT
+ validation {
+ condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
+ error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_key_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of the `tags` keys (label names) for tags generated by this module.
+ Does not affect keys of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper`.
+ Default value: `title`.
+ EOT
+
+ validation {
+ condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_value_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of ID elements (labels) as included in `id`,
+ set as tag values, and output by this module individually.
+ Does not affect values of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper` and `none` (no transformation).
+ Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
+ Default value: `lower`.
+ EOT
+
+ validation {
+ condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
diff --git a/terraform/alerting/main.tf b/terraform/alerting/main.tf
new file mode 100644
index 0000000..58da3d8
--- /dev/null
+++ b/terraform/alerting/main.tf
@@ -0,0 +1,39 @@
+module "cloudwatch" {
+ source = "app.terraform.io/wallet-connect/cloudwatch-constants/aws"
+ version = "1.0.0"
+}
+
+locals {
+ alarm_prefix = "${title(module.this.name)} - ${title(module.this.stage)}"
+ evaluation_periods = 1
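+ # CloudWatch alarm periods are expressed in seconds; 60 * 5 is a 5-minute window.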
+ period = 60 * 5
+}
+
+
+#tfsec:ignore:aws-sns-enable-topic-encryption
+resource "aws_sns_topic" "cloudwatch_webhook" {
+ name = "cloudwatch-webhook"
+ display_name = "CloudWatch Webhook forwarding to BetterUptime"
+}
+
+resource "aws_sns_topic_subscription" "cloudwatch_webhook" {
+ count = var.webhook_cloudwatch_p2 != "" ? 1 : 0
+
+ endpoint = var.webhook_cloudwatch_p2
+ protocol = "https"
+ topic_arn = aws_sns_topic.cloudwatch_webhook.arn
+}
+
+
+#tfsec:ignore:aws-sns-enable-topic-encryption
+resource "aws_sns_topic" "prometheus_webhook" {
+ name = "prometheus-webhook"
+ display_name = "Prometheus Webhook forwarding to BetterUptime"
+}
+
+resource "aws_sns_topic_subscription" "prometheus_webhook" {
+ count = var.webhook_prometheus_p2 != "" ? 1 : 0
+ endpoint = var.webhook_prometheus_p2
+ protocol = "https"
+ topic_arn = aws_sns_topic.prometheus_webhook.arn
+}
diff --git a/terraform/alerting/terraform.tf b/terraform/alerting/terraform.tf
new file mode 100644
index 0000000..f4c0a25
--- /dev/null
+++ b/terraform/alerting/terraform.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = "~> 1.0"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 5.7"
+ }
+ }
+}
diff --git a/terraform/alerting/variables.tf b/terraform/alerting/variables.tf
new file mode 100644
index 0000000..1b603f4
--- /dev/null
+++ b/terraform/alerting/variables.tf
@@ -0,0 +1,54 @@
+variable "webhook_cloudwatch_p2" {
+ description = "The URL of the webhook to be called on CloudWatch P2 alarms"
+ type = string
+}
+
+variable "webhook_prometheus_p2" {
+ description = "The URL of the webhook to be called on Prometheus P2 alarms"
+ type = string
+}
+
+#-------------------------------------------------------------------------------
+# ECS
+
+variable "ecs_cluster_name" {
+ description = "The name of the ECS cluster running the application"
+ type = string
+}
+
+variable "ecs_service_name" {
+ description = "The name of the ECS service running the application"
+ type = string
+}
+
+variable "ecs_cpu_threshold" {
+ description = "The ECS CPU utilization alarm threshold in percents"
+ type = number
+ default = 80
+}
+
+variable "ecs_memory_threshold" {
+ description = "The ECS memory utilization alarm threshold in percents"
+ type = number
+ default = 80
+}
+
+#-------------------------------------------------------------------------------
+# Redis
+
+variable "redis_cluster_id" {
+ description = "The Redis cluster ID"
+ type = string
+}
+
+variable "redis_cpu_threshold" {
+ description = "The Redis CPU utilization alarm threshold in percents"
+ type = number
+ default = 80
+}
+
+variable "redis_memory_threshold" {
+ description = "The Redis available memory alarm threshold in GiB"
+ type = number
+ default = 3
+}
diff --git a/terraform/context.tf b/terraform/context.tf
new file mode 100644
index 0000000..c678334
--- /dev/null
+++ b/terraform/context.tf
@@ -0,0 +1,23 @@
+module "stage" {
+ source = "app.terraform.io/wallet-connect/stage/null"
+ version = "0.1.0"
+ project = "verify-server"
+}
+
+locals {
+ stage = module.stage.stage
+}
+
+module "this" {
+ source = "app.terraform.io/wallet-connect/label/null"
+ version = "0.3.2"
+
+ namespace = "wc"
+ region = var.region
+ stage = local.stage
+ name = var.name
+
+ tags = {
+ Application = var.name
+ }
+}
diff --git a/terraform/ecs/README.md b/terraform/ecs/README.md
new file mode 100644
index 0000000..a5bc239
--- /dev/null
+++ b/terraform/ecs/README.md
@@ -0,0 +1,87 @@
+# `ecs` module
+
+This module creates an ECS cluster and an autoscaled Fargate service to run the application.
+
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | ~> 1.0 |
+| [aws](#requirement\_aws) | ~> 5.7 |
+| [random](#requirement\_random) | 3.5.1 |
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | ~> 5.7 |
+| [random](#provider\_random) | 3.5.1 |
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [ecs\_cpu\_mem](#module\_ecs\_cpu\_mem) | app.terraform.io/wallet-connect/ecs_cpu_mem/aws | 1.0.0 |
+| [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 0.3.2 |
+
+## Inputs
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [allowed\_app\_ingress\_cidr\_blocks](#input\_allowed\_app\_ingress\_cidr\_blocks) | The CIDR block to allow ingress access to the application. | string | n/a | yes |
+| [allowed\_lb\_ingress\_cidr\_blocks](#input\_allowed\_lb\_ingress\_cidr\_blocks) | The CIDR block to allow ingress access to the load-balancer. | string | n/a | yes |
+| [analytics\_datalake\_bucket\_name](#input\_analytics\_datalake\_bucket\_name) | The name of the S3 bucket to use for the analytics datalake | string | n/a | yes |
+| [analytics\_datalake\_kms\_key\_arn](#input\_analytics\_datalake\_kms\_key\_arn) | The ARN of the KMS key to use with the datalake bucket | string | n/a | yes |
+| [autoscaling\_cpu\_scale\_in\_cooldown](#input\_autoscaling\_cpu\_scale\_in\_cooldown) | The cooldown period (in seconds) before a scale in is possible | number | 180 | no |
+| [autoscaling\_cpu\_scale\_out\_cooldown](#input\_autoscaling\_cpu\_scale\_out\_cooldown) | The cooldown period (in seconds) before a scale out is possible | number | 180 | no |
+| [autoscaling\_cpu\_target](#input\_autoscaling\_cpu\_target) | The target CPU utilization for the autoscaling group | number | 50 | no |
+| [autoscaling\_desired\_count](#input\_autoscaling\_desired\_count) | Desired number of instances in the autoscaling group | number | 2 | no |
+| [autoscaling\_max\_capacity](#input\_autoscaling\_max\_capacity) | Maximum number of instances in the autoscaling group | number | 8 | no |
+| [autoscaling\_memory\_scale\_in\_cooldown](#input\_autoscaling\_memory\_scale\_in\_cooldown) | The cooldown period (in seconds) before a scale in is possible | number | 180 | no |
+| [autoscaling\_memory\_scale\_out\_cooldown](#input\_autoscaling\_memory\_scale\_out\_cooldown) | The cooldown period (in seconds) before a scale out is possible | number | 180 | no |
+| [autoscaling\_memory\_target](#input\_autoscaling\_memory\_target) | The target memory utilization for the autoscaling group | number | 50 | no |
+| [autoscaling\_min\_capacity](#input\_autoscaling\_min\_capacity) | Minimum number of instances in the autoscaling group | number | 2 | no |
+| [cloudwatch\_logs\_key\_arn](#input\_cloudwatch\_logs\_key\_arn) | The ARN of the KMS key to use for encrypting CloudWatch logs | string | n/a | yes |
+| [cloudwatch\_retention\_in\_days](#input\_cloudwatch\_retention\_in\_days) | The number of days to retain CloudWatch logs | number | 14 | no |
+| [context](#input\_context) | Single object for setting entire context at once. | any | n/a | yes |
+| [ecr\_repository\_url](#input\_ecr\_repository\_url) | The URL of the ECR repository where the app image is stored | string | n/a | yes |
+| [geoip\_db\_bucket\_name](#input\_geoip\_db\_bucket\_name) | The name of the S3 bucket where the GeoIP database is stored | string | n/a | yes |
+| [geoip\_db\_key](#input\_geoip\_db\_key) | The key of the GeoIP database in the S3 bucket | string | n/a | yes |
+| [identity\_cache\_endpoint\_read](#input\_identity\_cache\_endpoint\_read) | The endpoint of the identity cache (read) | string | n/a | yes |
+| [identity\_cache\_endpoint\_write](#input\_identity\_cache\_endpoint\_write) | The endpoint of the identity cache (write) | string | n/a | yes |
+| [image\_version](#input\_image\_version) | The version of the app image to deploy | string | n/a | yes |
+| [infura\_project\_id](#input\_infura\_project\_id) | The project ID for Infura | string | n/a | yes |
+| [log\_level](#input\_log\_level) | The log level for the app | string | n/a | yes |
+| [ofac\_blocked\_countries](#input\_ofac\_blocked\_countries) | The list of countries under OFAC sanctions | string | n/a | yes |
+| [pokt\_project\_id](#input\_pokt\_project\_id) | The project ID for POKT | string | n/a | yes |
+| [port](#input\_port) | The port the app listens on | number | n/a | yes |
+| [private\_subnets](#input\_private\_subnets) | The IDs of the private subnets | list(string) | n/a | yes |
+| [project\_cache\_endpoint\_read](#input\_project\_cache\_endpoint\_read) | The endpoint of the project cache (read) | string | n/a | yes |
+| [project\_cache\_endpoint\_write](#input\_project\_cache\_endpoint\_write) | The endpoint of the project cache (write) | string | n/a | yes |
+| [project\_cache\_ttl](#input\_project\_cache\_ttl) | The TTL for project data cache | number | n/a | yes |
+| [prometheus\_endpoint](#input\_prometheus\_endpoint) | The endpoint of the Prometheus server to use for monitoring | string | n/a | yes |
+| [prometheus\_workspace\_id](#input\_prometheus\_workspace\_id) | The workspace ID of the Prometheus server used for monitoring | string | n/a | yes |
+| [public\_subnets](#input\_public\_subnets) | The IDs of the public subnets | list(string) | n/a | yes |
+| [redis\_max\_connections](#input\_redis\_max\_connections) | The maximum number of connections to the Redis server | number | 128 | no |
+| [registry\_api\_auth\_token](#input\_registry\_api\_auth\_token) | The auth token for the registry API | string | n/a | yes |
+| [registry\_api\_endpoint](#input\_registry\_api\_endpoint) | The endpoint of the registry API | string | n/a | yes |
+| [route53\_zones](#input\_route53\_zones) | The FQDNs to use for the app | map(string) | n/a | yes |
+| [route53\_zones\_certificates](#input\_route53\_zones\_certificates) | The ARNs of the ACM certificates to use for HTTPS | map(string) | n/a | yes |
+| [task\_cpu](#input\_task\_cpu) | The number of CPU units to reserve for the container. | number | n/a | yes |
+| [task\_execution\_role\_name](#input\_task\_execution\_role\_name) | The name of the task execution role | string | n/a | yes |
+| [task\_memory](#input\_task\_memory) | The amount of memory (in MiB) to reserve for the container. | number | n/a | yes |
+| [vpc\_id](#input\_vpc\_id) | The ID of the VPC to deploy to | string | n/a | yes |
+| [zerion\_api\_key](#input\_zerion\_api\_key) | The API key for Zerion | string | n/a | yes |
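+
+Both Route53 inputs are keyed by zone ID: `route53_zones` maps a zone ID to
+the FQDN to create in it (see `dns.tf`), and `route53_zones_certificates`
+maps the same zone IDs to ACM certificate ARNs. A hypothetical value shape,
+where the zone ID, domain, and certificate ARN are placeholders:
+
+```hcl
+route53_zones = {
+  "Z0000000EXAMPLE" = "verify.example.com" # zone ID => FQDN
+}
+
+route53_zones_certificates = {
+  "Z0000000EXAMPLE" = "arn:aws:acm:eu-central-1:000000000000:certificate/00000000-0000-0000-0000-000000000000" # zone ID => ACM cert ARN
+}
+```
+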
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [ecs\_cluster\_name](#output\_ecs\_cluster\_name) | The name of the ECS cluster |
+| [ecs\_service\_name](#output\_ecs\_service\_name) | The name of the ECS service |
+| [ecs\_task\_family](#output\_ecs\_task\_family) | The family of the task definition |
+| [load\_balancer\_arn](#output\_load\_balancer\_arn) | The ARN of the load balancer |
+| [load\_balancer\_arn\_suffix](#output\_load\_balancer\_arn\_suffix) | The ARN suffix of the load balancer |
+| [service\_security\_group\_id](#output\_service\_security\_group\_id) | The ID of the security group for the service |
+| [target\_group\_arn](#output\_target\_group\_arn) | The ARN of the target group |
+
+
+
diff --git a/terraform/ecs/cluster.tf b/terraform/ecs/cluster.tf
new file mode 100644
index 0000000..2b0ed8f
--- /dev/null
+++ b/terraform/ecs/cluster.tf
@@ -0,0 +1,188 @@
+locals {
+ image = "${var.ecr_repository_url}:${var.image_version}"
+
+ otel_port = var.port + 1
+ otel_cpu = 128
+ otel_memory = 128
+
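+ # Raise the container's open-file (nofile) ulimit: soft 2^18 = 262144, hard is double that.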
+ file_descriptor_soft_limit = pow(2, 18)
+ file_descriptor_hard_limit = local.file_descriptor_soft_limit * 2
+}
+
+module "ecs_cpu_mem" {
+ source = "app.terraform.io/wallet-connect/ecs_cpu_mem/aws"
+ version = "1.0.0"
+ cpu = var.task_cpu + local.otel_cpu
+ memory = var.task_memory + local.otel_memory
+}
+
+#-------------------------------------------------------------------------------
+# ECS Cluster
+
+resource "aws_ecs_cluster" "app_cluster" {
+ name = "${module.this.id}-cluster"
+
+ configuration {
+ execute_command_configuration {
+ logging = "OVERRIDE"
+
+ log_configuration {
+ cloud_watch_encryption_enabled = false
+ cloud_watch_log_group_name = aws_cloudwatch_log_group.cluster.name
+ }
+ }
+ }
+
+ # Exposes metrics such as the number of running tasks in CloudWatch
+ setting {
+ name = "containerInsights"
+ value = "enabled"
+ }
+}
+
+#-------------------------------------------------------------------------------
+# ECS Task definition
+
+resource "aws_ecs_task_definition" "app_task" {
+ family = module.this.id
+
+ requires_compatibilities = ["FARGATE"]
+ network_mode = "awsvpc"
+ cpu = module.ecs_cpu_mem.cpu
+ memory = module.ecs_cpu_mem.memory
+ execution_role_arn = data.aws_iam_role.ecs_task_execution_role.arn
+ task_role_arn = data.aws_iam_role.ecs_task_execution_role.arn
+
+ runtime_platform {
+ operating_system_family = "LINUX"
+ }
+
+ container_definitions = jsonencode([
+ {
+ name = module.this.id,
+ image = local.image,
+ cpu = var.task_cpu,
+ memory = var.task_memory,
+ essential = true,
+
+ environment = [
+ { name = "SECRET", value = var.app_secret },
+
+ { name = "PORT", value = tostring(var.port) },
+ { name = "PROMETHEUS_PORT", value = tostring(local.otel_port) },
+
+ { name = "LOG_LEVEL", value = var.log_level },
+
+ { name = "GEOIP_DB_BUCKET", value = var.geoip_db_bucket_name },
+ { name = "GEOIP_DB_KEY", value = var.geoip_db_key },
+
+ { name = "PROJECT_REGISTRY_URL", value = var.project_registry_api_url },
+ { name = "PROJECT_REGISTRY_AUTH_TOKEN", value = var.project_registry_api_auth_token },
+
+ { name = "DATA_API_URL", value = var.data_api_url },
+ { name = "DATA_API_AUTH_TOKEN", value = var.data_api_auth_token },
+
+ { name = "ATTESTATION_CACHE_URL", value = var.attestation_cache_url },
+ { name = "PROJECT_REGISTRY_CACHE_URL", value = var.project_registry_cache_url },
+ { name = "SCAM_GUARD_CACHE_URL", value = var.scam_guard_cache_url },
+
+ { name = "DATA_LAKE_BUCKET", value = var.analytics_datalake_bucket_name },
+
+ { name = "BLOCKED_COUNTRIES", value = var.ofac_blocked_countries },
+ ],
+
+ ulimits = [{
+ name : "nofile",
+ softLimit : local.file_descriptor_soft_limit,
+ hardLimit : local.file_descriptor_hard_limit
+ }],
+
+ portMappings = [
+ {
+ containerPort = var.port,
+ hostPort = var.port
+ },
+ {
+ containerPort = local.otel_port,
+ hostPort = local.otel_port
+ }
+ ],
+
+ logConfiguration : {
+ logDriver = "awslogs",
+ options = {
+ "awslogs-group" = aws_cloudwatch_log_group.cluster.name,
+ "awslogs-region" = module.this.region,
+ "awslogs-stream-prefix" = "ecs"
+ }
+ },
+
+ dependsOn = [
+ { containerName : "aws-otel-collector", condition : "START" },
+ ]
+ },
+
+ # Forward telemetry data to AWS CloudWatch
+ {
+ name = "aws-otel-collector",
+ image = "public.ecr.aws/aws-observability/aws-otel-collector:v0.31.0",
+ cpu = local.otel_cpu,
+ memory = local.otel_memory,
+ essential = true,
+
+ command = [
+ "--config=/etc/ecs/ecs-amp-xray-prometheus.yaml"
+ # Uncomment to enable debug logging in otel-collector
+ # "--set=service.telemetry.logs.level=DEBUG"
+ ],
+
+ environment = [
+ { name : "AWS_PROMETHEUS_SCRAPING_ENDPOINT", value : "0.0.0.0:${local.otel_port}" },
+ { name : "AWS_PROMETHEUS_ENDPOINT", value : "${var.prometheus_endpoint}api/v1/remote_write" },
+ { name : "AWS_REGION", value : module.this.region },
+ ],
+
+ logConfiguration = {
+ logDriver = "awslogs",
+ options = {
+ "awslogs-group" = aws_cloudwatch_log_group.otel.name,
+ "awslogs-region" = module.this.region,
+ "awslogs-stream-prefix" = "ecs"
+ }
+ }
+ },
+ ])
+}
+
+
+#-------------------------------------------------------------------------------
+# ECS Service
+
+resource "aws_ecs_service" "app_service" {
+ name = "${module.this.id}-service"
+ cluster = aws_ecs_cluster.app_cluster.id
+ task_definition = aws_ecs_task_definition.app_task.arn
+ launch_type = "FARGATE"
+ desired_count = var.autoscaling_desired_count
+ propagate_tags = "TASK_DEFINITION"
+
+ # Wait for the service deployment to succeed
+ wait_for_steady_state = true
+
+ network_configuration {
+ subnets = var.private_subnets
+ assign_public_ip = false
+ security_groups = [aws_security_group.app_ingress.id]
+ }
+
+ load_balancer {
+ target_group_arn = aws_lb_target_group.target_group.arn
+ container_name = aws_ecs_task_definition.app_task.family
+ container_port = var.port
+ }
+
+ # Allow external changes without Terraform plan difference
+ lifecycle {
+ ignore_changes = [desired_count]
+ }
+}
diff --git a/terraform/ecs/cluster_autoscaling.tf b/terraform/ecs/cluster_autoscaling.tf
new file mode 100644
index 0000000..d20ed76
--- /dev/null
+++ b/terraform/ecs/cluster_autoscaling.tf
@@ -0,0 +1,43 @@
+resource "aws_appautoscaling_target" "ecs_target" {
+ min_capacity = var.autoscaling_min_capacity
+ max_capacity = var.autoscaling_max_capacity
+ resource_id = "service/${aws_ecs_cluster.app_cluster.name}/${aws_ecs_service.app_service.name}"
+ scalable_dimension = "ecs:service:DesiredCount"
+ service_namespace = "ecs"
+}
+
+resource "aws_appautoscaling_policy" "ecs_target_cpu" {
+ name = "${module.this.id}-scaling-policy-cpu"
+ policy_type = "TargetTrackingScaling"
+ resource_id = aws_appautoscaling_target.ecs_target.resource_id
+ scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension
+ service_namespace = aws_appautoscaling_target.ecs_target.service_namespace
+
+ target_tracking_scaling_policy_configuration {
+ predefined_metric_specification {
+ predefined_metric_type = "ECSServiceAverageCPUUtilization"
+ }
+ target_value = var.autoscaling_cpu_target
+ scale_in_cooldown = var.autoscaling_cpu_scale_in_cooldown
+ scale_out_cooldown = var.autoscaling_cpu_scale_out_cooldown
+ }
+ depends_on = [aws_appautoscaling_target.ecs_target]
+}
+
+resource "aws_appautoscaling_policy" "ecs_target_memory" {
+ name = "${module.this.id}-scaling-policy-memory"
+ policy_type = "TargetTrackingScaling"
+ resource_id = aws_appautoscaling_target.ecs_target.resource_id
+ scalable_dimension = aws_appautoscaling_target.ecs_target.scalable_dimension
+ service_namespace = aws_appautoscaling_target.ecs_target.service_namespace
+
+ target_tracking_scaling_policy_configuration {
+ predefined_metric_specification {
+ predefined_metric_type = "ECSServiceAverageMemoryUtilization"
+ }
+ target_value = var.autoscaling_memory_target
+ scale_in_cooldown = var.autoscaling_memory_scale_in_cooldown
+ scale_out_cooldown = var.autoscaling_memory_scale_out_cooldown
+ }
+ depends_on = [aws_appautoscaling_target.ecs_target]
+}
diff --git a/terraform/ecs/cluster_iam.tf b/terraform/ecs/cluster_iam.tf
new file mode 100644
index 0000000..572c4ef
--- /dev/null
+++ b/terraform/ecs/cluster_iam.tf
@@ -0,0 +1,118 @@
+#-------------------------------------------------------------------------------
+# Task execution role
+
+data "aws_iam_role" "ecs_task_execution_role" {
+ name = var.task_execution_role_name
+}
+
+# GeoIP Bucket Access
+resource "aws_iam_policy" "geoip_bucket_access" {
+ name = "${module.this.id}-geoip-bucket_access"
+ path = "/"
+ description = "Allows ${module.this.id} to read from ${var.geoip_db_bucket_name}"
+
+ policy = jsonencode({
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Sid" : "ListObjectsInGeoipBucket",
+ "Effect" : "Allow",
+ "Action" : ["s3:ListBucket"],
+ "Resource" : ["arn:aws:s3:::${var.geoip_db_bucket_name}"]
+ },
+ {
+ "Sid" : "AllObjectActionsInGeoipBucket",
+ "Effect" : "Allow",
+ "Action" : ["s3:CopyObject", "s3:GetObject", "s3:HeadObject"],
+ "Resource" : ["arn:aws:s3:::${var.geoip_db_bucket_name}/*"]
+ },
+ ]
+ })
+}
+resource "aws_iam_role_policy_attachment" "geoip_bucket_access" {
+ role = data.aws_iam_role.ecs_task_execution_role.name
+ policy_arn = aws_iam_policy.geoip_bucket_access.arn
+}
+
+# Analytics Bucket Access
+resource "aws_iam_policy" "datalake_bucket_access" {
+ name = "${module.this.id}-analytics-bucket_access"
+ path = "/"
+ description = "Allows ${module.this.id} to read/write from ${var.analytics_datalake_bucket_name}"
+
+ policy = jsonencode({
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Sid" : "ListObjectsInAnalyticsBucket",
+ "Effect" : "Allow",
+ "Action" : ["s3:ListBucket"],
+ "Resource" : ["arn:aws:s3:::${var.analytics_datalake_bucket_name}"]
+ },
+ {
+ "Sid" : "AllObjectActionsInAnalyticsBucket",
+ "Effect" : "Allow",
+ "Action" : "s3:*Object",
+ "Resource" : ["arn:aws:s3:::${var.analytics_datalake_bucket_name}/${module.this.name}}/*"]
+ },
+ {
+ "Sid" : "AllGenerateDataKeyForAnalyticsBucket",
+ "Effect" : "Allow",
+ "Action" : ["kms:GenerateDataKey"],
+ "Resource" : [var.analytics_datalake_kms_key_arn]
+ }
+ ]
+ })
+}
+resource "aws_iam_role_policy_attachment" "datalake_bucket_access" {
+ role = data.aws_iam_role.ecs_task_execution_role.name
+ policy_arn = aws_iam_policy.datalake_bucket_access.arn
+}
+
+resource "aws_iam_role_policy_attachment" "ecs_task_execution_role_policy" {
+ role = data.aws_iam_role.ecs_task_execution_role.name
+ policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
+}
+
+resource "aws_iam_role_policy_attachment" "cloudwatch_write_policy" {
+ role = data.aws_iam_role.ecs_task_execution_role.name
+ policy_arn = "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess"
+}
+
+resource "aws_iam_role_policy_attachment" "prometheus_write_policy" {
+ role = data.aws_iam_role.ecs_task_execution_role.name
+ policy_arn = "arn:aws:iam::aws:policy/AmazonPrometheusRemoteWriteAccess"
+}
+
+resource "aws_iam_role_policy_attachment" "ssm_read_only_policy" {
+ role = data.aws_iam_role.ecs_task_execution_role.name
+ policy_arn = "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess"
+}
+
+#tfsec:ignore:aws-iam-no-policy-wildcards
+resource "aws_iam_policy" "otel" {
+ name = "${module.this.id}-otel"
+ path = "/"
+
+ policy = jsonencode({
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Effect" : "Allow",
+ "Action" : [
+ "logs:PutLogEvents",
+ "logs:CreateLogGroup",
+ "logs:CreateLogStream",
+ "logs:DescribeLogStreams",
+ "logs:DescribeLogGroups",
+ "ssm:GetParameters",
+ ],
+ "Resource" : ["*"]
+ },
+ ]
+ })
+}
+resource "aws_iam_role_policy_attachment" "ecs_task_execution_fetch_ghcr_secret_policy" {
+ role = data.aws_iam_role.ecs_task_execution_role.name
+ policy_arn = aws_iam_policy.otel.arn
+}
diff --git a/terraform/ecs/cluster_logs.tf b/terraform/ecs/cluster_logs.tf
new file mode 100644
index 0000000..f5ef4c4
--- /dev/null
+++ b/terraform/ecs/cluster_logs.tf
@@ -0,0 +1,17 @@
+resource "aws_cloudwatch_log_group" "cluster" {
+ name = "${module.this.id}-app-logs"
+ kms_key_id = var.cloudwatch_logs_key_arn
+ retention_in_days = var.cloudwatch_retention_in_days
+}
+
+resource "aws_cloudwatch_log_group" "otel" {
+ name = "${module.this.id}-aws-otel-sidecar-collector"
+ kms_key_id = var.cloudwatch_logs_key_arn
+ retention_in_days = var.cloudwatch_retention_in_days
+}
+
+resource "aws_cloudwatch_log_group" "prometheus_proxy" {
+ name = "${module.this.id}-sigv4-prometheus-proxy"
+ kms_key_id = var.cloudwatch_logs_key_arn
+ retention_in_days = var.cloudwatch_retention_in_days
+}
diff --git a/terraform/ecs/context.tf b/terraform/ecs/context.tf
new file mode 100644
index 0000000..da1c290
--- /dev/null
+++ b/terraform/ecs/context.tf
@@ -0,0 +1,179 @@
+module "this" {
+ source = "app.terraform.io/wallet-connect/label/null"
+ version = "0.3.2"
+
+ namespace = var.namespace
+ region = var.region
+ stage = var.stage
+ name = var.name
+ delimiter = var.delimiter
+ attributes = var.attributes
+ tags = var.tags
+ label_order = var.label_order
+ regex_replace_chars = var.regex_replace_chars
+ id_length_limit = var.id_length_limit
+ label_key_case = var.label_key_case
+ label_value_case = var.label_value_case
+
+ context = var.context
+}
+
+################################################################################
+# Copy contents of label/variables.tf here
+
+#tflint-ignore: terraform_standard_module_structure
+variable "context" {
+ type = any
+ description = <<-EOT
+ Single object for setting entire context at once.
+ See description of individual variables for details.
+ Leave string and numeric variables as `null` to use default value.
+ Individual variable settings (non-null) override settings in context object,
+ except for attributes and tags, which are merged.
+ EOT
+
+ validation {
+ condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+
+ validation {
+ condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "namespace" {
+ type = string
+ default = null
+ description = "ID element. Usually the organization name, i.e. 'walletconnect' to help ensure generated IDs are globally unique."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "region" {
+ type = string
+ default = null
+ description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "stage" {
+ type = string
+ default = null
+ description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "name" {
+ type = string
+ default = null
+ description = <<-EOT
+ ID element. Usually the component name.
+ This is the only ID element not also included as a `tag`.
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "delimiter" {
+ type = string
+ default = null
+ description = <<-EOT
+ Delimiter to be used between ID elements.
+ Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "attributes" {
+ type = list(string)
+ default = []
+ description = <<-EOT
+ ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
+ in the order they appear in the list. New attributes are appended to the
+ end of the list. The elements of the list are joined by the `delimiter`
+ and treated as a single ID element.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "tags" {
+ type = map(string)
+ default = {}
+ description = "Additional tags."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_order" {
+ type = list(string)
+ default = null
+ description = <<-EOT
+ The order in which the labels (ID elements) appear in the `id`.
+ Defaults to ["namespace", "region", "stage", "name", "attributes"].
+ You can omit any of the 5 labels, but at least one must be present.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "regex_replace_chars" {
+ type = string
+ default = null
+ description = <<-EOT
+ Terraform regular expression (regex) string.
+ Characters matching the regex will be removed from the ID elements.
+ If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "id_length_limit" {
+ type = number
+ default = null
+ description = <<-EOT
+ Limit `id` to this many characters (minimum 6).
+ Set to `0` for unlimited length.
+ Set to `null` to keep the existing setting, which defaults to `0`.
+ Does not affect `id_full`.
+ EOT
+ validation {
+ condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
+ error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_key_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of the `tags` keys (label names) for tags generated by this module.
+ Does not affect keys of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper`.
+ Default value: `title`.
+ EOT
+
+ validation {
+ condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_value_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of ID elements (labels) as included in `id`,
+ set as tag values, and output by this module individually.
+ Does not affect values of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper` and `none` (no transformation).
+ Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
+ Default value: `lower`.
+ EOT
+
+ validation {
+ condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
diff --git a/terraform/ecs/dns.tf b/terraform/ecs/dns.tf
new file mode 100644
index 0000000..aaa95cd
--- /dev/null
+++ b/terraform/ecs/dns.tf
@@ -0,0 +1,14 @@
+# DNS Records
+resource "aws_route53_record" "dns_load_balancer" {
+ for_each = var.route53_zones
+
+ zone_id = each.key
+ name = each.value
+ type = "A"
+
+ alias {
+ name = aws_lb.load_balancer.dns_name
+ zone_id = aws_lb.load_balancer.zone_id
+ evaluate_target_health = true
+ }
+}
diff --git a/terraform/ecs/main.tf b/terraform/ecs/main.tf
new file mode 100644
index 0000000..fb5425f
--- /dev/null
+++ b/terraform/ecs/main.tf
@@ -0,0 +1,3 @@
+resource "random_pet" "this" {
+ length = 2
+}
diff --git a/terraform/ecs/network.tf b/terraform/ecs/network.tf
new file mode 100644
index 0000000..f488cdd
--- /dev/null
+++ b/terraform/ecs/network.tf
@@ -0,0 +1,161 @@
+locals {
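+ # ALB names are limited to 32 characters, may not contain underscores,
+ # and may not end with a hyphen, hence the normalization below.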
+ lb_name = trimsuffix(substr(replace("${module.this.id}-${random_pet.this.id}", "_", "-"), 0, 32), "-")
+}
+
+#tfsec:ignore:aws-elb-drop-invalid-headers
+#tfsec:ignore:aws-elb-alb-not-public
+resource "aws_lb" "load_balancer" {
+ name = local.lb_name
+ load_balancer_type = "application"
+ subnets = var.public_subnets
+
+ security_groups = [aws_security_group.lb_ingress.id]
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+locals {
+ main_certificate_key = keys(var.route53_zones_certificates)[0]
+ main_certificate = var.route53_zones_certificates[local.main_certificate_key]
+ additional_certificates = { for k, v in var.route53_zones_certificates : k => v if k != local.main_certificate_key }
+}
+
+resource "aws_lb_listener" "listener-https" {
+ load_balancer_arn = aws_lb.load_balancer.arn
+ port = "443"
+ protocol = "HTTPS"
+ certificate_arn = local.main_certificate
+ ssl_policy = "ELBSecurityPolicy-TLS13-1-2-2021-06"
+
+ default_action {
+ type = "forward"
+ target_group_arn = aws_lb_target_group.target_group.arn
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+resource "aws_lb_listener_certificate" "listener-https" {
+ for_each = local.additional_certificates
+ listener_arn = aws_lb_listener.listener-https.arn
+ certificate_arn = each.value
+}
+
+resource "aws_lb_listener" "listener-http" {
+ load_balancer_arn = aws_lb.load_balancer.arn
+ port = "80"
+ protocol = "HTTP"
+
+ default_action {
+ type = "redirect"
+
+ redirect {
+ port = "443"
+ protocol = "HTTPS"
+ status_code = "HTTP_301"
+ }
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+resource "aws_lb_target_group" "target_group" {
+ name = local.lb_name
+ port = var.port
+ protocol = "HTTP"
+ target_type = "ip"
+ vpc_id = var.vpc_id
+ slow_start = 30
+
+ health_check {
+ protocol = "HTTP"
+ path = "/health" # KeysServer's health path
+ port = var.port
+ interval = 15
+ timeout = 10
+ healthy_threshold = 2
+ unhealthy_threshold = 2
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+# Security Groups
+
+#tfsec:ignore:aws-ec2-no-public-ingress-sgr
+resource "aws_security_group" "lb_ingress" {
+ name = "${local.lb_name}-lb-ingress"
+ description = "Allow app port ingress from vpc"
+ vpc_id = var.vpc_id
+
+ ingress {
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ description = "Allow HTTPS traffic from anywhere"
+ }
+
+ ingress {
+ from_port = 80
+ to_port = 80
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ description = "Allow HTTP traffic from anywhere"
+ }
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = [var.allowed_lb_ingress_cidr_blocks]
+ description = "Allow traffic out to all VPC IP addresses"
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
+
+#tfsec:ignore:aws-ec2-no-public-egress-sgr
+resource "aws_security_group" "app_ingress" {
+ name = "${local.lb_name}-app-ingress"
+ description = "Allow app port ingress"
+ vpc_id = var.vpc_id
+
+ ingress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ security_groups = [aws_security_group.lb_ingress.id]
+ description = "Allow traffic from load balancer"
+ }
+
+ ingress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = [var.allowed_app_ingress_cidr_blocks]
+ description = "Allow traffic from allowed CIDR blocks"
+ }
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ description = "Allow traffic out to all IP addresses"
+ }
+
+ lifecycle {
+ create_before_destroy = true
+ }
+}
diff --git a/terraform/ecs/outputs.tf b/terraform/ecs/outputs.tf
new file mode 100644
index 0000000..8036e7a
--- /dev/null
+++ b/terraform/ecs/outputs.tf
@@ -0,0 +1,34 @@
+output "ecs_cluster_name" {
+ description = "The name of the ECS cluster"
+ value = aws_ecs_cluster.app_cluster.name
+}
+
+output "ecs_service_name" {
+ description = "The name of the ECS service"
+ value = aws_ecs_service.app_service.name
+}
+
+output "ecs_task_family" {
+ description = "The family of the task definition"
+ value = aws_ecs_task_definition.app_task.family
+}
+
+output "service_security_group_id" {
+ description = "The ID of the security group for the service"
+ value = aws_security_group.app_ingress.id
+}
+
+output "target_group_arn" {
+ description = "The ARN of the target group"
+ value = aws_lb_target_group.target_group.arn
+}
+
+output "load_balancer_arn" {
+ description = "The ARN of the load balancer"
+ value = aws_lb.load_balancer.arn
+}
+
+output "load_balancer_arn_suffix" {
+ description = "The ARN suffix of the load balancer"
+ value = aws_lb.load_balancer.arn_suffix
+}
diff --git a/terraform/ecs/terraform.tf b/terraform/ecs/terraform.tf
new file mode 100644
index 0000000..692cc58
--- /dev/null
+++ b/terraform/ecs/terraform.tf
@@ -0,0 +1,14 @@
+terraform {
+ required_version = "~> 1.0"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 5.7"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "3.5.1"
+ }
+ }
+}
diff --git a/terraform/ecs/variables.tf b/terraform/ecs/variables.tf
new file mode 100644
index 0000000..2205d2a
--- /dev/null
+++ b/terraform/ecs/variables.tf
@@ -0,0 +1,231 @@
+#-------------------------------------------------------------------------------
+# Cluster
+
+variable "ecr_repository_url" {
+ description = "The URL of the ECR repository where the app image is stored"
+ type = string
+}
+
+variable "image_version" {
+ description = "The version of the app image to deploy"
+ type = string
+}
+
+variable "task_execution_role_name" {
+ description = "The name of the task execution role"
+ type = string
+}
+
+variable "task_cpu" {
+ description = "The number of CPU units to reserve for the container."
+ type = number
+}
+
+variable "task_memory" {
+ description = "The amount of memory (in MiB) to reserve for the container."
+ type = number
+}
+
+variable "autoscaling_desired_count" {
+ description = "Minimum number of instances in the autoscaling group"
+ type = number
+ default = 2
+}
+
+variable "autoscaling_min_capacity" {
+ description = "Minimum number of instances in the autoscaling group"
+ type = number
+ default = 2
+}
+
+variable "autoscaling_max_capacity" {
+ description = "Maximum number of instances in the autoscaling group"
+ type = number
+ default = 8
+}
+
+variable "cloudwatch_logs_key_arn" {
+ description = "The ARN of the KMS key to use for encrypting CloudWatch logs"
+ type = string
+}
+
+variable "cloudwatch_retention_in_days" {
+ description = "The number of days to retain CloudWatch logs for the DB instance"
+ type = number
+ default = 14
+}
+
+#-------------------------------------------------------------------------------
+# DNS
+
+variable "route53_zones" {
+ description = "The FQDNs to use for the app"
+ type = map(string)
+}
+
+variable "route53_zones_certificates" {
+ description = "The ARNs of the ACM certificates to use for HTTPS"
+ type = map(string)
+}
+
+#-------------------------------------------------------------------------------
+# Network
+
+variable "vpc_id" {
+ description = "The ID of the VPC to deploy to"
+ type = string
+}
+
+variable "public_subnets" {
+ description = "The IDs of the public subnets"
+ type = list(string)
+}
+
+variable "private_subnets" {
+ description = "The IDs of the private subnets"
+ type = list(string)
+}
+
+variable "allowed_app_ingress_cidr_blocks" {
+ description = "A list of CIDR blocks to allow ingress access to the application."
+ type = string
+}
+
+variable "allowed_lb_ingress_cidr_blocks" {
+ description = "A list of CIDR blocks to allow ingress access to the load-balancer."
+ type = string
+}
+
+#-------------------------------------------------------------------------------
+# Application
+
+variable "app_secret" {
+ description = "The application secret"
+ type = string
+ sensitive = true
+}
+
+variable "port" {
+ description = "The port the app listens on"
+ type = number
+}
+
+variable "log_level" {
+ description = "The log level for the app"
+ type = string
+}
+
+variable "project_registry_api_url" {
+ description = "The url of the project registry API"
+ type = string
+}
+
+variable "project_registry_api_auth_token" {
+ description = "The auth token for the project registry API"
+ type = string
+ sensitive = true
+}
+
+variable "data_api_url" {
+ description = "The url of the data API"
+ type = string
+}
+
+variable "data_api_auth_token" {
+ description = "The auth token for the data API"
+ type = string
+ sensitive = true
+}
+
+variable "attestation_cache_url" {
+ description = "The endpoint of the attestation cache"
+ type = string
+}
+
+variable "project_registry_cache_url" {
+ description = "The url of the project registry cache"
+ type = string
+}
+
+variable "scam_guard_cache_url" {
+ description = "The url of the scam guard cache"
+ type = string
+}
+
+variable "ofac_blocked_countries" {
+ description = "The list of countries under OFAC sanctions"
+ type = string
+}
+
+#-------------------------------------------------------------------------------
+# Analytics
+
+variable "analytics_datalake_bucket_name" {
+ description = "The name of the S3 bucket to use for the analytics datalake"
+ type = string
+}
+
+variable "analytics_datalake_kms_key_arn" {
+ description = "The ARN of the KMS key to use with the datalake bucket"
+ type = string
+}
+
+#-------------------------------------------------------------------------------
+# Autoscaling
+
+variable "autoscaling_cpu_target" {
+ description = "The target CPU utilization for the autoscaling group"
+ type = number
+ default = 50
+}
+
+variable "autoscaling_cpu_scale_in_cooldown" {
+ description = "The cooldown period (in seconds) before a scale in is possible"
+ type = number
+ default = 180
+}
+
+variable "autoscaling_cpu_scale_out_cooldown" {
+ description = "The cooldown period (in seconds) before a scale out is possible"
+ type = number
+ default = 180
+}
+
+variable "autoscaling_memory_target" {
+ description = "The target memory utilization for the autoscaling group"
+ type = number
+ default = 50
+}
+
+variable "autoscaling_memory_scale_in_cooldown" {
+ description = "The cooldown period (in seconds) before a scale in is possible"
+ type = number
+ default = 180
+}
+
+variable "autoscaling_memory_scale_out_cooldown" {
+ description = "The cooldown period (in seconds) before a scale out is possible"
+ type = number
+ default = 180
+}
+
+#-------------------------------------------------------------------------------
+# Monitoring
+
+variable "prometheus_endpoint" {
+ description = "The endpoint of the Prometheus server to use for monitoring"
+ type = string
+}
+
+#---------------------------------------
+# GeoIP
+
+variable "geoip_db_bucket_name" {
+ description = "The name of the S3 bucket where the GeoIP database is stored"
+ type = string
+}
+
+variable "geoip_db_key" {
+ description = "The key of the GeoIP database in the S3 bucket"
+ type = string
+}
diff --git a/terraform/inputs.tf b/terraform/inputs.tf
new file mode 100644
index 0000000..a5298e4
--- /dev/null
+++ b/terraform/inputs.tf
@@ -0,0 +1,39 @@
+data "terraform_remote_state" "org" {
+ backend = "remote"
+ config = {
+ organization = "wallet-connect"
+ workspaces = {
+ name = "aws-org"
+ }
+ }
+}
+
+data "terraform_remote_state" "datalake" {
+ backend = "remote"
+ config = {
+ organization = "wallet-connect"
+ workspaces = {
+ name = "datalake-${module.stage.dev ? "staging" : local.stage}"
+ }
+ }
+}
+
+data "terraform_remote_state" "infra_aws" {
+ backend = "remote"
+ config = {
+ organization = "wallet-connect"
+ workspaces = {
+ name = "infra-aws"
+ }
+ }
+}
+
+data "terraform_remote_state" "monitoring" {
+ backend = "remote"
+ config = {
+ organization = "wallet-connect"
+ workspaces = {
+ name = "monitoring"
+ }
+ }
+}
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000..e950eaa
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,48 @@
+data "aws_caller_identity" "this" {}
+
+resource "random_pet" "this" {
+ length = 2
+}
+
+locals {
+ ecr_repository_url = module.stage.dev ? data.terraform_remote_state.org.outputs.accounts.sdlc.dev.ecr-urls.verify : data.terraform_remote_state.org.outputs.accounts.wl.verify[local.stage].ecr-url
+}
+
+resource "aws_kms_key" "cloudwatch_logs" {
+ description = "KMS key for encrypting CloudWatch Logs"
+ enable_key_rotation = true
+ policy = jsonencode({
+ Version = "2012-10-17"
+ Statement = [
+ {
+ Sid = "Enable IAM User Permissions"
+ Effect = "Allow"
+ Principal = {
+ AWS = data.aws_caller_identity.this.account_id
+ }
+ Action = "kms:*"
+ Resource = "*"
+ },
+ {
+ Sid = "AllowCloudWatchLogs"
+ Effect = "Allow"
+ Principal = {
+ Service = "logs.${module.this.region}.amazonaws.com"
+ }
+ Action = [
+ "kms:Encrypt*",
+ "kms:Decrypt*",
+ "kms:ReEncrypt*",
+ "kms:GenerateDataKey*",
+ "kms:Describe*"
+ ]
+ Resource = "*"
+ },
+ ]
+ })
+}
+
+resource "aws_kms_alias" "cloudwatch_logs" {
+ name = "alias/${module.this.id}-cloudwatch-logs"
+ target_key_id = aws_kms_key.cloudwatch_logs.key_id
+}
diff --git a/terraform/monitoring/context.tf b/terraform/monitoring/context.tf
new file mode 100644
index 0000000..da1c290
--- /dev/null
+++ b/terraform/monitoring/context.tf
@@ -0,0 +1,179 @@
+module "this" {
+ source = "app.terraform.io/wallet-connect/label/null"
+ version = "0.3.2"
+
+ namespace = var.namespace
+ region = var.region
+ stage = var.stage
+ name = var.name
+ delimiter = var.delimiter
+ attributes = var.attributes
+ tags = var.tags
+ label_order = var.label_order
+ regex_replace_chars = var.regex_replace_chars
+ id_length_limit = var.id_length_limit
+ label_key_case = var.label_key_case
+ label_value_case = var.label_value_case
+
+ context = var.context
+}
+
+################################################################################
+# Copy contents of label/variables.tf here
+
+#tflint-ignore: terraform_standard_module_structure
+variable "context" {
+ type = any
+ description = <<-EOT
+ Single object for setting entire context at once.
+ See description of individual variables for details.
+ Leave string and numeric variables as `null` to use default value.
+ Individual variable settings (non-null) override settings in context object,
+ except for attributes and tags, which are merged.
+ EOT
+
+ validation {
+ condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+
+ validation {
+ condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "namespace" {
+ type = string
+ default = null
+ description = "ID element. Usually the organization name, i.e. 'walletconnect' to help ensure generated IDs are globally unique."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "region" {
+ type = string
+ default = null
+ description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "stage" {
+ type = string
+ default = null
+ description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "name" {
+ type = string
+ default = null
+ description = <<-EOT
+ ID element. Usually the component name.
+ This is the only ID element not also included as a `tag`.
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "delimiter" {
+ type = string
+ default = null
+ description = <<-EOT
+ Delimiter to be used between ID elements.
+ Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "attributes" {
+ type = list(string)
+ default = []
+ description = <<-EOT
+ ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
+ in the order they appear in the list. New attributes are appended to the
+ end of the list. The elements of the list are joined by the `delimiter`
+ and treated as a single ID element.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "tags" {
+ type = map(string)
+ default = {}
+ description = "Additional tags."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_order" {
+ type = list(string)
+ default = null
+ description = <<-EOT
+ The order in which the labels (ID elements) appear in the `id`.
+ Defaults to ["namespace", "region", "stage", "name", "attributes"].
+ You can omit any of the 5 labels, but at least one must be present.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "regex_replace_chars" {
+ type = string
+ default = null
+ description = <<-EOT
+ Terraform regular expression (regex) string.
+ Characters matching the regex will be removed from the ID elements.
+ If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "id_length_limit" {
+ type = number
+ default = null
+ description = <<-EOT
+ Limit `id` to this many characters (minimum 6).
+ Set to `0` for unlimited length.
+ Set to `null` to keep the existing setting, which defaults to `0`.
+ Does not affect `id_full`.
+ EOT
+ validation {
+ condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
+ error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_key_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of the `tags` keys (label names) for tags generated by this module.
+ Does not affect keys of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper`.
+ Default value: `title`.
+ EOT
+
+ validation {
+ condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_value_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of ID elements (labels) as included in `id`,
+ set as tag values, and output by this module individually.
+ Does not affect values of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper` and `none` (no transformation).
+ Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
+ Default value: `lower`.
+ EOT
+
+ validation {
+ condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
diff --git a/terraform/monitoring/dashboard.jsonnet b/terraform/monitoring/dashboard.jsonnet
new file mode 100644
index 0000000..3075689
--- /dev/null
+++ b/terraform/monitoring/dashboard.jsonnet
@@ -0,0 +1,80 @@
+local grafana = import 'grafonnet-lib/grafana.libsonnet';
+local panels = import 'panels/panels.libsonnet';
+
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local annotation = grafana.annotation;
+local layout = grafana.layout;
+
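+// The std.extVar values below are injected by Terraform when rendering this file (see monitoring/main.tf).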
+local ds = {
+ prometheus: {
+ type: 'prometheus',
+ uid: std.extVar('prometheus_uid'),
+ },
+ cloudwatch: {
+ type: 'cloudwatch',
+ uid: std.extVar('cloudwatch_uid'),
+ },
+};
+local vars = {
+ namespace: 'Verify',
+ environment: std.extVar('environment'),
+ notifications: std.parseJson(std.extVar('notifications')),
+
+ ecs_service_name: std.extVar('ecs_service_name'),
+ redis_cluster_id: std.extVar('redis_cluster_id'),
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+local height = 8;
+local pos = grafana.layout.pos(height);
+
+////////////////////////////////////////////////////////////////////////////////
+
+dashboard.new(
+ title = std.extVar('dashboard_title'),
+ uid = std.extVar('dashboard_uid'),
+ editable = true,
+ graphTooltip = dashboard.graphTooltips.sharedCrosshair,
+ timezone = dashboard.timezones.utc,
+)
+.addAnnotation(
+ annotation.new(
+ target = {
+ limit: 100,
+ matchAny: false,
+ tags: [],
+ type: 'dashboard',
+ },
+ )
+)
+
+.addPanels(layout.generate_grid([
+ //////////////////////////////////////////////////////////////////////////////
+ row.new('HTTP Server'),
+ panels.http.response_status(ds, vars) { gridPos: pos.one_third },
+ panels.http.request_response(ds, vars) { gridPos: pos.two_thirds },
+ panels.http.latency_quantiles(ds, vars) { gridPos: pos.one_third },
+ panels.http.average_latency(ds, vars) { gridPos: pos.two_thirds },
+
+ //////////////////////////////////////////////////////////////////////////////
+ row.new('ECS'),
+ panels.ecs.cpu(ds, vars) { gridPos: pos._2 },
+ panels.ecs.memory(ds, vars) { gridPos: pos._2 },
+
+ //////////////////////////////////////////////////////////////////////////////
+ row.new('Project Registry'),
+ panels.registry.requests(ds, vars) { gridPos: pos._3 },
+ panels.registry.cache_read(ds, vars) { gridPos: pos._3 },
+ panels.registry.cache_write(ds, vars) { gridPos: pos._3 },
+
+ //////////////////////////////////////////////////////////////////////////////
+ row.new('Redis'),
+ panels.redis.cpu(ds, vars) { gridPos: pos._2 },
+ panels.redis.reads(ds, vars) { gridPos: pos._2 },
+ panels.redis.memory(ds, vars) { gridPos: pos._2 },
+ panels.redis.writes(ds, vars) { gridPos: pos._2 },
+]))
diff --git a/terraform/monitoring/data_sources.tf b/terraform/monitoring/data_sources.tf
new file mode 100644
index 0000000..473b245
--- /dev/null
+++ b/terraform/monitoring/data_sources.tf
@@ -0,0 +1,34 @@
+module "monitoring-role" {
+ source = "app.terraform.io/wallet-connect/monitoring-role/aws"
+ version = "1.0.2"
+ context = module.this
+ remote_role_arn = var.monitoring_role_arn
+}
+
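+# Grafana queries Amazon Managed Prometheus over SigV4, assuming the cross-account monitoring role created above.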
+resource "grafana_data_source" "prometheus" {
+ type = "prometheus"
+ name = "${module.this.stage}-${module.this.name}-amp"
+ url = var.prometheus_endpoint
+
+ json_data_encoded = jsonencode({
+ httpMethod = "GET"
+ sigV4Auth = true
+ sigV4AuthType = "ec2_iam_role"
+ sigV4Region = module.this.region
+ sigV4AssumeRoleArn = module.monitoring-role.iam_role_arn
+ })
+
+ depends_on = [module.monitoring-role]
+}
+
+resource "grafana_data_source" "cloudwatch" {
+ type = "cloudwatch"
+ name = "${module.this.stage}-${module.this.name}-cloudwatch"
+
+ json_data_encoded = jsonencode({
+ defaultRegion = module.this.region
+ assumeRoleArn = module.monitoring-role.iam_role_arn
+ })
+
+ depends_on = [module.monitoring-role]
+}
diff --git a/terraform/monitoring/grafana-dashboard.json.tpl b/terraform/monitoring/grafana-dashboard.json.tpl
new file mode 100644
index 0000000..8f851c3
--- /dev/null
+++ b/terraform/monitoring/grafana-dashboard.json.tpl
@@ -0,0 +1,480 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 44,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 17
+ },
+ "id": 8,
+ "panels": [],
+ "title": "Project Registry",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "errors"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "total"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 0,
+ "y": 18
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(project_registry_errors[1m]))",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "errors",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(project_registry_requests[1m]))",
+ "interval": "",
+ "legendFormat": "total",
+ "refId": "A"
+ }
+ ],
+ "title": "Requests / s",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "errors"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "hits"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 8,
+ "y": 18
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "exemplar": true,
+ "expr": "rate(project_registry_cache_errors[1m]) or vector(0)",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "errors",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(project_registry_cache_misses[1m]))",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "misses",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(project_registry_cache_hits[1m]))",
+ "interval": "",
+ "legendFormat": "hits",
+ "refId": "A"
+ }
+ ],
+ "title": "Cache reads / s",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "errors"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "writes"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 8,
+ "x": 16,
+ "y": 18
+ },
+ "id": 13,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom"
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "exemplar": true,
+ "expr": "rate(project_registry_cache_write_errors[1m]) or vector(0)",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "errors",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${prometheus_data_source_uid}"
+ },
+ "exemplar": true,
+ "expr": "sum(rate(project_registry_cache_writes[1m]))",
+ "interval": "",
+ "legendFormat": "writes",
+ "refId": "A"
+ }
+ ],
+ "title": "Cache writes / s",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": false,
+ "schemaVersion": 35,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-6h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "${environment}_verify",
+ "uid": "${environment}_verify",
+ "version": 10,
+ "weekStart": ""
+}
diff --git a/terraform/monitoring/grafonnet-lib b/terraform/monitoring/grafonnet-lib
new file mode 160000
index 0000000..b085843
--- /dev/null
+++ b/terraform/monitoring/grafonnet-lib
@@ -0,0 +1 @@
+Subproject commit b085843dd64a0c8426f6f5cbd616fc1438379235
diff --git a/terraform/monitoring/main.tf b/terraform/monitoring/main.tf
new file mode 100644
index 0000000..dbbd06a
--- /dev/null
+++ b/terraform/monitoring/main.tf
@@ -0,0 +1,23 @@
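+# Render the Grafana dashboard from Jsonnet, injecting Terraform values as Jsonnet external variables.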
+data "jsonnet_file" "dashboard" {
+ source = "${path.module}/dashboard.jsonnet"
+
+ ext_str = {
+ dashboard_title = "Verify Server - ${title(module.this.stage)}"
+ dashboard_uid = "verify-${module.this.stage}"
+
+ prometheus_uid = grafana_data_source.prometheus.uid
+ cloudwatch_uid = grafana_data_source.cloudwatch.uid
+
+ environment = module.this.stage
+ notifications = jsonencode(var.notification_channels)
+
+ ecs_service_name = var.ecs_service_name
+ redis_cluster_id = var.redis_cluster_id
+ }
+}
+
+resource "grafana_dashboard" "at_a_glance" {
+ overwrite = true
+ message = "Updated by Terraform"
+ config_json = data.jsonnet_file.dashboard.rendered
+}
diff --git a/terraform/monitoring/panels/ecs/availability.libsonnet b/terraform/monitoring/panels/ecs/availability.libsonnet
new file mode 100644
index 0000000..e13d670
--- /dev/null
+++ b/terraform/monitoring/panels/ecs/availability.libsonnet
@@ -0,0 +1,55 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+local alert = grafana.alert;
+local alertCondition = grafana.alertCondition;
+
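+// Alerts when average availability over the last 5m drops below 95% (priority P3).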
+local error_alert(vars) = alert.new(
+ namespace = vars.namespace,
+ name = "%s - Availability" % vars.environment,
+ message = "%s - Availability" % vars.environment,
+ period = '5m',
+ frequency = '1m',
+ noDataState = 'alerting',
+ notifications = vars.notifications,
+ alertRuleTags = {
+ 'og_priority': 'P3',
+ },
+
+ conditions = [
+ alertCondition.new(
+ evaluatorParams = [ 95 ],
+ evaluatorType = 'lt',
+ operatorType = 'or',
+ queryRefId = 'availability',
+ queryTimeStart = '5m',
+ reducerType = 'avg',
+ ),
+ ]
+);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Availability',
+ datasource = ds.prometheus,
+ )
+ .configure(
+ defaults.configuration.timeseries
+ .withUnit('percent')
+ .withSoftLimit(
+ axisSoftMin = 98,
+ axisSoftMax = 100,
+ )
+ )
+ .setAlert(vars.environment, error_alert(vars))
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ expr = '(1-(sum(rate(http_call_counter_total{code=~"5.+"}[5m])) or vector(0))/(sum(rate(http_call_counter_total{}[5m]))))*100',
+ refId = "availability",
+ exemplar = false,
+ ))
+}
diff --git a/terraform/monitoring/panels/ecs/cpu.libsonnet b/terraform/monitoring/panels/ecs/cpu.libsonnet
new file mode 100644
index 0000000..52ec396
--- /dev/null
+++ b/terraform/monitoring/panels/ecs/cpu.libsonnet
@@ -0,0 +1,73 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: defaults.values.resource.thresholds.warning,
+ critical: defaults.values.resource.thresholds.critical,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withUnit('percent')
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.warn },
+ { value: thresholds.critical, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'CPU_Avg',
+ color = defaults.values.colors.cpu
+ ),
+ grafana.override.newColorOverride(
+ name = 'CPU_Max',
+ color = defaults.values.colors.cpu_alt
+ )
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'CPU Utilization',
+ datasource = ds.cloudwatch,
+ )
+ .configure(_configuration)
+ .setAlert(vars.environment, defaults.alerts.cpu(
+ namespace = vars.namespace,
+ env = vars.environment,
+ title = 'ECS',
+ notifications = vars.notifications,
+ ))
+
+ .addTarget(targets.cloudwatch(
+ alias = 'CPU (Max)',
+ datasource = ds.cloudwatch,
+ namespace = 'AWS/ECS',
+ metricName = 'CPUUtilization',
+ dimensions = {
+ ServiceName: vars.ecs_service_name
+ },
+ statistic = 'Maximum',
+ refId = 'CPU_Max',
+ ))
+ .addTarget(targets.cloudwatch(
+ alias = 'CPU (Avg)',
+ datasource = ds.cloudwatch,
+ namespace = 'AWS/ECS',
+ metricName = 'CPUUtilization',
+ dimensions = {
+ ServiceName: vars.ecs_service_name
+ },
+ statistic = 'Average',
+ refId = 'CPU_Avg',
+ ))
+}
diff --git a/terraform/monitoring/panels/ecs/memory.libsonnet b/terraform/monitoring/panels/ecs/memory.libsonnet
new file mode 100644
index 0000000..82e9963
--- /dev/null
+++ b/terraform/monitoring/panels/ecs/memory.libsonnet
@@ -0,0 +1,73 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: defaults.values.resource.thresholds.warning,
+ critical: defaults.values.resource.thresholds.critical,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withUnit('percent')
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.warn },
+ { value: thresholds.critical, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'Mem_Avg',
+ color = defaults.values.colors.memory
+ ),
+ grafana.override.newColorOverride(
+ name = 'Mem_Max',
+ color = defaults.values.colors.memory_alt
+ )
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Memory Utilization',
+ datasource = ds.cloudwatch,
+ )
+ .configure(_configuration)
+ .setAlert(vars.environment, defaults.alerts.memory(
+ namespace = vars.namespace,
+ env = vars.environment,
+ title = 'ECS',
+ notifications = vars.notifications,
+ ))
+
+ .addTarget(targets.cloudwatch(
+ alias = 'Memory (Max)',
+ datasource = ds.cloudwatch,
+ namespace = 'AWS/ECS',
+ metricName = 'MemoryUtilization',
+ dimensions = {
+ ServiceName: vars.ecs_service_name
+ },
+ statistic = 'Maximum',
+ refId = 'Mem_Max',
+ ))
+ .addTarget(targets.cloudwatch(
+ alias = 'Memory (Avg)',
+ datasource = ds.cloudwatch,
+ namespace = 'AWS/ECS',
+ metricName = 'MemoryUtilization',
+ dimensions = {
+ ServiceName: vars.ecs_service_name
+ },
+ statistic = 'Average',
+ refId = 'Mem_Avg',
+ ))
+}
diff --git a/terraform/monitoring/panels/http/average_latency.libsonnet b/terraform/monitoring/panels/http/average_latency.libsonnet
new file mode 100644
index 0000000..c04a45c
--- /dev/null
+++ b/terraform/monitoring/panels/http/average_latency.libsonnet
@@ -0,0 +1,57 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 100,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withUnit(grafana.fieldConfig.units.Milliseconds)
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'read',
+ color = 'green'
+ ),
+ grafana.override.newColorOverride(
+ name = 'write',
+ color = 'blue'
+ )
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Max avg latency [1m]',
+ datasource = ds.prometheus,
+ )
+ .configure(_configuration)
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{method}} {{endpoint}}',
+ refId = 'write',
+ expr = 'max(increase(axum_http_requests_duration_seconds_sum{method !~ "GET|HEAD"}[1m]) * 1000 / increase(axum_http_requests_duration_seconds_count{method !~ "GET|HEAD"}[1m])) by (method, endpoint)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{method}} {{endpoint}}',
+ refId = 'read',
+ expr = 'max(increase(axum_http_requests_duration_seconds_sum{method =~ "GET|HEAD"}[1m]) * 1000 / increase(axum_http_requests_duration_seconds_count{method =~ "GET|HEAD"}[1m])) by (method, endpoint)',
+ exemplar = true,
+ ))
+}
diff --git a/terraform/monitoring/panels/http/latency_quantiles.libsonnet b/terraform/monitoring/panels/http/latency_quantiles.libsonnet
new file mode 100644
index 0000000..815521a
--- /dev/null
+++ b/terraform/monitoring/panels/http/latency_quantiles.libsonnet
@@ -0,0 +1,59 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 80,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withUnit(grafana.fieldConfig.units.Milliseconds)
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ );
+
+{
+ new(ds, vars)::
+ panels.barGauge(
+ title = 'Latency quantiles',
+ datasource = ds.prometheus,
+ )
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '0.5',
+ refId = 'A',
+ expr = 'histogram_quantile(0.5, sum by (le) (rate(axum_http_requests_duration_seconds_bucket[$__range]))) * 1000',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '0.9',
+ refId = 'B',
+ expr = 'histogram_quantile(0.9, sum by (le) (rate(axum_http_requests_duration_seconds_bucket[$__range]))) * 1000',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '0.99',
+ refId = 'C',
+ expr = 'histogram_quantile(0.99, sum by (le) (rate(axum_http_requests_duration_seconds_bucket[$__range]))) * 1000',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '0.999',
+ refId = 'D',
+ expr = 'histogram_quantile(0.999, sum by (le) (rate(axum_http_requests_duration_seconds_bucket[$__range]))) * 1000',
+ exemplar = true,
+ )) + {
+ fieldConfig+: _configuration.fieldConfig,
+ options+: _configuration.options,
+ }
+}
diff --git a/terraform/monitoring/panels/http/request_response.libsonnet b/terraform/monitoring/panels/http/request_response.libsonnet
new file mode 100644
index 0000000..e480bb1
--- /dev/null
+++ b/terraform/monitoring/panels/http/request_response.libsonnet
@@ -0,0 +1,75 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 80,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withUnit('reqps')
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'status_2xx',
+ color = 'green'
+ ),
+ grafana.override.newColorOverride(
+ name = 'status_4xx',
+ color = 'yellow'
+ ),
+ grafana.override.newColorOverride(
+ name = 'status_5xx',
+ color = 'red'
+ ),
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Request-Response / s',
+ datasource = ds.prometheus,
+ )
+ .configure(_configuration)
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{status}} {{method}} {{endpoint}}',
+ refId = 'status_2xx',
+ expr = 'sum(rate(axum_http_requests_total{status=~"^2.*$"}[1m])) by (status, method, endpoint)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{status}} {{method}} {{endpoint}}',
+ refId = 'status_4xx',
+ expr = 'sum(rate(axum_http_requests_total{status=~"^4.*"}[1m])) by (status, method, endpoint)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{status}} {{method}} {{endpoint}}',
+ refId = 'status_5xx',
+ expr = 'sum(rate(axum_http_requests_total{status=~"^5.*"}[1m])) by (status, method, endpoint)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{status}} {{method}} {{endpoint}}',
+ refId = 'status_all',
+ expr = 'sum(rate(axum_http_requests_total{status!~"^(2|4|5).*"}[1m])) by (status, method, endpoint)',
+ exemplar = true,
+ ))
+}
diff --git a/terraform/monitoring/panels/http/response_status.libsonnet b/terraform/monitoring/panels/http/response_status.libsonnet
new file mode 100644
index 0000000..525daf5
--- /dev/null
+++ b/terraform/monitoring/panels/http/response_status.libsonnet
@@ -0,0 +1,65 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local _configuration = defaults.configuration.timeseries
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'status_2XX',
+ color = 'green'
+ ),
+ grafana.override.newColorOverride(
+ name = 'status_4XX',
+ color = 'orange'
+ ),
+ grafana.override.newColorOverride(
+ name = 'status_5XX',
+ color = 'red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'status_other',
+ color = 'blue'
+ ),
+ ]);
+
+{
+ new(ds, vars)::
+ panels.pieChart(
+ title = 'Response Status Codes',
+ datasource = ds.prometheus,
+ )
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '2XX',
+ refId = 'status_2XX',
+ expr = 'sum(increase(axum_http_requests_total { status =~ "^2.*"}[$__range]))',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '4XX',
+ refId = 'status_4XX',
+ expr = 'sum(increase(axum_http_requests_total { status =~ "^4.*"}[$__range]))',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '5XX',
+ refId = 'status_5XX',
+ expr = 'sum(increase(axum_http_requests_total { status =~ "^5.*"}[$__range]))',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'Other',
+ refId = 'status_other',
+ expr = 'sum(increase(axum_http_requests_total { status !~ "^(2|4|5).*"}[$__range]))',
+ exemplar = true,
+ )) + {
+ fieldConfig+: _configuration.fieldConfig,
+ options+: _configuration.options,
+ }
+}
diff --git a/terraform/monitoring/panels/panels.libsonnet b/terraform/monitoring/panels/panels.libsonnet
new file mode 100644
index 0000000..66bc7ce
--- /dev/null
+++ b/terraform/monitoring/panels/panels.libsonnet
@@ -0,0 +1,27 @@
+{
+ ecs: {
+ availability: (import 'ecs/availability.libsonnet' ).new,
+ cpu: (import 'ecs/cpu.libsonnet' ).new,
+ memory: (import 'ecs/memory.libsonnet' ).new,
+ },
+
+ http: {
+ response_status: (import 'http/response_status.libsonnet' ).new,
+ request_response: (import 'http/request_response.libsonnet' ).new,
+ latency_quantiles: (import 'http/latency_quantiles.libsonnet' ).new,
+ average_latency: (import 'http/average_latency.libsonnet' ).new,
+ },
+
+ redis: {
+ reads: (import 'redis/reads.libsonnet' ).new,
+ writes: (import 'redis/writes.libsonnet' ).new,
+ cpu: (import 'redis/cpu.libsonnet' ).new,
+ memory: (import 'redis/memory.libsonnet' ).new,
+ },
+
+ registry: {
+ requests: (import 'registry/requests.libsonnet' ).new,
+ cache_read: (import 'registry/cache_read.libsonnet' ).new,
+ cache_write: (import 'registry/cache_write.libsonnet' ).new,
+ }
+}
diff --git a/terraform/monitoring/panels/redis/cpu.libsonnet b/terraform/monitoring/panels/redis/cpu.libsonnet
new file mode 100644
index 0000000..4c826a3
--- /dev/null
+++ b/terraform/monitoring/panels/redis/cpu.libsonnet
@@ -0,0 +1,75 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: defaults.values.resource.thresholds.warning,
+ critical: defaults.values.resource.thresholds.critical,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withUnit('percent')
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.warn },
+ { value: thresholds.critical, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'CPU_Avg',
+ color = defaults.values.colors.cpu
+ ),
+ grafana.override.newColorOverride(
+ name = 'CPU_Max',
+ color = defaults.values.colors.cpu_alt
+ )
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Redis CPU',
+ datasource = ds.cloudwatch,
+ )
+ .configure(_configuration)
+ .setAlert(vars.environment, defaults.alerts.cpu(
+ namespace = vars.namespace,
+ env = vars.environment,
+ title = 'Redis',
+ notifications = vars.notifications,
+ ))
+
+ .addTarget(targets.cloudwatch(
+ alias = 'CPU (Max)',
+ datasource = ds.cloudwatch,
+ dimensions = {
+ CacheClusterId: vars.redis_cluster_id,
+ },
+ matchExact = true,
+ metricName = 'CPUUtilization',
+ namespace = 'AWS/ElastiCache',
+ statistic = 'Maximum',
+ refId = 'CPU_Max',
+ ))
+ .addTarget(targets.cloudwatch(
+ alias = 'CPU (Avg)',
+ datasource = ds.cloudwatch,
+ dimensions = {
+ CacheClusterId: vars.redis_cluster_id,
+ },
+ matchExact = true,
+ metricName = 'CPUUtilization',
+ namespace = 'AWS/ElastiCache',
+ statistic = 'Average',
+ refId = 'CPU_Avg',
+ ))
+}
diff --git a/terraform/monitoring/panels/redis/memory.libsonnet b/terraform/monitoring/panels/redis/memory.libsonnet
new file mode 100644
index 0000000..d1d08cb
--- /dev/null
+++ b/terraform/monitoring/panels/redis/memory.libsonnet
@@ -0,0 +1,75 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: defaults.values.resource.thresholds.warning,
+ critical: defaults.values.resource.thresholds.critical,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withUnit('percent')
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.warn },
+ { value: thresholds.critical, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'Mem_Avg',
+ color = defaults.values.colors.memory
+ ),
+ grafana.override.newColorOverride(
+ name = 'Mem_Max',
+ color = defaults.values.colors.memory_alt
+ )
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Redis Memory',
+ datasource = ds.cloudwatch,
+ )
+ .configure(_configuration)
+ .setAlert(vars.environment, defaults.alerts.memory(
+ namespace = vars.namespace,
+ env = vars.environment,
+ title = 'Redis',
+ notifications = vars.notifications,
+ ))
+
+ .addTarget(targets.cloudwatch(
+ alias = 'Memory (Max)',
+ datasource = ds.cloudwatch,
+ dimensions = {
+ CacheClusterId: vars.redis_cluster_id,
+ },
+ matchExact = true,
+ metricName = 'DatabaseMemoryUsagePercentage',
+ namespace = 'AWS/ElastiCache',
+ statistic = 'Maximum',
+ refId = 'Mem_Max',
+ ))
+ .addTarget(targets.cloudwatch(
+ alias = 'Memory (Avg)',
+ datasource = ds.cloudwatch,
+ dimensions = {
+ CacheClusterId: vars.redis_cluster_id,
+ },
+ matchExact = true,
+ metricName = 'DatabaseMemoryUsagePercentage',
+ namespace = 'AWS/ElastiCache',
+ statistic = 'Average',
+ refId = 'Mem_Avg',
+ ))
+}
diff --git a/terraform/monitoring/panels/redis/reads.libsonnet b/terraform/monitoring/panels/redis/reads.libsonnet
new file mode 100644
index 0000000..8afa656
--- /dev/null
+++ b/terraform/monitoring/panels/redis/reads.libsonnet
@@ -0,0 +1,78 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 80,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'total_errors',
+ color = 'dark-red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'total',
+ color = 'dark-green'
+ ),
+ grafana.override.newColorOverride(
+ name = 'errors_per_database',
+ color = 'red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'per_database',
+ color = 'green'
+ ),
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Redis Reads / s',
+ datasource = ds.prometheus,
+ )
+ .configure(_configuration)
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'total errors',
+ refId = 'total_errors',
+ expr = 'sum(rate(redis_read_errors{}[1m])) or vector(0)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'total',
+ refId = 'total',
+ expr = 'sum(rate(redis_reads{}[1m]))',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{db}} errors',
+ refId = 'errors_per_database',
+ expr = 'sum(rate(redis_read_errors{}[1m]) or vector(0)) by (db)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{db}}',
+ refId = 'per_database',
+ expr = 'sum(rate(redis_reads{}[1m])) by (db)',
+ exemplar = true,
+ ))
+}
diff --git a/terraform/monitoring/panels/redis/writes.libsonnet b/terraform/monitoring/panels/redis/writes.libsonnet
new file mode 100644
index 0000000..fc5805c
--- /dev/null
+++ b/terraform/monitoring/panels/redis/writes.libsonnet
@@ -0,0 +1,78 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 80,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'total_errors',
+ color = 'dark-red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'total',
+ color = 'dark-green'
+ ),
+ grafana.override.newColorOverride(
+ name = 'errors_per_database',
+ color = 'red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'per_database',
+ color = 'green'
+ ),
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Redis Writes / s',
+ datasource = ds.prometheus,
+ )
+ .configure(_configuration)
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'total errors',
+ refId = 'total_errors',
+ expr = 'sum(rate(redis_write_errors{}[1m])) or vector(0)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'total',
+ refId = 'total',
+ expr = 'sum(rate(redis_writes{}[1m]))',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{db}} errors',
+ refId = 'errors_per_database',
+ expr = 'sum(rate(redis_write_errors{}[1m]) or vector(0)) by (db)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = '{{db}}',
+ refId = 'per_database',
+ expr = 'sum(rate(redis_writes{}[1m])) by (db)',
+ exemplar = true,
+ ))
+}
diff --git a/terraform/monitoring/panels/registry/cache_read.libsonnet b/terraform/monitoring/panels/registry/cache_read.libsonnet
new file mode 100644
index 0000000..2be600c
--- /dev/null
+++ b/terraform/monitoring/panels/registry/cache_read.libsonnet
@@ -0,0 +1,63 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 80,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'errors',
+ color = 'red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'hits',
+ color = 'green'
+ ),
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Cache Reads / s',
+ datasource = ds.prometheus,
+ )
+ .configure(_configuration)
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'errors',
+ refId = 'errors',
+ expr = 'rate(project_registry_cache_errors[1m]) or vector(0)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'misses',
+ refId = 'misses',
+ expr = 'sum(rate(project_registry_cache_misses[1m]))',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'hits',
+ refId = 'hits',
+ expr = 'sum(rate(project_registry_cache_hits[1m]))',
+ exemplar = true,
+ ))
+}
diff --git a/terraform/monitoring/panels/registry/cache_write.libsonnet b/terraform/monitoring/panels/registry/cache_write.libsonnet
new file mode 100644
index 0000000..7c1f7cc
--- /dev/null
+++ b/terraform/monitoring/panels/registry/cache_write.libsonnet
@@ -0,0 +1,56 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 80,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ )
+ .withSoftLimit(
+ axisSoftMin = 0,
+ axisSoftMax = thresholds.warning,
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'errors',
+ color = 'red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'writes',
+ color = 'green'
+ ),
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Cache Writes / s',
+ datasource = ds.prometheus,
+ )
+ .configure(_configuration)
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'errors',
+ refId = 'errors',
+ expr = 'rate(project_registry_cache_write_errors[1m]) or vector(0)',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'writes',
+ refId = 'writes',
+ expr = 'sum(rate(project_registry_cache_writes[1m]))',
+ exemplar = true,
+ ))
+}
diff --git a/terraform/monitoring/panels/registry/requests.libsonnet b/terraform/monitoring/panels/registry/requests.libsonnet
new file mode 100644
index 0000000..8b06d89
--- /dev/null
+++ b/terraform/monitoring/panels/registry/requests.libsonnet
@@ -0,0 +1,52 @@
+local grafana = import '../../grafonnet-lib/grafana.libsonnet';
+local defaults = import '../../grafonnet-lib/defaults.libsonnet';
+
+local panels = grafana.panels;
+local targets = grafana.targets;
+
+local thresholds = {
+ warning: 80,
+};
+
+local _configuration = defaults.configuration.timeseries
+ .withThresholdStyle(grafana.fieldConfig.thresholdStyle.Area)
+ .withThresholds(
+ baseColor = defaults.values.colors.ok,
+ steps = [
+ { value: thresholds.warning, color: defaults.values.colors.critical },
+ ]
+ )
+ .addOverrides([
+ grafana.override.newColorOverride(
+ name = 'errors',
+ color = 'red'
+ ),
+ grafana.override.newColorOverride(
+ name = 'total',
+ color = 'green'
+ ),
+ ]);
+
+{
+ new(ds, vars)::
+ panels.timeseries(
+ title = 'Requests / s',
+ datasource = ds.prometheus,
+ )
+ .configure(_configuration)
+
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'errors',
+ refId = 'errors',
+ expr = 'sum(rate(project_registry_errors[1m]))',
+ exemplar = true,
+ ))
+ .addTarget(targets.prometheus(
+ datasource = ds.prometheus,
+ legendFormat = 'total',
+ refId = 'total',
+ expr = 'sum(rate(project_registry_requests[1m]))',
+ exemplar = true,
+ ))
+}
diff --git a/terraform/monitoring/terraform.tf b/terraform/monitoring/terraform.tf
new file mode 100644
index 0000000..d733dd6
--- /dev/null
+++ b/terraform/monitoring/terraform.tf
@@ -0,0 +1,14 @@
+terraform {
+ required_version = ">= 1.0"
+
+ required_providers {
+ grafana = {
+ source = "grafana/grafana"
+ version = "~> 2.0"
+ }
+ jsonnet = {
+ source = "alxrem/jsonnet"
+ version = "~> 2.2.0"
+ }
+ }
+}
diff --git a/terraform/monitoring/variables.tf b/terraform/monitoring/variables.tf
new file mode 100644
index 0000000..4cf69c2
--- /dev/null
+++ b/terraform/monitoring/variables.tf
@@ -0,0 +1,24 @@
+variable "monitoring_role_arn" {
+ description = "The ARN of the monitoring role."
+ type = string
+}
+
+variable "notification_channels" {
+ description = "The notification channels to send alerts to"
+ type = list(any)
+}
+
+variable "prometheus_endpoint" {
+ description = "The endpoint for the Prometheus server."
+ type = string
+}
+
+variable "ecs_service_name" {
+ description = "The name of the ECS service."
+ type = string
+}
+
+variable "redis_cluster_id" {
+ description = "The ID of the Redis cluster."
+ type = string
+}
diff --git a/terraform/outputs.tf b/terraform/outputs.tf
new file mode 100644
index 0000000..e69de29
diff --git a/terraform/providers.tf b/terraform/providers.tf
new file mode 100644
index 0000000..f7d191e
--- /dev/null
+++ b/terraform/providers.tf
@@ -0,0 +1,12 @@
+provider "aws" {
+ region = var.region
+
+ default_tags {
+ tags = module.this.tags
+ }
+}
+
+provider "grafana" {
+ url = "https://${data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.grafana_endpoint}"
+ auth = var.grafana_auth
+}
diff --git a/terraform/redis/README.md b/terraform/redis/README.md
new file mode 100644
index 0000000..bd3eda3
--- /dev/null
+++ b/terraform/redis/README.md
@@ -0,0 +1,43 @@
+# `redis` module
+
+This module provisions an Amazon ElastiCache Redis cluster, along with its subnet group and security group.
+
+
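+## Usage
+
+A minimal invocation from the parent stack (a sketch: the VPC and subnet IDs below are illustrative placeholders; the real values are wired from the parent module's VPC outputs):
+
+```hcl
+module "redis" {
+  source  = "./redis"
+  context = module.this # label context from the parent stack
+
+  vpc_id      = "vpc-0123456789abcdef0"                # placeholder
+  subnets_ids = ["subnet-aaaa1111", "subnet-bbbb2222"] # placeholder private subnets
+}
+```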
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | ~> 1.0 |
+| [aws](#requirement\_aws) | ~> 5.7 |
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | ~> 5.7 |
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [this](#module\_this) | app.terraform.io/wallet-connect/label/null | 0.3.2 |
+
+## Inputs
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [context](#input\_context) | Single object for setting entire context at once. | `any` | <pre>{<br>  "attributes": [],<br>  "delimiter": null,<br>  "id\_length\_limit": null,<br>  "label\_key\_case": null,<br>  "label\_order": [],<br>  "label\_value\_case": null,<br>  "name": null,<br>  "namespace": null,<br>  "regex\_replace\_chars": null,<br>  "region": null,<br>  "stage": null,<br>  "tags": {}<br>}</pre> | no |
+| [egress\_cidr\_blocks](#input\_egress\_cidr\_blocks) | The CIDR blocks to allow egress to, default to VPC only. | `set(string)` | `null` | no |
+| [ingress\_cidr\_blocks](#input\_ingress\_cidr\_blocks) | The CIDR blocks to allow ingress from, default to VPC only. | `set(string)` | `null` | no |
+| [node\_engine\_version](#input\_node\_engine\_version) | The version of Redis to use | `string` | `"6.x"` | no |
+| [node\_type](#input\_node\_type) | The instance type to use for the database nodes | `string` | `"cache.t4g.micro"` | no |
+| [num\_cache\_nodes](#input\_num\_cache\_nodes) | The number of nodes to create in the cluster | `number` | `1` | no |
+| [subnets\_ids](#input\_subnets\_ids) | The list of subnet IDs to create the cluster in | `set(string)` | n/a | yes |
+| [vpc\_id](#input\_vpc\_id) | The VPC ID to create the security group in | `string` | n/a | yes |
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [cluster\_id](#output\_cluster\_id) | The ID of the cluster |
+| [endpoint](#output\_endpoint) | The endpoint of the Redis cluster |
+
+
+
diff --git a/terraform/redis/context.tf b/terraform/redis/context.tf
new file mode 100644
index 0000000..9871e49
--- /dev/null
+++ b/terraform/redis/context.tf
@@ -0,0 +1,193 @@
+module "this" {
+ source = "app.terraform.io/wallet-connect/label/null"
+ version = "0.3.2"
+
+ namespace = var.namespace
+ region = var.region
+ stage = var.stage
+ name = var.name
+ delimiter = var.delimiter
+ attributes = var.attributes
+ tags = var.tags
+ label_order = var.label_order
+ regex_replace_chars = var.regex_replace_chars
+ id_length_limit = var.id_length_limit
+ label_key_case = var.label_key_case
+ label_value_case = var.label_value_case
+
+ context = var.context
+}
+
+################################################################################
+# Copy contents of label/variables.tf here
+
+#tflint-ignore: terraform_standard_module_structure
+variable "context" {
+ type = any
+ default = {
+ namespace = null
+ region = null
+ stage = null
+ name = null
+ delimiter = null
+ attributes = []
+ tags = {}
+ label_order = []
+ regex_replace_chars = null
+ id_length_limit = null
+ label_key_case = null
+ label_value_case = null
+ }
+ description = <<-EOT
+ Single object for setting entire context at once.
+ See description of individual variables for details.
+ Leave string and numeric variables as `null` to use default value.
+ Individual variable settings (non-null) override settings in context object,
+ except for attributes and tags, which are merged.
+ EOT
+
+ validation {
+ condition = lookup(var.context, "label_key_case", null) == null ? true : contains(["lower", "title", "upper"], var.context["label_key_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+
+ validation {
+ condition = lookup(var.context, "label_value_case", null) == null ? true : contains(["lower", "title", "upper", "none"], var.context["label_value_case"])
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "namespace" {
+ type = string
+ default = null
+ description = "ID element. Usually the organization name, i.e. 'walletconnect' to help ensure generated IDs are globally unique."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "region" {
+ type = string
+ default = null
+ description = "ID element. Usually used for region e.g. 'uw2', 'us-west-2'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "stage" {
+ type = string
+ default = null
+ description = "ID element. Usually used to indicate role, e.g. 'prod', 'staging', 'source', 'build', 'test', 'deploy', 'release'."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "name" {
+ type = string
+ default = null
+ description = <<-EOT
+ ID element. Usually the component name.
+ This is the only ID element not also included as a `tag`.
+ The "name" tag is set to the full `id` string. There is no tag with the value of the `name` input.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "delimiter" {
+ type = string
+ default = null
+ description = <<-EOT
+ Delimiter to be used between ID elements.
+ Defaults to `-` (hyphen). Set to `""` to use no delimiter at all.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "attributes" {
+ type = list(string)
+ default = []
+ description = <<-EOT
+ ID element. Additional attributes (e.g. `workers` or `cluster`) to add to `id`,
+ in the order they appear in the list. New attributes are appended to the
+ end of the list. The elements of the list are joined by the `delimiter`
+ and treated as a single ID element.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "tags" {
+ type = map(string)
+ default = {}
+ description = "Additional tags."
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_order" {
+ type = list(string)
+ default = null
+ description = <<-EOT
+ The order in which the labels (ID elements) appear in the `id`.
+ Defaults to ["namespace", "region", "stage", "name", "attributes"].
+ You can omit any of the 5 labels, but at least one must be present.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "regex_replace_chars" {
+ type = string
+ default = null
+ description = <<-EOT
+ Terraform regular expression (regex) string.
+ Characters matching the regex will be removed from the ID elements.
+ If not set, `"/[^a-zA-Z0-9-]/"` is used to remove all characters other than hyphens, letters and digits.
+ EOT
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "id_length_limit" {
+ type = number
+ default = null
+ description = <<-EOT
+ Limit `id` to this many characters (minimum 6).
+ Set to `0` for unlimited length.
+ Set to `null` to keep the existing setting, which defaults to `0`.
+ Does not affect `id_full`.
+ EOT
+ validation {
+ condition = var.id_length_limit == null ? true : var.id_length_limit >= 6 || var.id_length_limit == 0
+ error_message = "The id_length_limit must be >= 6 if supplied (not null), or 0 for unlimited length."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_key_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of the `tags` keys (label names) for tags generated by this module.
+ Does not affect keys of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper`.
+ Default value: `title`.
+ EOT
+
+ validation {
+ condition = var.label_key_case == null ? true : contains(["lower", "title", "upper"], var.label_key_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`."
+ }
+}
+
+#tflint-ignore: terraform_standard_module_structure
+variable "label_value_case" {
+ type = string
+ default = null
+ description = <<-EOT
+ Controls the letter case of ID elements (labels) as included in `id`,
+ set as tag values, and output by this module individually.
+ Does not affect values of tags passed in via the `tags` input.
+ Possible values: `lower`, `title`, `upper` and `none` (no transformation).
+ Set this to `title` and set `delimiter` to `""` to yield Pascal Case IDs.
+ Default value: `lower`.
+ EOT
+
+ validation {
+ condition = var.label_value_case == null ? true : contains(["lower", "title", "upper", "none"], var.label_value_case)
+ error_message = "Allowed values: `lower`, `title`, `upper`, `none`."
+ }
+}
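
These context variables follow the cloudposse null-label convention: the non-null ID elements are joined by the `delimiter` in `label_order` order to form `module.this.id`, which the resources below use for naming. A minimal sketch of that composition, with purely illustrative values:

```hcl
# Illustrative sketch only: how the ID elements compose under the default
# label_order ["namespace", "region", "stage", "name", "attributes"] and
# the default "-" delimiter. All values here are hypothetical.
locals {
  id_example = join("-", compact([
    "walletconnect", # namespace
    "uw2",           # region
    "staging",       # stage
    "verify-server", # name
  ]))
  # => "walletconnect-uw2-staging-verify-server"
}
```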
diff --git a/terraform/redis/main.tf b/terraform/redis/main.tf
new file mode 100644
index 0000000..dcdeaa0
--- /dev/null
+++ b/terraform/redis/main.tf
@@ -0,0 +1,45 @@
+data "aws_vpc" "vpc" {
+ id = var.vpc_id
+}
+
+resource "aws_elasticache_cluster" "cache" {
+ cluster_id = module.this.id
+ engine = "redis"
+ node_type = var.node_type
+ num_cache_nodes = var.num_cache_nodes
+ parameter_group_name = "default.redis6.x"
+ engine_version = var.node_engine_version
+ port = 6379
+ subnet_group_name = aws_elasticache_subnet_group.private_subnets.name
+ security_group_ids = [
+ aws_security_group.service_security_group.id
+ ]
+ snapshot_retention_limit = 2
+}
+
+resource "aws_elasticache_subnet_group" "private_subnets" {
+ name = "${module.this.id}-private-subnet-group"
+ subnet_ids = var.subnets_ids
+}
+
+# Allow only the app to access Redis
+resource "aws_security_group" "service_security_group" {
+ name = "${module.this.id}-redis-service-ingress"
+ description = "Allow ingress from the application"
+ vpc_id = var.vpc_id
+ ingress {
+ description = "${module.this.id} - ingress from application"
+ from_port = 6379
+ to_port = 6379
+ protocol = "TCP"
+ cidr_blocks = var.ingress_cidr_blocks == null ? [data.aws_vpc.vpc.cidr_block] : var.ingress_cidr_blocks
+ }
+
+ egress {
+ description = "${module.this.id} - egress to application"
+ from_port = 0 # Allow any port
+ to_port = 0 # Allow any port
+ protocol = "-1" # Allow any protocol
+ cidr_blocks = var.egress_cidr_blocks == null ? [data.aws_vpc.vpc.cidr_block] : var.egress_cidr_blocks
+ }
+}
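
The security group falls back to the VPC CIDR whenever `ingress_cidr_blocks` / `egress_cidr_blocks` are left `null`, so by default only in-VPC traffic can reach Redis. A hedged sketch of a caller narrowing ingress further, assuming the private-subnet /24 blocks produced by the network layout later in this diff:

```hcl
# Hypothetical override: restrict Redis ingress to the private subnets
# only, instead of the whole VPC CIDR. The CIDRs match the /24 blocks
# carved out in res_network.tf; everything else is illustrative.
module "redis_restricted" {
  source  = "./redis"
  context = module.this

  vpc_id              = module.vpc.vpc_id
  subnets_ids         = module.vpc.intra_subnets
  ingress_cidr_blocks = ["10.0.12.0/24", "10.0.13.0/24", "10.0.14.0/24"]
}
```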
diff --git a/terraform/redis/outputs.tf b/terraform/redis/outputs.tf
new file mode 100644
index 0000000..88c618c
--- /dev/null
+++ b/terraform/redis/outputs.tf
@@ -0,0 +1,9 @@
+output "cluster_id" {
+ description = "The ID of the cluster"
+ value = aws_elasticache_cluster.cache.id
+}
+
+output "endpoint" {
+ description = "The endpoint of the Redis cluster"
+ value = "${aws_elasticache_cluster.cache.cache_nodes[0].address}:${aws_elasticache_cluster.cache.cache_nodes[0].port}"
+}
diff --git a/terraform/redis/terraform.tf b/terraform/redis/terraform.tf
new file mode 100644
index 0000000..f4c0a25
--- /dev/null
+++ b/terraform/redis/terraform.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = "~> 1.0"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 5.7"
+ }
+ }
+}
diff --git a/terraform/redis/variables.tf b/terraform/redis/variables.tf
new file mode 100644
index 0000000..cf2f70c
--- /dev/null
+++ b/terraform/redis/variables.tf
@@ -0,0 +1,45 @@
+#-------------------------------------------------------------------------------
+# Nodes Configuration
+
+variable "node_type" {
+ description = "The instance type to use for the database nodes"
+ type = string
+ default = "cache.t4g.micro" # https://aws.amazon.com/elasticache/pricing/?nc=sn&loc=5#On-demand_nodes
+}
+
+variable "num_cache_nodes" {
+ description = "The number of nodes to create in the cluster"
+ type = number
+ default = 1
+}
+
+variable "node_engine_version" {
+ description = "The version of Redis to use"
+ type = string
+ default = "6.x"
+}
+
+#-------------------------------------------------------------------------------
+# Networking
+
+variable "vpc_id" {
+ description = "The VPC ID to create the security group in"
+ type = string
+}
+
+variable "subnets_ids" {
+ description = "The list of subnet IDs to create the cluster in"
+ type = set(string)
+}
+
+variable "ingress_cidr_blocks" {
+ description = "The CIDR blocks to allow ingress from, default to VPC only."
+ type = set(string)
+ default = null
+}
+
+variable "egress_cidr_blocks" {
+ description = "The CIDR blocks to allow egress to, default to VPC only."
+ type = set(string)
+ default = null
+}
diff --git a/terraform/res_alerting.tf b/terraform/res_alerting.tf
new file mode 100644
index 0000000..82c1087
--- /dev/null
+++ b/terraform/res_alerting.tf
@@ -0,0 +1,12 @@
+module "alerting" {
+ source = "./alerting"
+ context = module.this
+
+ webhook_cloudwatch_p2 = var.webhook_cloudwatch_p2
+ webhook_prometheus_p2 = var.webhook_prometheus_p2
+
+ ecs_cluster_name = module.ecs.ecs_cluster_name
+ ecs_service_name = module.ecs.ecs_service_name
+
+ redis_cluster_id = module.redis.cluster_id
+}
diff --git a/terraform/res_dns.tf b/terraform/res_dns.tf
new file mode 100644
index 0000000..fb4750d
--- /dev/null
+++ b/terraform/res_dns.tf
@@ -0,0 +1,13 @@
+locals {
+ zones = { for k, v in tomap(data.terraform_remote_state.infra_aws.outputs.zones.verify[local.stage]) : v.id => v.name }
+ zones_certificates = { for k, v in module.dns_certificate : v.zone_id => v.certificate_arn }
+}
+
+module "dns_certificate" {
+ for_each = local.zones
+ source = "app.terraform.io/wallet-connect/dns/aws"
+ version = "0.1.3"
+ context = module.this
+ hosted_zone_name = each.value
+ fqdn = each.value
+}
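
The two locals reshape the remote-state zone data into lookup maps keyed by zone ID: `zones` feeds the certificate module, and `zones_certificates` is later handed to the ECS module for its load-balancer listeners. A shape sketch with hypothetical values:

```hcl
# Shape sketch only; the zone ID, zone name, and certificate ARN below
# are hypothetical.
locals {
  zones_shape = {
    "Z0123456789ABC" = "verify.walletconnect.com"
  }
  zones_certificates_shape = {
    "Z0123456789ABC" = "arn:aws:acm:eu-central-1:111111111111:certificate/00000000-0000-0000-0000-000000000000"
  }
}
```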
diff --git a/terraform/res_ecs.tf b/terraform/res_ecs.tf
new file mode 100644
index 0000000..d93e4a2
--- /dev/null
+++ b/terraform/res_ecs.tf
@@ -0,0 +1,82 @@
+data "aws_s3_bucket" "geoip" {
+ bucket = data.terraform_remote_state.infra_aws.outputs.geoip_bucked_id
+}
+
+resource "aws_prometheus_workspace" "prometheus" {
+ alias = "prometheus-${module.this.id}"
+}
+
+resource "aws_iam_role" "application_role" {
+ name = "${module.this.id}-ecs-task-execution"
+ assume_role_policy = jsonencode({
+ Version = "2012-10-17",
+ Statement = [
+ {
+ Action = "sts:AssumeRole"
+ Effect = "Allow"
+ Principal = {
+ Service = "ecs-tasks.amazonaws.com"
+ }
+ }
+ ]
+ })
+}
+
+# ECS Cluster, Task, Service, and Load Balancer for our app
+module "ecs" {
+ source = "./ecs"
+ context = module.this
+
+ # Cluster
+ ecr_repository_url = local.ecr_repository_url
+ image_version = var.image_version
+ task_execution_role_name = aws_iam_role.application_role.name
+ task_cpu = var.task_cpu
+ task_memory = var.task_memory
+ autoscaling_desired_count = var.app_autoscaling_desired_count
+ autoscaling_min_capacity = var.app_autoscaling_min_capacity
+ autoscaling_max_capacity = var.app_autoscaling_max_capacity
+ cloudwatch_logs_key_arn = aws_kms_key.cloudwatch_logs.arn
+
+ # DNS
+ route53_zones = local.zones
+ route53_zones_certificates = local.zones_certificates
+
+ # Network
+ vpc_id = module.vpc.vpc_id
+ public_subnets = module.vpc.public_subnets
+ private_subnets = module.vpc.private_subnets
+ allowed_app_ingress_cidr_blocks = module.vpc.vpc_cidr_block
+ allowed_lb_ingress_cidr_blocks = module.vpc.vpc_cidr_block
+
+ # Application
+ app_secret = var.app_secret
+
+ port = 8080
+ log_level = var.log_level
+
+ project_registry_api_url = var.project_registry_api_url
+ project_registry_api_auth_token = var.project_registry_api_auth_token
+
+ data_api_url = var.data_api_url
+ data_api_auth_token = var.data_api_auth_token
+
+ attestation_cache_url = "redis://${module.redis.endpoint}/0"
+ project_registry_cache_url = "redis://${module.redis.endpoint}/1"
+ scam_guard_cache_url = "redis://${module.redis.endpoint}/2"
+
+ ofac_blocked_countries = var.ofac_blocked_countries
+
+ # Analytics
+ analytics_datalake_bucket_name = data.terraform_remote_state.datalake.outputs.datalake_bucket_id
+ analytics_datalake_kms_key_arn = data.terraform_remote_state.datalake.outputs.datalake_kms_key_arn
+
+ # Monitoring
+ prometheus_endpoint = aws_prometheus_workspace.prometheus.prometheus_endpoint
+
+ # GeoIP
+ geoip_db_bucket_name = data.aws_s3_bucket.geoip.id
+ geoip_db_key = var.geoip_db_key
+
+ depends_on = [aws_iam_role.application_role]
+}
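
The three cache URLs point at the same ElastiCache endpoint and differ only in the trailing logical database index (`/0`, `/1`, `/2`), which keeps attestation, project-registry, and scam-guard data separated on a single cluster. An illustrative expansion, with a hypothetical endpoint value:

```hcl
# Illustrative only: module.redis.endpoint resolves to "<address>:<port>",
# so the cache URLs expand roughly as follows (hostname is hypothetical).
locals {
  endpoint_example               = "verify-server.abc123.0001.euc1.cache.amazonaws.com:6379"
  attestation_cache_example      = "redis://${local.endpoint_example}/0"
  project_registry_cache_example = "redis://${local.endpoint_example}/1"
  scam_guard_cache_example       = "redis://${local.endpoint_example}/2"
}
```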
diff --git a/terraform/res_monitoring.tf b/terraform/res_monitoring.tf
new file mode 100644
index 0000000..563290d
--- /dev/null
+++ b/terraform/res_monitoring.tf
@@ -0,0 +1,10 @@
+module "monitoring" {
+ source = "./monitoring"
+ context = module.this
+
+ monitoring_role_arn = data.terraform_remote_state.monitoring.outputs.grafana_workspaces.central.iam_role_arn
+ notification_channels = var.notification_channels
+ prometheus_endpoint = aws_prometheus_workspace.prometheus.prometheus_endpoint
+ ecs_service_name = module.ecs.ecs_service_name
+ redis_cluster_id = module.redis.cluster_id
+}
diff --git a/terraform/res_network.tf b/terraform/res_network.tf
new file mode 100644
index 0000000..05c96a7
--- /dev/null
+++ b/terraform/res_network.tf
@@ -0,0 +1,134 @@
+locals {
+ vpc_cidr = "10.0.0.0/16"
+ vpc_azs = slice(data.aws_availability_zones.available.names, 0, 3)
+ vpc_flow_s3_bucket_name = substr("vpc-flow-logs-${module.this.id}-${random_pet.this.id}", 0, 63)
+}
+
+#-------------------------------------------------------------------------------
+# VPC
+
+data "aws_availability_zones" "available" {}
+
+#tfsec:ignore:aws-ec2-no-public-ingress-acl
+#tfsec:ignore:aws-ec2-require-vpc-flow-logs-for-all-vpcs
+#tfsec:ignore:aws-ec2-no-excessive-port-access
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 5.0"
+
+ name = module.this.id
+ cidr = local.vpc_cidr
+ azs = local.vpc_azs
+
+ database_subnets = [for k, v in local.vpc_azs : cidrsubnet(local.vpc_cidr, 8, k)]
+ intra_subnets = [for k, v in local.vpc_azs : cidrsubnet(local.vpc_cidr, 8, k + 4)]
+ public_subnets = [for k, v in local.vpc_azs : cidrsubnet(local.vpc_cidr, 8, k + 8)]
+ private_subnets = [for k, v in local.vpc_azs : cidrsubnet(local.vpc_cidr, 8, k + 12)]
+
+ enable_dns_support = true
+ enable_dns_hostnames = true
+ enable_nat_gateway = true
+ single_nat_gateway = true
+ one_nat_gateway_per_az = false
+
+ enable_flow_log = true
+ flow_log_file_format = "parquet"
+ flow_log_destination_type = "s3"
+ flow_log_destination_arn = module.vpc_flow_s3_bucket.s3_bucket_arn
+ vpc_flow_log_tags = module.this.tags
+}
+
+module "vpc_endpoints" {
+ source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints"
+ version = "5.1"
+
+ vpc_id = module.vpc.vpc_id
+
+ endpoints = {
+ cloudwatch = {
+ service = "monitoring"
+ },
+ cloudwatch-events = {
+ service = "events"
+ },
+ cloudwatch-logs = {
+ service = "logs"
+ },
+ ecs = {
+ service = "ecs"
+ },
+ ecs-agent = {
+ service = "ecs-agent"
+ },
+ ecs-telemetry = {
+ service = "ecs-telemetry"
+ },
+ elastic-load-balancing = {
+ service = "elasticloadbalancing"
+ },
+ kms = {
+ service = "kms"
+ },
+ s3 = {
+ service = "s3"
+ },
+ }
+}
+
+#-------------------------------------------------------------------------------
+# VPC Flow S3 Bucket
+
+#TODO: Enable bucket logging and send logs to bucket on security account.
+#tfsec:ignore:aws-s3-enable-versioning
+#tfsec:ignore:aws-s3-enable-bucket-logging
+#tfsec:ignore:aws-s3-enable-bucket-encryption
+#tfsec:ignore:aws-s3-encryption-customer-key
+module "vpc_flow_s3_bucket" {
+ source = "terraform-aws-modules/s3-bucket/aws"
+ version = "~> 3.14"
+
+ bucket = local.vpc_flow_s3_bucket_name
+ force_destroy = true
+
+ policy = jsonencode({
+ Version = "2012-10-17"
+ Statement = [
+ {
+ Sid = "AWSLogDeliveryAclCheck"
+ Effect = "Allow"
+ Principal = {
+ Service = "delivery.logs.amazonaws.com"
+ }
+ Action = "s3:GetBucketAcl"
+ Resource = "arn:aws:s3:::${local.vpc_flow_s3_bucket_name}"
+ },
+ {
+ Sid = "AWSLogDeliveryWrite"
+ Effect = "Allow"
+ Principal = {
+ Service = "delivery.logs.amazonaws.com"
+ }
+ Action = "s3:PutObject"
+ Resource = "arn:aws:s3:::${local.vpc_flow_s3_bucket_name}/AWSLogs/*"
+ }
+ ]
+ })
+
+ lifecycle_rule = [
+ {
+ id = "transition-old-logs"
+ enabled = true
+
+ transition = [
+ {
+ days = 30
+ storage_class = "ONEZONE_IA"
+ },
+ {
+ days = 60
+ storage_class = "GLACIER"
+ }
+ ]
+ }
+ ]
+}
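
The subnet layout is plain `cidrsubnet` arithmetic: each call adds 8 bits to the /16, producing /24 networks, and the fixed offsets (0, 4, 8, 12) keep the four subnet tiers in disjoint ranges with room to grow to a fourth AZ. A worked example:

```hcl
# Worked example of the subnet math above: with three AZs (k = 0, 1, 2)
# the tiers come out as disjoint /24 blocks.
#   database: 10.0.0.0/24,  10.0.1.0/24,  10.0.2.0/24   (k + 0)
#   intra:    10.0.4.0/24,  10.0.5.0/24,  10.0.6.0/24   (k + 4)
#   public:   10.0.8.0/24,  10.0.9.0/24,  10.0.10.0/24  (k + 8)
#   private:  10.0.12.0/24, 10.0.13.0/24, 10.0.14.0/24  (k + 12)
output "private_subnet_cidrs_example" {
  value = [for k in range(3) : cidrsubnet("10.0.0.0/16", 8, k + 12)]
  # => ["10.0.12.0/24", "10.0.13.0/24", "10.0.14.0/24"]
}
```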
diff --git a/terraform/res_redis.tf b/terraform/res_redis.tf
new file mode 100644
index 0000000..e997b2d
--- /dev/null
+++ b/terraform/res_redis.tf
@@ -0,0 +1,7 @@
+module "redis" {
+ source = "./redis"
+ context = module.this
+
+ vpc_id = module.vpc.vpc_id
+ subnets_ids = module.vpc.intra_subnets
+}
diff --git a/terraform/terraform.tf b/terraform/terraform.tf
new file mode 100644
index 0000000..11c6c1f
--- /dev/null
+++ b/terraform/terraform.tf
@@ -0,0 +1,27 @@
+# Terraform Configuration
+terraform {
+ required_version = ">= 1.0"
+
+ backend "remote" {
+ hostname = "app.terraform.io"
+ organization = "wallet-connect"
+ workspaces {
+ prefix = "verify-server-"
+ }
+ }
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.7"
+ }
+ grafana = {
+ source = "grafana/grafana"
+ version = ">= 2.1"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "3.5.1"
+ }
+ }
+}
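
With a prefixed `remote` backend, locally selected workspace names are appended to the prefix to resolve the Terraform Cloud workspace. A sketch of the mapping, with workspace names inferred from the stage values used elsewhere in this diff:

```hcl
# Sketch of the prefix-based workspace mapping (names assumed from the
# stages used in this repo, not confirmed by it):
#   terraform workspace select staging -> TFC workspace "verify-server-staging"
#   terraform workspace select prod    -> TFC workspace "verify-server-prod"
```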
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 0000000..e364e94
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,136 @@
+#-------------------------------------------------------------------------------
+# Configuration
+
+variable "grafana_auth" {
+ description = "The API Token for the Grafana instance"
+ type = string
+ default = ""
+}
+
+
+#-------------------------------------------------------------------------------
+# Service
+
+variable "name" {
+ description = "The name of the application"
+ type = string
+ default = "verify-server"
+}
+
+variable "region" {
+ description = "AWS region to deploy to"
+ type = string
+}
+
+variable "image_version" {
+ description = "The ECS tag of the image to deploy"
+ type = string
+}
+
+variable "task_cpu" {
+ description = "The number of CPU units to allocate to the task"
+ type = number
+}
+
+variable "task_memory" {
+ description = "The amount of memory to allocate to the task"
+ type = number
+}
+
+variable "app_autoscaling_desired_count" {
+ description = "The desired number of tasks to run"
+ type = number
+ default = 1
+}
+
+variable "app_autoscaling_min_capacity" {
+ description = "The minimum number of tasks to run when autoscaling"
+ type = number
+ default = 1
+}
+
+variable "app_autoscaling_max_capacity" {
+ description = "The maximum number of tasks to run when autoscaling"
+ type = number
+ default = 1
+}
+
+#-------------------------------------------------------------------------------
+# Application
+
+variable "app_secret" {
+ description = "The application secret"
+ type = string
+ sensitive = true
+}
+
+variable "log_level" {
+ description = "Defines logging level for the application"
+ type = string
+}
+
+variable "ofac_blocked_countries" {
+ description = "The list of countries to block"
+ type = string
+ default = ""
+}
+
+
+#-------------------------------------------------------------------------------
+# Project Registry
+
+variable "project_registry_api_url" {
+ description = "The url of the project registry API"
+ type = string
+}
+
+variable "project_registry_api_auth_token" {
+ description = "The auth token for the project registry API"
+ type = string
+ sensitive = true
+}
+
+#-------------------------------------------------------------------------------
+# Data API
+
+variable "data_api_url" {
+ description = "The url of the data API"
+ type = string
+}
+
+variable "data_api_auth_token" {
+ description = "The auth token for the data API"
+ type = string
+ sensitive = true
+}
+
+
+#-------------------------------------------------------------------------------
+# Analytics
+
+variable "geoip_db_key" {
+ description = "The name to the GeoIP database"
+ type = string
+}
+
+
+#-------------------------------------------------------------------------------
+# Alerting / Monitoring
+
+variable "notification_channels" {
+ description = "The notification channels to send alerts to"
+ type = list(any)
+ default = []
+}
+
+variable "webhook_cloudwatch_p2" {
+ description = "The webhook to send CloudWatch P2 alerts to"
+ type = string
+ default = ""
+}
+
+variable "webhook_prometheus_p2" {
+ description = "The webhook to send Prometheus P2 alerts to"
+ type = string
+ default = ""
+}