From c67cfdc21cb03a2ed10a0c725ea7ea27b6133d4c Mon Sep 17 00:00:00 2001
From: jonny <65790298+day0hero@users.noreply.github.com>
Date: Wed, 28 Feb 2024 13:56:53 -0600
Subject: [PATCH] Initial commit
---
.ansible-lint | 18 +
.github/dependabot.yml | 9 +
.github/linters/.gitleaks.toml | 8 +
.github/linters/.markdown-lint.yml | 6 +
.github/workflows/ansible-lint.yml | 17 +
.github/workflows/jsonschema.yaml | 72 +
.github/workflows/linter.yml | 65 +
.github/workflows/superlinter.yml | 38 +
.gitignore | 9 +
.gitleaks.toml | 1 +
LICENSE | 202 +
Makefile | 25 +
README.md | 19 +
ansible.cfg | 6 +
ansible/site.yaml | 17 +
charts/all/config-demo/Chart.yaml | 6 +
.../config-demo/templates/config-demo-cm.yaml | 24 +
.../templates/config-demo-deployment.yaml | 65 +
.../config-demo-external-secret.yaml | 18 +
.../config-demo/templates/config-demo-is.yaml | 12 +
.../templates/config-demo-route.yaml | 14 +
.../templates/config-demo-svc.yaml | 17 +
charts/all/config-demo/values.yaml | 14 +
charts/all/hello-world/Chart.yaml | 6 +
.../hello-world/templates/hello-world-cm.yaml | 23 +
.../templates/hello-world-deployment.yaml | 58 +
.../templates/hello-world-route.yaml | 14 +
.../templates/hello-world-svc.yaml | 17 +
charts/all/hello-world/values.yaml | 4 +
charts/region/.keep | 0
common/.ansible-lint | 20 +
common/.github/dependabot.yml | 9 +
common/.github/linters/.gitleaks.toml | 8 +
common/.github/linters/.markdown-lint.yml | 6 +
common/.github/workflows/ansible-lint.yml | 17 +
common/.github/workflows/ansible-unittest.yml | 52 +
common/.github/workflows/chart-branches.yml | 118 +
common/.github/workflows/chart-split.yml | 38 +
common/.github/workflows/jsonschema.yaml | 57 +
common/.github/workflows/linter.yml | 65 +
common/.github/workflows/superlinter.yml | 38 +
common/.gitignore | 13 +
common/.gitleaks.toml | 1 +
common/Changes.md | 145 +
common/LICENSE | 202 +
common/Makefile | 256 +
common/README.md | 22 +
.../.github/workflows/update-helm-repo.yml | 29 +
common/acm/.helmignore | 1 +
common/acm/Chart.yaml | 6 +
common/acm/templates/_helpers.tpl | 13 +
common/acm/templates/multiclusterhub.yaml | 11 +
.../templates/policies/acm-hub-ca-policy.yaml | 71 +
.../policies/application-policies.yaml | 152 +
.../templates/policies/ocp-gitops-policy.yaml | 82 +
.../policies/private-repo-policies.yaml | 161 +
.../templates/provision/_install-config.tpl | 66 +
.../acm/templates/provision/clusterpool.yaml | 95 +
.../acm/templates/provision/secrets-aws.yaml | 84 +
.../templates/provision/secrets-azure.yaml | 84 +
.../templates/provision/secrets-common.yaml | 61 +
common/acm/test.yaml | 35 +
common/acm/values.yaml | 35 +
common/ansible/ansible.cfg | 6 +
.../ansible/playbooks/acm/acmhub-get-ca.yaml | 53 +
.../playbooks/hello-world/hello-world.yaml | 23 +
common/ansible/playbooks/iib-ci/iib-ci.yaml | 8 +
common/ansible/playbooks/iib-ci/lookup.yml | 46 +
.../playbooks/k8s_secrets/k8s_secrets.yml | 9 +
.../process_secrets/display_secrets_info.yml | 29 +
.../process_secrets/process_secrets.yml | 50 +
common/ansible/playbooks/vault/vault.yaml | 9 +
common/ansible/plugins/__init__.py | 0
.../plugins/filter/parse_acm_secrets.py | 83 +
.../module_utils/load_secrets_common.py | 124 +
.../plugins/module_utils/load_secrets_v1.py | 267 +
.../plugins/module_utils/load_secrets_v2.py | 456 +
.../plugins/module_utils/parse_secrets_v2.py | 527 +
.../plugins/modules/parse_secrets_info.py | 149 +
.../modules/vault_load_parsed_secrets.py | 302 +
.../plugins/modules/vault_load_secrets.py | 209 +
.../roles/cluster_pre_check/defaults/main.yml | 3 +
.../roles/cluster_pre_check/tasks/main.yml | 26 +
.../roles/find_vp_secrets/tasks/main.yml | 87 +
common/ansible/roles/iib_ci/README.md | 97 +
common/ansible/roles/iib_ci/defaults/main.yml | 17 +
common/ansible/roles/iib_ci/handlers/main.yml | 2 +
common/ansible/roles/iib_ci/meta/main.yml | 29 +
.../iib_ci/tasks/fetch-operator-images.yml | 98 +
.../iib_ci/tasks/install-iib-in-cluster.yml | 52 +
common/ansible/roles/iib_ci/tasks/main.yml | 43 +
.../iib_ci/tasks/mirror-related-images.yml | 226 +
.../iib_ci/tasks/setup-external-registry.yml | 45 +
.../iib_ci/tasks/setup-internal-registry.yml | 108 +
.../iib_ci/templates/catalogSource.yaml.j2 | 9 +
.../iib_ci/templates/htpasswd-oauth.yaml | 14 +
.../imageContentSourcePolicy.yaml.j2 | 19 +
.../templates/imageDigestMirror.yaml.j2 | 18 +
.../roles/iib_ci/templates/mirror.map.j2 | 3 +
common/ansible/roles/iib_ci/vars/main.yml | 2 +
.../roles/k8s_secret_utils/defaults/main.yml | 2 +
.../tasks/inject_k8s_secret.yml | 15 +
.../tasks/inject_k8s_secrets.yml | 5 +
.../roles/k8s_secret_utils/tasks/main.yml | 6 +
.../k8s_secret_utils/tasks/parse_secrets.yml | 12 +
common/ansible/roles/vault_utils/README.md | 230 +
.../roles/vault_utils/defaults/main.yml | 24 +
.../roles/vault_utils/handlers/main.yml | 2 +
.../ansible/roles/vault_utils/meta/main.yml | 31 +
.../ansible/roles/vault_utils/tasks/main.yml | 20 +
.../tasks/push_parsed_secrets.yaml | 43 +
.../roles/vault_utils/tasks/push_secrets.yaml | 125 +
.../roles/vault_utils/tasks/vault_init.yaml | 47 +
.../vault_utils/tasks/vault_secrets_init.yaml | 96 +
.../vault_utils/tasks/vault_spokes_init.yaml | 190 +
.../roles/vault_utils/tasks/vault_status.yaml | 61 +
.../roles/vault_utils/tasks/vault_unseal.yaml | 88 +
.../ansible/roles/vault_utils/tests/inventory | 2 +
.../ansible/roles/vault_utils/tests/test.yml | 6 +
.../vault_utils/values-secrets.v1.schema.json | 38 +
.../vault_utils/values-secrets.v2.schema.json | 335 +
.../ansible/roles/vault_utils/vars/main.yml | 2 +
common/ansible/tests/unit/test_ini_file.py | 56 +
.../ansible/tests/unit/test_parse_secrets.py | 981 ++
.../tests/unit/test_util_datastructures.py | 205 +
.../unit/test_vault_load_parsed_secrets.py | 320 +
.../tests/unit/test_vault_load_secrets.py | 388 +
.../tests/unit/test_vault_load_secrets_v2.py | 760 +
.../tests/unit/v1/mcg-values-secret.yaml | 27 +
.../tests/unit/v1/template-mcg-missing.yaml | 27 +
.../tests/unit/v1/template-mcg-working.yaml | 26 +
.../tests/unit/v1/values-secret-broken1.yaml | 6 +
.../tests/unit/v1/values-secret-broken2.yaml | 6 +
.../tests/unit/v1/values-secret-broken3.yaml | 9 +
.../unit/v1/values-secret-empty-files.yaml | 15 +
.../unit/v1/values-secret-empty-secrets.yaml | 16 +
.../tests/unit/v1/values-secret-fqdn.yaml | 11 +
.../tests/unit/v1/values-secret-good.yaml | 36 +
common/ansible/tests/unit/v2/aws-example.ini | 4 +
.../ansible/tests/unit/v2/test-file-contents | 1 +
.../tests/unit/v2/test-file-contents.b64 | 1 +
.../v2/values-secret-v2-base-k8s-backend.yaml | 9 +
.../values-secret-v2-base-none-backend.yaml | 11 +
...values-secret-v2-base-unknown-backend.yaml | 9 +
.../tests/unit/v2/values-secret-v2-base.yaml | 38 +
.../v2/values-secret-v2-block-yamlstring.yaml | 16 +
.../values-secret-v2-default-annotations.yaml | 13 +
.../v2/values-secret-v2-default-labels.yaml | 11 +
.../values-secret-v2-default-namespace.yaml | 8 +
.../v2/values-secret-v2-defaultvp-policy.yaml | 25 +
.../v2/values-secret-v2-emptyvaultprefix.yaml | 9 +
.../values-secret-v2-file-contents-b64.yaml | 9 +
...es-secret-v2-file-contents-double-b64.yaml | 9 +
.../v2/values-secret-v2-file-contents.yaml | 8 +
.../v2/values-secret-v2-files-emptypath.yaml | 25 +
...-secret-v2-files-wrong-onmissingvalue.yaml | 26 +
.../v2/values-secret-v2-files-wrongpath.yaml | 26 +
.../v2/values-secret-v2-generate-base64.yaml | 21 +
...values-secret-v2-generic-onlygenerate.yaml | 33 +
.../v2/values-secret-v2-ini-file-b64.yaml | 23 +
.../unit/v2/values-secret-v2-ini-file.yaml | 21 +
.../v2/values-secret-v2-more-namespaces.yaml | 11 +
...values-secret-v2-nondefault-namespace.yaml | 8 +
...es-secret-v2-none-no-targetnamespaces.yaml | 33 +
...es-secret-v2-nonexisting-backingstore.yaml | 23 +
.../unit/v2/values-secret-v2-nopolicies.yaml | 24 +
.../v2/values-secret-v2-novaultprefix.yaml | 8 +
.../v2/values-secret-v2-onlygenerate.yaml | 33 +
.../v2/values-secret-v2-override-labels.yaml | 13 +
.../values-secret-v2-override-namespace.yaml | 10 +
.../values-secret-v2-override-type-none.yaml | 14 +
.../v2/values-secret-v2-override-type.yaml | 12 +
.../v2/values-secret-v2-same-field-names.yaml | 14 +
.../values-secret-v2-same-secret-names.yaml | 20 +
.../v2/values-secret-v2-secret-base64.yaml | 11 +
.../values-secret-v2-secret-binary-b64.yaml | 10 +
.../v2/values-secret-v2-test-override.yaml | 28 +
.../v2/values-secret-v2-wrong-ini-file.yaml | 9 +
...values-secret-v2-wrong-onmissingvalue.yaml | 20 +
.../v2/values-secret-v2-wrong-override.yaml | 11 +
.../values-secret-v2-wrong-vaultpolicy.yaml | 20 +
.../.github/workflows/update-helm-repo.yml | 30 +
common/clustergroup/.helmignore | 1 +
common/clustergroup/Chart.yaml | 6 +
common/clustergroup/templates/_helpers.tpl | 72 +
.../templates/core/catalog-sources.yaml | 14 +
.../templates/core/namespaces.yaml | 32 +
.../templates/core/operatorgroup.yaml | 38 +
.../templates/core/subscriptions.yaml | 73 +
.../templates/imperative/_helpers.tpl | 65 +
.../templates/imperative/clusterrole.yaml | 21 +
.../templates/imperative/configmap.yaml | 12 +
.../templates/imperative/job.yaml | 69 +
.../templates/imperative/namespace.yaml | 10 +
.../templates/imperative/rbac.yaml | 30 +
.../templates/imperative/role.yaml | 20 +
.../templates/imperative/serviceaccount.yaml | 10 +
.../templates/imperative/unsealjob.yaml | 60 +
.../templates/plumbing/applications.yaml | 286 +
.../plumbing/argocd-cmp-plugin-cms.yaml | 12 +
.../templates/plumbing/argocd-super-role.yaml | 43 +
.../templates/plumbing/argocd.yaml | 167 +
.../plumbing/cluster-external-secrets.yaml | 43 +
.../templates/plumbing/gitops-namespace.yaml | 13 +
.../templates/plumbing/hosted-sites.yaml | 172 +
.../templates/plumbing/projects.yaml | 29 +
common/clustergroup/values.schema.json | 908 ++
common/clustergroup/values.yaml | 97 +
common/common | 1 +
common/examples/blank/Chart.yaml | 6 +
common/examples/blank/templates/manifest.yaml | 4 +
common/examples/blank/values.yaml | 2 +
common/examples/industrial-edge-factory.yaml | 112 +
common/examples/industrial-edge-hub.yaml | 241 +
common/examples/kustomize-renderer/Chart.yaml | 6 +
.../kustomize-renderer/environment.yaml | 34 +
.../kustomize-renderer/kustomization.yaml | 5 +
common/examples/kustomize-renderer/kustomize | 14 +
.../templates/environment.yaml | 34 +
.../examples/kustomize-renderer/values.yaml | 12 +
common/examples/medical-diagnosis-hub.yaml | 228 +
common/examples/secrets/values-secret.v1.yaml | 33 +
common/examples/secrets/values-secret.v2.yaml | 114 +
common/examples/values-example.yaml | 159 +
.../.github/workflows/update-helm-repo.yml | 29 +
common/golang-external-secrets/Chart.yaml | 11 +
common/golang-external-secrets/README.md | 14 +
.../charts/external-secrets-0.9.12.tgz | Bin 0 -> 93006 bytes
.../0001-runasuser-comment-out.patch | 48 +
...ternal-secrets-hub-clusterrolebinding.yaml | 23 +
.../golang-external-secrets-hub-role.yaml | 22 +
...lang-external-secrets-hub-secretstore.yaml | 34 +
...lang-external-secrets-hub-secretstore.yaml | 44 +
.../update-helm-dependency.sh | 29 +
common/golang-external-secrets/values.yaml | 46 +
.../.github/workflows/update-helm-repo.yml | 29 +
common/hashicorp-vault/Chart.yaml | 10 +
common/hashicorp-vault/README.md | 25 +
.../hashicorp-vault/charts/vault-0.27.0.tgz | Bin 0 -> 49088 bytes
.../0001-Allow-per-service-annotations.patch | 116 +
.../hashicorp-vault/templates/vault-app.yaml | 12 +
.../hashicorp-vault/update-helm-dependency.sh | 29 +
common/hashicorp-vault/values.yaml | 51 +
.../.github/workflows/update-helm-repo.yml | 29 +
common/letsencrypt/.helmignore | 23 +
common/letsencrypt/Chart.yaml | 16 +
common/letsencrypt/README.md | 68 +
common/letsencrypt/templates/api-cert.yaml | 28 +
.../templates/cert-manager-installation.yaml | 38 +
.../templates/credentials-request.yaml | 24 +
.../letsencrypt/templates/default-routes.yaml | 46 +
common/letsencrypt/templates/issuer.yaml | 25 +
common/letsencrypt/templates/namespaces.yaml | 20 +
.../letsencrypt/templates/wildcard-cert.yaml | 28 +
common/letsencrypt/values.yaml | 60 +
common/operator-install/Chart.yaml | 6 +
common/operator-install/README.md | 4 +
...ops.hybrid-cloud-patterns.io_patterns.yaml | 234 +
.../templates/pattern-operator-configmap.yaml | 13 +
.../operator-install/templates/pattern.yaml | 35 +
.../templates/subscription.yaml | 13 +
common/operator-install/values.yaml | 25 +
common/reference-output.yaml | 119 +
common/scripts/determine-main-clustergroup.sh | 16 +
common/scripts/determine-pattern-name.sh | 15 +
.../scripts/determine-secretstore-backend.sh | 15 +
common/scripts/display-secrets-info.sh | 30 +
common/scripts/lint.sh | 18 +
common/scripts/load-k8s-secrets.sh | 19 +
common/scripts/make_common_subtree.sh | 76 +
common/scripts/manage-secret-app.sh | 49 +
common/scripts/manage-secret-namespace.sh | 28 +
common/scripts/pattern-util.sh | 64 +
common/scripts/preview-all.sh | 15 +
common/scripts/preview.sh | 79 +
common/scripts/process-secrets.sh | 20 +
common/scripts/set-secret-backend.sh | 5 +
common/scripts/test.sh | 129 +
common/scripts/vault-utils.sh | 31 +
.../acm-industrial-edge-factory.expected.yaml | 111 +
.../acm-industrial-edge-hub.expected.yaml | 318 +
.../acm-medical-diagnosis-hub.expected.yaml | 309 +
common/tests/acm-naked.expected.yaml | 112 +
common/tests/acm-normal.expected.yaml | 808 ++
common/tests/acm.expected.diff | 651 +
...roup-industrial-edge-factory.expected.yaml | 781 ++
...tergroup-industrial-edge-hub.expected.yaml | 1603 +++
...rgroup-medical-diagnosis-hub.expected.yaml | 1677 +++
common/tests/clustergroup-naked.expected.yaml | 405 +
.../tests/clustergroup-normal.expected.yaml | 1172 ++
common/tests/clustergroup.expected.diff | 381 +
...rets-industrial-edge-factory.expected.yaml | 11512 ++++++++++++++++
...-secrets-industrial-edge-hub.expected.yaml | 11512 ++++++++++++++++
...ecrets-medical-diagnosis-hub.expected.yaml | 11512 ++++++++++++++++
...olang-external-secrets-naked.expected.yaml | 11512 ++++++++++++++++
...lang-external-secrets-normal.expected.yaml | 11512 ++++++++++++++++
.../golang-external-secrets.expected.diff | 11 +
...ault-industrial-edge-factory.expected.yaml | 408 +
...rp-vault-industrial-edge-hub.expected.yaml | 408 +
...-vault-medical-diagnosis-hub.expected.yaml | 408 +
.../tests/hashicorp-vault-naked.expected.yaml | 408 +
.../hashicorp-vault-normal.expected.yaml | 408 +
common/tests/hashicorp-vault.expected.diff | 11 +
...tall-industrial-edge-factory.expected.yaml | 66 +
.../install-industrial-edge-hub.expected.yaml | 66 +
...nstall-medical-diagnosis-hub.expected.yaml | 66 +
...rypt-industrial-edge-factory.expected.yaml | 202 +
...sencrypt-industrial-edge-hub.expected.yaml | 202 +
...ncrypt-medical-diagnosis-hub.expected.yaml | 202 +
common/tests/letsencrypt-naked.expected.yaml | 202 +
common/tests/letsencrypt-normal.expected.yaml | 202 +
...tall-industrial-edge-factory.expected.yaml | 47 +
...-install-industrial-edge-hub.expected.yaml | 47 +
...nstall-medical-diagnosis-hub.expected.yaml | 47 +
.../operator-install-naked.expected.yaml | 47 +
.../operator-install-normal.expected.yaml | 47 +
common/tests/operator-install.expected.diff | 11 +
common/values-global.yaml | 20 +
overrides/values-AWS.yaml | 26 +
overrides/values-IBMCloud.yaml | 10 +
pattern.sh | 1 +
...demo-industrial-edge-factory.expected.yaml | 161 +
...fig-demo-industrial-edge-hub.expected.yaml | 161 +
...g-demo-medical-diagnosis-hub.expected.yaml | 161 +
tests/all-config-demo-naked.expected.yaml | 161 +
tests/all-config-demo-normal.expected.yaml | 161 +
tests/all-config-demo.expected.diff | 13 +
...orld-industrial-edge-factory.expected.yaml | 120 +
...lo-world-industrial-edge-hub.expected.yaml | 120 +
...-world-medical-diagnosis-hub.expected.yaml | 120 +
tests/all-hello-world-naked.expected.yaml | 120 +
tests/all-hello-world-normal.expected.yaml | 120 +
...-acm-industrial-edge-factory.expected.yaml | 111 +
...mmon-acm-industrial-edge-hub.expected.yaml | 318 +
...on-acm-medical-diagnosis-hub.expected.yaml | 309 +
tests/common-acm-naked.expected.yaml | 112 +
tests/common-acm-normal.expected.yaml | 808 ++
tests/common-acm.expected.diff | 151 +
...roup-industrial-edge-factory.expected.yaml | 773 ++
...tergroup-industrial-edge-hub.expected.yaml | 1590 +++
...rgroup-medical-diagnosis-hub.expected.yaml | 1669 +++
tests/common-clustergroup-naked.expected.yaml | 405 +
.../common-clustergroup-normal.expected.yaml | 1169 ++
tests/common-clustergroup.expected.diff | 764 +
.../common-examples-blank-naked.expected.yaml | 6 +
...common-examples-blank-normal.expected.yaml | 6 +
tests/common-examples-blank.expected.diff | 0
...les-kustomize-renderer-naked.expected.yaml | 36 +
...es-kustomize-renderer-normal.expected.yaml | 36 +
...-examples-kustomize-renderer.expected.diff | 14 +
...rets-industrial-edge-factory.expected.yaml | 11512 ++++++++++++++++
...-secrets-industrial-edge-hub.expected.yaml | 11512 ++++++++++++++++
...ecrets-medical-diagnosis-hub.expected.yaml | 11512 ++++++++++++++++
...olang-external-secrets-naked.expected.yaml | 11512 ++++++++++++++++
...lang-external-secrets-normal.expected.yaml | 11512 ++++++++++++++++
...mmon-golang-external-secrets.expected.diff | 11 +
...ault-industrial-edge-factory.expected.yaml | 408 +
...rp-vault-industrial-edge-hub.expected.yaml | 408 +
...-vault-medical-diagnosis-hub.expected.yaml | 408 +
...common-hashicorp-vault-naked.expected.yaml | 408 +
...ommon-hashicorp-vault-normal.expected.yaml | 408 +
tests/common-hashicorp-vault.expected.diff | 11 +
...tall-industrial-edge-factory.expected.yaml | 66 +
...-install-industrial-edge-hub.expected.yaml | 66 +
...nstall-medical-diagnosis-hub.expected.yaml | 66 +
tests/common-install-naked.expected.yaml | 66 +
tests/common-install-normal.expected.yaml | 66 +
tests/common-install.expected.diff | 43 +
...rypt-industrial-edge-factory.expected.yaml | 202 +
...sencrypt-industrial-edge-hub.expected.yaml | 202 +
...ncrypt-medical-diagnosis-hub.expected.yaml | 202 +
tests/common-letsencrypt-naked.expected.yaml | 202 +
tests/common-letsencrypt-normal.expected.yaml | 202 +
...tall-industrial-edge-factory.expected.yaml | 47 +
...-install-industrial-edge-hub.expected.yaml | 47 +
...nstall-medical-diagnosis-hub.expected.yaml | 47 +
...ommon-operator-install-naked.expected.yaml | 47 +
...mmon-operator-install-normal.expected.yaml | 47 +
tests/common-operator-install.expected.diff | 11 +
tests/interop/__init__.py | 2 +
tests/interop/conftest.py | 51 +
tests/interop/crd.py | 55 +
tests/interop/css_logger.py | 57 +
tests/interop/edge_util.py | 147 +
.../interop/test_subscription_status_edge.py | 94 +
tests/interop/test_subscription_status_hub.py | 165 +
.../test_validate_edge_site_components.py | 240 +
.../test_validate_hub_site_components.py | 322 +
values-global.yaml | 11 +
values-group-one.yaml | 106 +
values-hub.yaml | 168 +
values-secret.yaml.template | 38 +
392 files changed, 158298 insertions(+)
create mode 100644 .ansible-lint
create mode 100644 .github/dependabot.yml
create mode 100644 .github/linters/.gitleaks.toml
create mode 100644 .github/linters/.markdown-lint.yml
create mode 100644 .github/workflows/ansible-lint.yml
create mode 100644 .github/workflows/jsonschema.yaml
create mode 100644 .github/workflows/linter.yml
create mode 100644 .github/workflows/superlinter.yml
create mode 100644 .gitignore
create mode 120000 .gitleaks.toml
create mode 100644 LICENSE
create mode 100644 Makefile
create mode 100644 README.md
create mode 100644 ansible.cfg
create mode 100644 ansible/site.yaml
create mode 100644 charts/all/config-demo/Chart.yaml
create mode 100644 charts/all/config-demo/templates/config-demo-cm.yaml
create mode 100644 charts/all/config-demo/templates/config-demo-deployment.yaml
create mode 100644 charts/all/config-demo/templates/config-demo-external-secret.yaml
create mode 100644 charts/all/config-demo/templates/config-demo-is.yaml
create mode 100644 charts/all/config-demo/templates/config-demo-route.yaml
create mode 100644 charts/all/config-demo/templates/config-demo-svc.yaml
create mode 100644 charts/all/config-demo/values.yaml
create mode 100644 charts/all/hello-world/Chart.yaml
create mode 100644 charts/all/hello-world/templates/hello-world-cm.yaml
create mode 100644 charts/all/hello-world/templates/hello-world-deployment.yaml
create mode 100644 charts/all/hello-world/templates/hello-world-route.yaml
create mode 100644 charts/all/hello-world/templates/hello-world-svc.yaml
create mode 100644 charts/all/hello-world/values.yaml
create mode 100644 charts/region/.keep
create mode 100644 common/.ansible-lint
create mode 100644 common/.github/dependabot.yml
create mode 100644 common/.github/linters/.gitleaks.toml
create mode 100644 common/.github/linters/.markdown-lint.yml
create mode 100644 common/.github/workflows/ansible-lint.yml
create mode 100644 common/.github/workflows/ansible-unittest.yml
create mode 100644 common/.github/workflows/chart-branches.yml
create mode 100644 common/.github/workflows/chart-split.yml
create mode 100644 common/.github/workflows/jsonschema.yaml
create mode 100644 common/.github/workflows/linter.yml
create mode 100644 common/.github/workflows/superlinter.yml
create mode 100644 common/.gitignore
create mode 120000 common/.gitleaks.toml
create mode 100644 common/Changes.md
create mode 100644 common/LICENSE
create mode 100644 common/Makefile
create mode 100644 common/README.md
create mode 100644 common/acm/.github/workflows/update-helm-repo.yml
create mode 100644 common/acm/.helmignore
create mode 100644 common/acm/Chart.yaml
create mode 100644 common/acm/templates/_helpers.tpl
create mode 100644 common/acm/templates/multiclusterhub.yaml
create mode 100644 common/acm/templates/policies/acm-hub-ca-policy.yaml
create mode 100644 common/acm/templates/policies/application-policies.yaml
create mode 100644 common/acm/templates/policies/ocp-gitops-policy.yaml
create mode 100644 common/acm/templates/policies/private-repo-policies.yaml
create mode 100644 common/acm/templates/provision/_install-config.tpl
create mode 100644 common/acm/templates/provision/clusterpool.yaml
create mode 100644 common/acm/templates/provision/secrets-aws.yaml
create mode 100644 common/acm/templates/provision/secrets-azure.yaml
create mode 100644 common/acm/templates/provision/secrets-common.yaml
create mode 100644 common/acm/test.yaml
create mode 100644 common/acm/values.yaml
create mode 100644 common/ansible/ansible.cfg
create mode 100644 common/ansible/playbooks/acm/acmhub-get-ca.yaml
create mode 100644 common/ansible/playbooks/hello-world/hello-world.yaml
create mode 100644 common/ansible/playbooks/iib-ci/iib-ci.yaml
create mode 100644 common/ansible/playbooks/iib-ci/lookup.yml
create mode 100644 common/ansible/playbooks/k8s_secrets/k8s_secrets.yml
create mode 100644 common/ansible/playbooks/process_secrets/display_secrets_info.yml
create mode 100644 common/ansible/playbooks/process_secrets/process_secrets.yml
create mode 100644 common/ansible/playbooks/vault/vault.yaml
create mode 100644 common/ansible/plugins/__init__.py
create mode 100644 common/ansible/plugins/filter/parse_acm_secrets.py
create mode 100644 common/ansible/plugins/module_utils/load_secrets_common.py
create mode 100644 common/ansible/plugins/module_utils/load_secrets_v1.py
create mode 100644 common/ansible/plugins/module_utils/load_secrets_v2.py
create mode 100644 common/ansible/plugins/module_utils/parse_secrets_v2.py
create mode 100644 common/ansible/plugins/modules/parse_secrets_info.py
create mode 100644 common/ansible/plugins/modules/vault_load_parsed_secrets.py
create mode 100644 common/ansible/plugins/modules/vault_load_secrets.py
create mode 100644 common/ansible/roles/cluster_pre_check/defaults/main.yml
create mode 100644 common/ansible/roles/cluster_pre_check/tasks/main.yml
create mode 100644 common/ansible/roles/find_vp_secrets/tasks/main.yml
create mode 100644 common/ansible/roles/iib_ci/README.md
create mode 100644 common/ansible/roles/iib_ci/defaults/main.yml
create mode 100644 common/ansible/roles/iib_ci/handlers/main.yml
create mode 100644 common/ansible/roles/iib_ci/meta/main.yml
create mode 100644 common/ansible/roles/iib_ci/tasks/fetch-operator-images.yml
create mode 100644 common/ansible/roles/iib_ci/tasks/install-iib-in-cluster.yml
create mode 100644 common/ansible/roles/iib_ci/tasks/main.yml
create mode 100644 common/ansible/roles/iib_ci/tasks/mirror-related-images.yml
create mode 100644 common/ansible/roles/iib_ci/tasks/setup-external-registry.yml
create mode 100644 common/ansible/roles/iib_ci/tasks/setup-internal-registry.yml
create mode 100644 common/ansible/roles/iib_ci/templates/catalogSource.yaml.j2
create mode 100644 common/ansible/roles/iib_ci/templates/htpasswd-oauth.yaml
create mode 100644 common/ansible/roles/iib_ci/templates/imageContentSourcePolicy.yaml.j2
create mode 100644 common/ansible/roles/iib_ci/templates/imageDigestMirror.yaml.j2
create mode 100644 common/ansible/roles/iib_ci/templates/mirror.map.j2
create mode 100644 common/ansible/roles/iib_ci/vars/main.yml
create mode 100644 common/ansible/roles/k8s_secret_utils/defaults/main.yml
create mode 100644 common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secret.yml
create mode 100644 common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secrets.yml
create mode 100644 common/ansible/roles/k8s_secret_utils/tasks/main.yml
create mode 100644 common/ansible/roles/k8s_secret_utils/tasks/parse_secrets.yml
create mode 100644 common/ansible/roles/vault_utils/README.md
create mode 100644 common/ansible/roles/vault_utils/defaults/main.yml
create mode 100644 common/ansible/roles/vault_utils/handlers/main.yml
create mode 100644 common/ansible/roles/vault_utils/meta/main.yml
create mode 100644 common/ansible/roles/vault_utils/tasks/main.yml
create mode 100644 common/ansible/roles/vault_utils/tasks/push_parsed_secrets.yaml
create mode 100644 common/ansible/roles/vault_utils/tasks/push_secrets.yaml
create mode 100644 common/ansible/roles/vault_utils/tasks/vault_init.yaml
create mode 100644 common/ansible/roles/vault_utils/tasks/vault_secrets_init.yaml
create mode 100644 common/ansible/roles/vault_utils/tasks/vault_spokes_init.yaml
create mode 100644 common/ansible/roles/vault_utils/tasks/vault_status.yaml
create mode 100644 common/ansible/roles/vault_utils/tasks/vault_unseal.yaml
create mode 100644 common/ansible/roles/vault_utils/tests/inventory
create mode 100644 common/ansible/roles/vault_utils/tests/test.yml
create mode 100644 common/ansible/roles/vault_utils/values-secrets.v1.schema.json
create mode 100644 common/ansible/roles/vault_utils/values-secrets.v2.schema.json
create mode 100644 common/ansible/roles/vault_utils/vars/main.yml
create mode 100644 common/ansible/tests/unit/test_ini_file.py
create mode 100644 common/ansible/tests/unit/test_parse_secrets.py
create mode 100644 common/ansible/tests/unit/test_util_datastructures.py
create mode 100644 common/ansible/tests/unit/test_vault_load_parsed_secrets.py
create mode 100644 common/ansible/tests/unit/test_vault_load_secrets.py
create mode 100644 common/ansible/tests/unit/test_vault_load_secrets_v2.py
create mode 100644 common/ansible/tests/unit/v1/mcg-values-secret.yaml
create mode 100644 common/ansible/tests/unit/v1/template-mcg-missing.yaml
create mode 100644 common/ansible/tests/unit/v1/template-mcg-working.yaml
create mode 100644 common/ansible/tests/unit/v1/values-secret-broken1.yaml
create mode 100644 common/ansible/tests/unit/v1/values-secret-broken2.yaml
create mode 100644 common/ansible/tests/unit/v1/values-secret-broken3.yaml
create mode 100644 common/ansible/tests/unit/v1/values-secret-empty-files.yaml
create mode 100644 common/ansible/tests/unit/v1/values-secret-empty-secrets.yaml
create mode 100644 common/ansible/tests/unit/v1/values-secret-fqdn.yaml
create mode 100644 common/ansible/tests/unit/v1/values-secret-good.yaml
create mode 100644 common/ansible/tests/unit/v2/aws-example.ini
create mode 100644 common/ansible/tests/unit/v2/test-file-contents
create mode 100644 common/ansible/tests/unit/v2/test-file-contents.b64
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-base-k8s-backend.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-base-none-backend.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-base-unknown-backend.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-base.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-block-yamlstring.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-default-annotations.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-default-labels.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-default-namespace.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-defaultvp-policy.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-emptyvaultprefix.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-file-contents-b64.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-file-contents-double-b64.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-file-contents.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-files-emptypath.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-files-wrong-onmissingvalue.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-files-wrongpath.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-generate-base64.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-generic-onlygenerate.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-ini-file-b64.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-ini-file.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-more-namespaces.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-nondefault-namespace.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-none-no-targetnamespaces.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-nonexisting-backingstore.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-nopolicies.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-novaultprefix.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-onlygenerate.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-override-labels.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-override-namespace.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-override-type-none.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-override-type.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-same-field-names.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-same-secret-names.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-secret-base64.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-secret-binary-b64.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-test-override.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-wrong-ini-file.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-wrong-onmissingvalue.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-wrong-override.yaml
create mode 100644 common/ansible/tests/unit/v2/values-secret-v2-wrong-vaultpolicy.yaml
create mode 100644 common/clustergroup/.github/workflows/update-helm-repo.yml
create mode 100644 common/clustergroup/.helmignore
create mode 100644 common/clustergroup/Chart.yaml
create mode 100644 common/clustergroup/templates/_helpers.tpl
create mode 100644 common/clustergroup/templates/core/catalog-sources.yaml
create mode 100644 common/clustergroup/templates/core/namespaces.yaml
create mode 100644 common/clustergroup/templates/core/operatorgroup.yaml
create mode 100644 common/clustergroup/templates/core/subscriptions.yaml
create mode 100644 common/clustergroup/templates/imperative/_helpers.tpl
create mode 100644 common/clustergroup/templates/imperative/clusterrole.yaml
create mode 100644 common/clustergroup/templates/imperative/configmap.yaml
create mode 100644 common/clustergroup/templates/imperative/job.yaml
create mode 100644 common/clustergroup/templates/imperative/namespace.yaml
create mode 100644 common/clustergroup/templates/imperative/rbac.yaml
create mode 100644 common/clustergroup/templates/imperative/role.yaml
create mode 100644 common/clustergroup/templates/imperative/serviceaccount.yaml
create mode 100644 common/clustergroup/templates/imperative/unsealjob.yaml
create mode 100644 common/clustergroup/templates/plumbing/applications.yaml
create mode 100644 common/clustergroup/templates/plumbing/argocd-cmp-plugin-cms.yaml
create mode 100644 common/clustergroup/templates/plumbing/argocd-super-role.yaml
create mode 100644 common/clustergroup/templates/plumbing/argocd.yaml
create mode 100644 common/clustergroup/templates/plumbing/cluster-external-secrets.yaml
create mode 100644 common/clustergroup/templates/plumbing/gitops-namespace.yaml
create mode 100644 common/clustergroup/templates/plumbing/hosted-sites.yaml
create mode 100644 common/clustergroup/templates/plumbing/projects.yaml
create mode 100644 common/clustergroup/values.schema.json
create mode 100644 common/clustergroup/values.yaml
create mode 120000 common/common
create mode 100644 common/examples/blank/Chart.yaml
create mode 100644 common/examples/blank/templates/manifest.yaml
create mode 100644 common/examples/blank/values.yaml
create mode 100644 common/examples/industrial-edge-factory.yaml
create mode 100644 common/examples/industrial-edge-hub.yaml
create mode 100644 common/examples/kustomize-renderer/Chart.yaml
create mode 100644 common/examples/kustomize-renderer/environment.yaml
create mode 100644 common/examples/kustomize-renderer/kustomization.yaml
create mode 100755 common/examples/kustomize-renderer/kustomize
create mode 100644 common/examples/kustomize-renderer/templates/environment.yaml
create mode 100644 common/examples/kustomize-renderer/values.yaml
create mode 100644 common/examples/medical-diagnosis-hub.yaml
create mode 100644 common/examples/secrets/values-secret.v1.yaml
create mode 100644 common/examples/secrets/values-secret.v2.yaml
create mode 100644 common/examples/values-example.yaml
create mode 100644 common/golang-external-secrets/.github/workflows/update-helm-repo.yml
create mode 100644 common/golang-external-secrets/Chart.yaml
create mode 100644 common/golang-external-secrets/README.md
create mode 100644 common/golang-external-secrets/charts/external-secrets-0.9.12.tgz
create mode 100644 common/golang-external-secrets/local-patches/0001-runasuser-comment-out.patch
create mode 100644 common/golang-external-secrets/templates/golang-external-secrets-hub-clusterrolebinding.yaml
create mode 100644 common/golang-external-secrets/templates/kubernetes/golang-external-secrets-hub-role.yaml
create mode 100644 common/golang-external-secrets/templates/kubernetes/golang-external-secrets-hub-secretstore.yaml
create mode 100644 common/golang-external-secrets/templates/vault/golang-external-secrets-hub-secretstore.yaml
create mode 100755 common/golang-external-secrets/update-helm-dependency.sh
create mode 100644 common/golang-external-secrets/values.yaml
create mode 100644 common/hashicorp-vault/.github/workflows/update-helm-repo.yml
create mode 100644 common/hashicorp-vault/Chart.yaml
create mode 100644 common/hashicorp-vault/README.md
create mode 100644 common/hashicorp-vault/charts/vault-0.27.0.tgz
create mode 100644 common/hashicorp-vault/local-patches/0001-Allow-per-service-annotations.patch
create mode 100644 common/hashicorp-vault/templates/vault-app.yaml
create mode 100755 common/hashicorp-vault/update-helm-dependency.sh
create mode 100644 common/hashicorp-vault/values.yaml
create mode 100644 common/letsencrypt/.github/workflows/update-helm-repo.yml
create mode 100644 common/letsencrypt/.helmignore
create mode 100644 common/letsencrypt/Chart.yaml
create mode 100644 common/letsencrypt/README.md
create mode 100644 common/letsencrypt/templates/api-cert.yaml
create mode 100644 common/letsencrypt/templates/cert-manager-installation.yaml
create mode 100644 common/letsencrypt/templates/credentials-request.yaml
create mode 100644 common/letsencrypt/templates/default-routes.yaml
create mode 100644 common/letsencrypt/templates/issuer.yaml
create mode 100644 common/letsencrypt/templates/namespaces.yaml
create mode 100644 common/letsencrypt/templates/wildcard-cert.yaml
create mode 100644 common/letsencrypt/values.yaml
create mode 100644 common/operator-install/Chart.yaml
create mode 100644 common/operator-install/README.md
create mode 100644 common/operator-install/crds/gitops.hybrid-cloud-patterns.io_patterns.yaml
create mode 100644 common/operator-install/templates/pattern-operator-configmap.yaml
create mode 100644 common/operator-install/templates/pattern.yaml
create mode 100644 common/operator-install/templates/subscription.yaml
create mode 100644 common/operator-install/values.yaml
create mode 100644 common/reference-output.yaml
create mode 100755 common/scripts/determine-main-clustergroup.sh
create mode 100755 common/scripts/determine-pattern-name.sh
create mode 100755 common/scripts/determine-secretstore-backend.sh
create mode 100755 common/scripts/display-secrets-info.sh
create mode 100755 common/scripts/lint.sh
create mode 100755 common/scripts/load-k8s-secrets.sh
create mode 100755 common/scripts/make_common_subtree.sh
create mode 100755 common/scripts/manage-secret-app.sh
create mode 100755 common/scripts/manage-secret-namespace.sh
create mode 100755 common/scripts/pattern-util.sh
create mode 100755 common/scripts/preview-all.sh
create mode 100755 common/scripts/preview.sh
create mode 100755 common/scripts/process-secrets.sh
create mode 100755 common/scripts/set-secret-backend.sh
create mode 100755 common/scripts/test.sh
create mode 100755 common/scripts/vault-utils.sh
create mode 100644 common/tests/acm-industrial-edge-factory.expected.yaml
create mode 100644 common/tests/acm-industrial-edge-hub.expected.yaml
create mode 100644 common/tests/acm-medical-diagnosis-hub.expected.yaml
create mode 100644 common/tests/acm-naked.expected.yaml
create mode 100644 common/tests/acm-normal.expected.yaml
create mode 100644 common/tests/acm.expected.diff
create mode 100644 common/tests/clustergroup-industrial-edge-factory.expected.yaml
create mode 100644 common/tests/clustergroup-industrial-edge-hub.expected.yaml
create mode 100644 common/tests/clustergroup-medical-diagnosis-hub.expected.yaml
create mode 100644 common/tests/clustergroup-naked.expected.yaml
create mode 100644 common/tests/clustergroup-normal.expected.yaml
create mode 100644 common/tests/clustergroup.expected.diff
create mode 100644 common/tests/golang-external-secrets-industrial-edge-factory.expected.yaml
create mode 100644 common/tests/golang-external-secrets-industrial-edge-hub.expected.yaml
create mode 100644 common/tests/golang-external-secrets-medical-diagnosis-hub.expected.yaml
create mode 100644 common/tests/golang-external-secrets-naked.expected.yaml
create mode 100644 common/tests/golang-external-secrets-normal.expected.yaml
create mode 100644 common/tests/golang-external-secrets.expected.diff
create mode 100644 common/tests/hashicorp-vault-industrial-edge-factory.expected.yaml
create mode 100644 common/tests/hashicorp-vault-industrial-edge-hub.expected.yaml
create mode 100644 common/tests/hashicorp-vault-medical-diagnosis-hub.expected.yaml
create mode 100644 common/tests/hashicorp-vault-naked.expected.yaml
create mode 100644 common/tests/hashicorp-vault-normal.expected.yaml
create mode 100644 common/tests/hashicorp-vault.expected.diff
create mode 100644 common/tests/install-industrial-edge-factory.expected.yaml
create mode 100644 common/tests/install-industrial-edge-hub.expected.yaml
create mode 100644 common/tests/install-medical-diagnosis-hub.expected.yaml
create mode 100644 common/tests/letsencrypt-industrial-edge-factory.expected.yaml
create mode 100644 common/tests/letsencrypt-industrial-edge-hub.expected.yaml
create mode 100644 common/tests/letsencrypt-medical-diagnosis-hub.expected.yaml
create mode 100644 common/tests/letsencrypt-naked.expected.yaml
create mode 100644 common/tests/letsencrypt-normal.expected.yaml
create mode 100644 common/tests/operator-install-industrial-edge-factory.expected.yaml
create mode 100644 common/tests/operator-install-industrial-edge-hub.expected.yaml
create mode 100644 common/tests/operator-install-medical-diagnosis-hub.expected.yaml
create mode 100644 common/tests/operator-install-naked.expected.yaml
create mode 100644 common/tests/operator-install-normal.expected.yaml
create mode 100644 common/tests/operator-install.expected.diff
create mode 100644 common/values-global.yaml
create mode 100644 overrides/values-AWS.yaml
create mode 100644 overrides/values-IBMCloud.yaml
create mode 120000 pattern.sh
create mode 100644 tests/all-config-demo-industrial-edge-factory.expected.yaml
create mode 100644 tests/all-config-demo-industrial-edge-hub.expected.yaml
create mode 100644 tests/all-config-demo-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/all-config-demo-naked.expected.yaml
create mode 100644 tests/all-config-demo-normal.expected.yaml
create mode 100644 tests/all-config-demo.expected.diff
create mode 100644 tests/all-hello-world-industrial-edge-factory.expected.yaml
create mode 100644 tests/all-hello-world-industrial-edge-hub.expected.yaml
create mode 100644 tests/all-hello-world-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/all-hello-world-naked.expected.yaml
create mode 100644 tests/all-hello-world-normal.expected.yaml
create mode 100644 tests/common-acm-industrial-edge-factory.expected.yaml
create mode 100644 tests/common-acm-industrial-edge-hub.expected.yaml
create mode 100644 tests/common-acm-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/common-acm-naked.expected.yaml
create mode 100644 tests/common-acm-normal.expected.yaml
create mode 100644 tests/common-acm.expected.diff
create mode 100644 tests/common-clustergroup-industrial-edge-factory.expected.yaml
create mode 100644 tests/common-clustergroup-industrial-edge-hub.expected.yaml
create mode 100644 tests/common-clustergroup-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/common-clustergroup-naked.expected.yaml
create mode 100644 tests/common-clustergroup-normal.expected.yaml
create mode 100644 tests/common-clustergroup.expected.diff
create mode 100644 tests/common-examples-blank-naked.expected.yaml
create mode 100644 tests/common-examples-blank-normal.expected.yaml
create mode 100644 tests/common-examples-blank.expected.diff
create mode 100644 tests/common-examples-kustomize-renderer-naked.expected.yaml
create mode 100644 tests/common-examples-kustomize-renderer-normal.expected.yaml
create mode 100644 tests/common-examples-kustomize-renderer.expected.diff
create mode 100644 tests/common-golang-external-secrets-industrial-edge-factory.expected.yaml
create mode 100644 tests/common-golang-external-secrets-industrial-edge-hub.expected.yaml
create mode 100644 tests/common-golang-external-secrets-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/common-golang-external-secrets-naked.expected.yaml
create mode 100644 tests/common-golang-external-secrets-normal.expected.yaml
create mode 100644 tests/common-golang-external-secrets.expected.diff
create mode 100644 tests/common-hashicorp-vault-industrial-edge-factory.expected.yaml
create mode 100644 tests/common-hashicorp-vault-industrial-edge-hub.expected.yaml
create mode 100644 tests/common-hashicorp-vault-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/common-hashicorp-vault-naked.expected.yaml
create mode 100644 tests/common-hashicorp-vault-normal.expected.yaml
create mode 100644 tests/common-hashicorp-vault.expected.diff
create mode 100644 tests/common-install-industrial-edge-factory.expected.yaml
create mode 100644 tests/common-install-industrial-edge-hub.expected.yaml
create mode 100644 tests/common-install-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/common-install-naked.expected.yaml
create mode 100644 tests/common-install-normal.expected.yaml
create mode 100644 tests/common-install.expected.diff
create mode 100644 tests/common-letsencrypt-industrial-edge-factory.expected.yaml
create mode 100644 tests/common-letsencrypt-industrial-edge-hub.expected.yaml
create mode 100644 tests/common-letsencrypt-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/common-letsencrypt-naked.expected.yaml
create mode 100644 tests/common-letsencrypt-normal.expected.yaml
create mode 100644 tests/common-operator-install-industrial-edge-factory.expected.yaml
create mode 100644 tests/common-operator-install-industrial-edge-hub.expected.yaml
create mode 100644 tests/common-operator-install-medical-diagnosis-hub.expected.yaml
create mode 100644 tests/common-operator-install-naked.expected.yaml
create mode 100644 tests/common-operator-install-normal.expected.yaml
create mode 100644 tests/common-operator-install.expected.diff
create mode 100644 tests/interop/__init__.py
create mode 100644 tests/interop/conftest.py
create mode 100644 tests/interop/crd.py
create mode 100644 tests/interop/css_logger.py
create mode 100644 tests/interop/edge_util.py
create mode 100644 tests/interop/test_subscription_status_edge.py
create mode 100644 tests/interop/test_subscription_status_hub.py
create mode 100644 tests/interop/test_validate_edge_site_components.py
create mode 100644 tests/interop/test_validate_hub_site_components.py
create mode 100644 values-global.yaml
create mode 100644 values-group-one.yaml
create mode 100644 values-hub.yaml
create mode 100644 values-secret.yaml.template
diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 00000000..040b3902
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,18 @@
+# Vim filetype=yaml
+---
+offline: false
+#requirements: ansible/execution_environment/requirements.yml
+
+exclude_paths:
+ - .cache/
+ - .github/
+ - charts/
+ - common/
+ - tests/
+
+# warn_list:
+# - yaml
+# - schema
+# - experimental
+# - risky-file-permissions
+# - var-spacing
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..a175e666
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,9 @@
+---
+version: 2
+updates:
+ # Check for updates to GitHub Actions every week
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+
diff --git a/.github/linters/.gitleaks.toml b/.github/linters/.gitleaks.toml
new file mode 100644
index 00000000..7f59b235
--- /dev/null
+++ b/.github/linters/.gitleaks.toml
@@ -0,0 +1,8 @@
+[whitelist]
+# As of v4, gitleaks only matches against filename, not path in the
+# files directive. Leaving content for backwards compatibility.
+files = [
+ "ansible/plugins/modules/*.py",
+ "ansible/tests/unit/test_*.py",
+ "ansible/tests/unit/*.yaml",
+]
diff --git a/.github/linters/.markdown-lint.yml b/.github/linters/.markdown-lint.yml
new file mode 100644
index 00000000..a0bc47d1
--- /dev/null
+++ b/.github/linters/.markdown-lint.yml
@@ -0,0 +1,6 @@
+{
+ "default": true,
+ "MD003": false,
+ "MD013": false,
+ "MD033": false
+}
\ No newline at end of file
diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml
new file mode 100644
index 00000000..c2b2981b
--- /dev/null
+++ b/.github/workflows/ansible-lint.yml
@@ -0,0 +1,17 @@
+name: Ansible Lint # feel free to pick your own name
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ # Important: This sets up your GITHUB_WORKSPACE environment variable
+ - uses: actions/checkout@v4
+
+ - name: Lint Ansible Playbook
+ uses: ansible/ansible-lint-action@v6
+    # Limit linting to the pattern's own playbooks under ansible/
+ with:
+ path: "ansible/"
diff --git a/.github/workflows/jsonschema.yaml b/.github/workflows/jsonschema.yaml
new file mode 100644
index 00000000..75a161ae
--- /dev/null
+++ b/.github/workflows/jsonschema.yaml
@@ -0,0 +1,72 @@
+---
+name: Verify json schema
+
+#
+# Documentation:
+# https://help.github.com/en/articles/workflow-syntax-for-github-actions
+#
+
+#############################
+# Start the job on all push #
+#############################
+on: [push, pull_request]
+
+###############
+# Set the Job #
+###############
+jobs:
+ jsonschema_tests:
+ # Name the Job
+ name: Json Schema tests
+ strategy:
+ matrix:
+ python-version: [3.11]
+ # Set the agent to run on
+ runs-on: ubuntu-latest
+
+ ##################
+ # Load all steps #
+ ##################
+ steps:
+ ##########################
+ # Checkout the code base #
+ ##########################
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+        # Fetch full git history so schema checks run against the complete checkout
+ fetch-depth: 0
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install check-jsonschema
+
+ - name: Install yq
+ uses: chrisdickinson/setup-yq@latest
+ with:
+ yq-version: v4.30.7
+
+ - name: Verify secrets json schema against templates
+ run: |
+ # check-jsonschema needs .yaml as an extension
+ cp ./values-secret.yaml.template ./values-secret.yaml
+ check-jsonschema --schemafile ./common/ansible/roles/vault_utils/values-secrets.v2.schema.json values-secret.yaml
+ rm -f ./values-secret.yaml
+
+ - name: Verify ClusterGroup values.schema.json against values-*yaml files
+ run: |
+ set -e; for i in values-hub.yaml values-group-one.yaml; do
+ echo "$i"
+ # disable shellcheck of single quotes in yq
+ # shellcheck disable=2016
+ yq eval-all '. as $item ireduce ({}; . * $item )' values-global.yaml "$i" > tmp.yaml
+ check-jsonschema --schemafile ./common/clustergroup/values.schema.json tmp.yaml
+ rm -f tmp.yaml
+ done
+
diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml
new file mode 100644
index 00000000..0440184d
--- /dev/null
+++ b/.github/workflows/linter.yml
@@ -0,0 +1,65 @@
+---
+name: Unit tests
+
+#
+# Documentation:
+# https://help.github.com/en/articles/workflow-syntax-for-github-actions
+#
+
+#############################
+# Start the job on all push #
+#############################
+on: [push, pull_request]
+
+###############
+# Set the Job #
+###############
+jobs:
+ build:
+ # Name the Job
+ name: Unit Test Code Base
+ # Set the agent to run on
+ runs-on: ubuntu-latest
+
+ ##################
+ # Load all steps #
+ ##################
+ steps:
+ ##########################
+ # Checkout the code base #
+ ##########################
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ # Full git history is needed to get a proper list of changed files within `super-linter`
+ fetch-depth: 0
+ - name: Setup helm
+ uses: azure/setup-helm@v3
+ with:
+ version: 'v3.13.2'
+ id: install
+
+ ################################
+ # Run Linter against code base #
+ ################################
+ # - name: Lint Code Base
+ # uses: github/super-linter@v4
+ # env:
+ # VALIDATE_ALL_CODEBASE: false
+ # DEFAULT_BRANCH: main
+ # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - name: Run make test
+ run: |
+ make test
+
+ - name: Run make helmlint
+ run: |
+ make helmlint
+
+ # Disable kubeconform for the time being
+ # - name: Run make helm kubeconform
+ # run: |
+ # curl -L -O https://github.com/yannh/kubeconform/releases/download/v0.4.13/kubeconform-linux-amd64.tar.gz
+ # tar xf kubeconform-linux-amd64.tar.gz
+ # sudo mv -v kubeconform /usr/local/bin
+ # make kubeconform
diff --git a/.github/workflows/superlinter.yml b/.github/workflows/superlinter.yml
new file mode 100644
index 00000000..2b35e91e
--- /dev/null
+++ b/.github/workflows/superlinter.yml
@@ -0,0 +1,38 @@
+---
+name: Super linter
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ # Name the Job
+ name: Super linter
+ # Set the agent to run on
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ # Full git history is needed to get a proper list of changed files within `super-linter`
+ fetch-depth: 0
+
+ ################################
+ # Run Linter against code base #
+ ################################
+ - name: Lint Code Base
+ uses: github/super-linter/slim@v5
+ env:
+ VALIDATE_ALL_CODEBASE: true
+ DEFAULT_BRANCH: main
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ # These are the validation we disable atm
+ VALIDATE_BASH: false
+ VALIDATE_JSCPD: false
+ VALIDATE_KUBERNETES_KUBECONFORM: false
+ VALIDATE_YAML: false
+ VALIDATE_ANSIBLE: false
+ # VALIDATE_DOCKERFILE_HADOLINT: false
+ # VALIDATE_MARKDOWN: false
+ # VALIDATE_NATURAL_LANGUAGE: false
+ # VALIDATE_TEKTON: false
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..3f3db957
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+*~
+*.swp
+*.swo
+values-secret*
+.*.expected.yaml
+pattern-vault.init
+vault.init
+super-linter.log
+common/pattern-vault.init
diff --git a/.gitleaks.toml b/.gitleaks.toml
new file mode 120000
index 00000000..c05303b9
--- /dev/null
+++ b/.gitleaks.toml
@@ -0,0 +1 @@
+.github/linters/.gitleaks.toml
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..39d07b75
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,25 @@
+.PHONY: default
+default: help
+
+.PHONY: help
+##@ Pattern tasks
+
+# No need to add a comment here as help is described in common/
+help:
+ @make -f common/Makefile MAKEFILE_LIST="Makefile common/Makefile" help
+
+%:
+ make -f common/Makefile $*
+
+.PHONY: install
+install: operator-deploy post-install ## installs the pattern and loads the secrets
+ @echo "Installed"
+
+.PHONY: post-install
+post-install: ## Post-install tasks
+ make load-secrets
+ @echo "Done"
+
+.PHONY: test
+test:
+ @make -f common/Makefile PATTERN_OPTS="-f values-global.yaml -f values-hub.yaml" test
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..e09fd810
--- /dev/null
+++ b/README.md
@@ -0,0 +1,19 @@
+# Multicloud Gitops
+
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+[Live build status](https://validatedpatterns.io/ci/?pattern=mcgitops)
+
+## Start Here
+
+If you've followed a link to this repository, but are not really sure what it contains
+or how to use it, head over to [Multicloud GitOps](https://validatedpatterns.io/patterns/multicloud-gitops/)
+for additional context and installation instructions.
+
+## Rationale
+
+The goal for this pattern is to:
+
+* Use a GitOps approach to manage hybrid and multi-cloud deployments across both public and private clouds.
+* Enable cross-cluster governance and application lifecycle management.
+* Securely manage secrets across the deployment.
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 00000000..516f8b84
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,6 @@
+[defaults]
+localhost_warning=False
+retry_files_enabled=False
+library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules
+roles_path=~/.ansible/roles:./ansible/roles:./common/ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
+filter_plugins=~/.ansible/plugins/filter:./ansible/plugins/filter:./common/ansible/plugins/filter:/usr/share/ansible/plugins/filter
diff --git a/ansible/site.yaml b/ansible/site.yaml
new file mode 100644
index 00000000..f0b7c28d
--- /dev/null
+++ b/ansible/site.yaml
@@ -0,0 +1,17 @@
+# This is only needed for RHPDS
+- name: MultiCloud-GitOps RHPDS bootstrap
+ hosts: localhost
+ connection: local
+ tasks:
+ # We cannot use .package or .dnf modules because python3 that is used comes
+ # from a virtualenv
+ - name: Launch the installation
+ ansible.builtin.command: ./pattern.sh make install
+ args:
+ chdir: "{{ lookup('env', 'PWD') }}"
+ register: output
+ changed_when: false
+
+ - name: Print output of installation
+ ansible.builtin.debug:
+ msg: "{{ output }}"
diff --git a/charts/all/config-demo/Chart.yaml b/charts/all/config-demo/Chart.yaml
new file mode 100644
index 00000000..abe0f0bd
--- /dev/null
+++ b/charts/all/config-demo/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart to build and deploy a demonstration of remote configuration enabled by ACM and Vault
+keywords:
+- pattern
+name: config-demo
+version: 0.0.1
diff --git a/charts/all/config-demo/templates/config-demo-cm.yaml b/charts/all/config-demo/templates/config-demo-cm.yaml
new file mode 100644
index 00000000..ac7fe991
--- /dev/null
+++ b/charts/all/config-demo/templates/config-demo-cm.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: config-demo-configmap
+ labels:
+ app.kubernetes.io/instance: config-demo
+data:
+ "index.html": |-
+
+
+
+
+ Config Demo
+
+
+
+ Hub Cluster domain is '{{ .Values.global.hubClusterDomain }}'
+ Pod is running on Local Cluster Domain '{{ .Values.global.localClusterDomain }}'
+
+
+ The secret is secret
+
+
+
diff --git a/charts/all/config-demo/templates/config-demo-deployment.yaml b/charts/all/config-demo/templates/config-demo-deployment.yaml
new file mode 100644
index 00000000..9deee702
--- /dev/null
+++ b/charts/all/config-demo/templates/config-demo-deployment.yaml
@@ -0,0 +1,65 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ application: config-demo
+ name: config-demo
+spec:
+ replicas: 2
+ revisionHistoryLimit: 3
+ selector:
+ matchLabels:
+ deploymentconfig: config-demo
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ app: config-demo
+ deploymentconfig: config-demo
+ name: config-demo
+ spec:
+ containers:
+ - name: apache
+ image: registry.access.redhat.com/ubi8/httpd-24:1-226
+ #imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /var/www/html
+ name: config-demo-configmap
+ - mountPath: /var/www/html/secret
+ readOnly: true
+ name: config-demo-secret
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ livenessProbe:
+ httpGet:
+ path: /index.html
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /index.html
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ volumes:
+ - name: config-demo-configmap
+ configMap:
+ defaultMode: 438
+ name: config-demo-configmap
+ - name: config-demo-secret
+ secret:
+ secretName: config-demo-secret
diff --git a/charts/all/config-demo/templates/config-demo-external-secret.yaml b/charts/all/config-demo/templates/config-demo-external-secret.yaml
new file mode 100644
index 00000000..0081dd87
--- /dev/null
+++ b/charts/all/config-demo/templates/config-demo-external-secret.yaml
@@ -0,0 +1,18 @@
+---
+apiVersion: "external-secrets.io/v1beta1"
+kind: ExternalSecret
+metadata:
+ name: config-demo-secret
+ namespace: config-demo
+spec:
+ refreshInterval: 15s
+ secretStoreRef:
+ name: {{ .Values.secretStore.name }}
+ kind: {{ .Values.secretStore.kind }}
+ target:
+ name: config-demo-secret
+ template:
+ type: Opaque
+ dataFrom:
+ - extract:
+ key: {{ .Values.configdemosecret.key }}
diff --git a/charts/all/config-demo/templates/config-demo-is.yaml b/charts/all/config-demo/templates/config-demo-is.yaml
new file mode 100644
index 00000000..6a1aea4e
--- /dev/null
+++ b/charts/all/config-demo/templates/config-demo-is.yaml
@@ -0,0 +1,12 @@
+apiVersion: image.openshift.io/v1
+kind: ImageStream
+metadata:
+ name: config-demo
+spec:
+ lookupPolicy:
+ local: true
+ tags:
+ - name: registry.access.redhat.com/ubi8/httpd-24
+ importPolicy: {}
+ referencePolicy:
+ type: Local
diff --git a/charts/all/config-demo/templates/config-demo-route.yaml b/charts/all/config-demo/templates/config-demo-route.yaml
new file mode 100644
index 00000000..85d2e38e
--- /dev/null
+++ b/charts/all/config-demo/templates/config-demo-route.yaml
@@ -0,0 +1,14 @@
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+ labels:
+ app: config-demo
+ name: config-demo
+spec:
+ port:
+ targetPort: 8080-tcp
+ to:
+ kind: Service
+ name: config-demo
+ weight: 100
+ wildcardPolicy: None
diff --git a/charts/all/config-demo/templates/config-demo-svc.yaml b/charts/all/config-demo/templates/config-demo-svc.yaml
new file mode 100644
index 00000000..517c1aad
--- /dev/null
+++ b/charts/all/config-demo/templates/config-demo-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: config-demo
+ name: config-demo
+spec:
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ app: config-demo
+ deploymentconfig: config-demo
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/charts/all/config-demo/values.yaml b/charts/all/config-demo/values.yaml
new file mode 100644
index 00000000..2dda4522
--- /dev/null
+++ b/charts/all/config-demo/values.yaml
@@ -0,0 +1,14 @@
+---
+secretStore:
+ name: vault-backend
+ kind: ClusterSecretStore
+
+configdemosecret:
+ key: secret/data/global/config-demo
+
+global:
+ hubClusterDomain: hub.example.com
+ localClusterDomain: region-one.example.com
+
+clusterGroup:
+ isHubCluster: true
diff --git a/charts/all/hello-world/Chart.yaml b/charts/all/hello-world/Chart.yaml
new file mode 100644
index 00000000..6c9611e3
--- /dev/null
+++ b/charts/all/hello-world/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart to show a webserver with no other dependencies
+keywords:
+- pattern
+name: hello-world
+version: 0.0.1
diff --git a/charts/all/hello-world/templates/hello-world-cm.yaml b/charts/all/hello-world/templates/hello-world-cm.yaml
new file mode 100644
index 00000000..e59561ca
--- /dev/null
+++ b/charts/all/hello-world/templates/hello-world-cm.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: hello-world-configmap
+ labels:
+ app.kubernetes.io/instance: hello-world
+data:
+ "index.html": |-
+
+
+
+
+ Hello World
+
+
+ Hello World!
+
+
+ Hub Cluster domain is '{{ .Values.global.hubClusterDomain }}'
+ Pod is running on Local Cluster Domain '{{ .Values.global.localClusterDomain }}'
+
+
+
diff --git a/charts/all/hello-world/templates/hello-world-deployment.yaml b/charts/all/hello-world/templates/hello-world-deployment.yaml
new file mode 100644
index 00000000..878ebf5f
--- /dev/null
+++ b/charts/all/hello-world/templates/hello-world-deployment.yaml
@@ -0,0 +1,58 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ application: hello-world
+ name: hello-world
+spec:
+ replicas: 1
+ revisionHistoryLimit: 3
+ selector:
+ matchLabels:
+ deploymentconfig: hello-world
+ template:
+ metadata:
+ labels:
+ app: hello-world
+ deploymentconfig: hello-world
+ name: hello-world
+ spec:
+ containers:
+ - name: apache
+ image: registry.access.redhat.com/ubi8/httpd-24:1-226
+ #imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /var/www/html
+ name: hello-world-configmap
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ livenessProbe:
+ httpGet:
+ path: /index.html
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /index.html
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ volumes:
+ - name: hello-world-configmap
+ configMap:
+ defaultMode: 438
+ name: hello-world-configmap
diff --git a/charts/all/hello-world/templates/hello-world-route.yaml b/charts/all/hello-world/templates/hello-world-route.yaml
new file mode 100644
index 00000000..e321f9e4
--- /dev/null
+++ b/charts/all/hello-world/templates/hello-world-route.yaml
@@ -0,0 +1,14 @@
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+ labels:
+ app: hello-world
+ name: hello-world
+spec:
+ port:
+ targetPort: 8080-tcp
+ to:
+ kind: Service
+ name: hello-world
+ weight: 100
+ wildcardPolicy: None
diff --git a/charts/all/hello-world/templates/hello-world-svc.yaml b/charts/all/hello-world/templates/hello-world-svc.yaml
new file mode 100644
index 00000000..597f6d54
--- /dev/null
+++ b/charts/all/hello-world/templates/hello-world-svc.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: hello-world
+ name: hello-world
+spec:
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ app: hello-world
+ deploymentconfig: hello-world
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/charts/all/hello-world/values.yaml b/charts/all/hello-world/values.yaml
new file mode 100644
index 00000000..55083f74
--- /dev/null
+++ b/charts/all/hello-world/values.yaml
@@ -0,0 +1,4 @@
+---
+global:
+ hubClusterDomain: hub.example.com
+  localClusterDomain: local.example.com
diff --git a/charts/region/.keep b/charts/region/.keep
new file mode 100644
index 00000000..e69de29b
diff --git a/common/.ansible-lint b/common/.ansible-lint
new file mode 100644
index 00000000..aaffc6b5
--- /dev/null
+++ b/common/.ansible-lint
@@ -0,0 +1,20 @@
+# Vim filetype=yaml
+---
+offline: false
+skip_list:
+ - name[template] # Allow Jinja templating inside task and play names
+ - template-instead-of-copy # Templated files should use template instead of copy
+ - yaml[line-length] # too long lines
+ - yaml[indentation] # Forcing lists to be always indented by 2 chars is silly IMO
+ - var-naming[no-role-prefix] # This would be too much churn for very little gain
+ - no-changed-when
+  # (duplicate var-naming[no-role-prefix] entry removed; it is already skipped above)
+
+# ansible-lint gh workflow cannot find ansible.cfg hence fails to import vault_utils role
+exclude_paths:
+ - ./ansible/playbooks/vault/vault.yaml
+ - ./ansible/playbooks/iib-ci/iib-ci.yaml
+ - ./ansible/playbooks/k8s_secrets/k8s_secrets.yml
+ - ./ansible/playbooks/process_secrets/process_secrets.yml
+ - ./ansible/playbooks/process_secrets/display_secrets_info.yml
+ - ./ansible/roles/vault_utils/tests/test.yml
diff --git a/common/.github/dependabot.yml b/common/.github/dependabot.yml
new file mode 100644
index 00000000..a175e666
--- /dev/null
+++ b/common/.github/dependabot.yml
@@ -0,0 +1,9 @@
+---
+version: 2
+updates:
+ # Check for updates to GitHub Actions every week
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+
diff --git a/common/.github/linters/.gitleaks.toml b/common/.github/linters/.gitleaks.toml
new file mode 100644
index 00000000..b80cdc04
--- /dev/null
+++ b/common/.github/linters/.gitleaks.toml
@@ -0,0 +1,8 @@
+[whitelist]
+# As of v4, gitleaks only matches against filename, not path in the
+# files directive. Leaving content for backwards compatibility.
+files = [
+ "ansible/plugins/modules/*.py",
+ "ansible/tests/unit/test_*.py",
+ "ansible/tests/unit/v1/*.yaml",
+]
diff --git a/common/.github/linters/.markdown-lint.yml b/common/.github/linters/.markdown-lint.yml
new file mode 100644
index 00000000..a0bc47d1
--- /dev/null
+++ b/common/.github/linters/.markdown-lint.yml
@@ -0,0 +1,6 @@
+{
+ "default": true,
+ "MD003": false,
+ "MD013": false,
+ "MD033": false
+}
\ No newline at end of file
diff --git a/common/.github/workflows/ansible-lint.yml b/common/.github/workflows/ansible-lint.yml
new file mode 100644
index 00000000..c2b2981b
--- /dev/null
+++ b/common/.github/workflows/ansible-lint.yml
@@ -0,0 +1,17 @@
+name: Ansible Lint # feel free to pick your own name
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ # Important: This sets up your GITHUB_WORKSPACE environment variable
+ - uses: actions/checkout@v4
+
+ - name: Lint Ansible Playbook
+ uses: ansible/ansible-lint-action@v6
+ # Let's point it to the path
+ with:
+ path: "ansible/"
diff --git a/common/.github/workflows/ansible-unittest.yml b/common/.github/workflows/ansible-unittest.yml
new file mode 100644
index 00000000..c9f7485a
--- /dev/null
+++ b/common/.github/workflows/ansible-unittest.yml
@@ -0,0 +1,52 @@
+---
+name: Ansible unit tests
+
+#
+# Documentation:
+# https://help.github.com/en/articles/workflow-syntax-for-github-actions
+#
+
+#############################
+# Start the job on all push #
+#############################
+on: [push, pull_request]
+
+###############
+# Set the Job #
+###############
+jobs:
+ ansible_unittests:
+ # Name the Job
+ name: Ansible unit tests
+ strategy:
+ matrix:
+ python-version: [3.11.3]
+ # Set the agent to run on
+ runs-on: ubuntu-latest
+
+ ##################
+ # Load all steps #
+ ##################
+ steps:
+ ##########################
+ # Checkout the code base #
+ ##########################
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ # Full git history is needed to get a proper list of changed files within `super-linter`
+ fetch-depth: 0
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install pytest ansible
+
+ - name: Run make ansible-unittest
+ run: |
+ make ansible-unittest
diff --git a/common/.github/workflows/chart-branches.yml b/common/.github/workflows/chart-branches.yml
new file mode 100644
index 00000000..1a4fb455
--- /dev/null
+++ b/common/.github/workflows/chart-branches.yml
@@ -0,0 +1,118 @@
+---
+name: Create per-chart branches
+
+# We only run this job on the charts that will be later moved to full blown charts
+# We also want to run the subtree command only for the charts that have been actually changed
+# because git subtree split is a bit of an expensive operation
+# github actions do not support yaml anchors so there is more duplication than usual
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'acm/**'
+ - 'golang-external-secrets/**'
+ - 'hashicorp-vault/**'
+ - 'letsencrypt/**'
+ - 'clustergroup/**'
+
+jobs:
+ changes:
+ name: Figure out per-chart changes
+ if: github.repository == 'validatedpatterns/common'
+ runs-on: ubuntu-latest
+ permissions: read-all
+ outputs:
+ acm: ${{ steps.filter.outputs.acm }}
+ golang-external-secrets: ${{ steps.filter.outputs.golang-external-secrets }}
+ hashicorp-vault: ${{ steps.filter.outputs.hashicorp-vault }}
+ letsencrypt: ${{ steps.filter.outputs.letsencrypt }}
+ clustergroup: ${{ steps.filter.outputs.clustergroup }}
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+
+ - uses: dorny/paths-filter@v3
+ id: filter
+ with:
+ filters: |
+ acm:
+ - 'acm/**'
+ golang-external-secrets:
+ - 'golang-external-secrets/**'
+ hashicorp-vault:
+ - 'hashicorp-vault/**'
+ letsencrypt:
+ - 'letsencrypt/**'
+ clustergroup:
+ - 'clustergroup/**'
+
+ acm:
+ needs: changes
+ if: |
+      needs.changes.outputs.acm == 'true' &&
+ github.repository == 'validatedpatterns/common'
+ uses: validatedpatterns/common/.github/workflows/chart-split.yml@main
+ permissions:
+ actions: write
+ contents: write
+ with:
+ chart_name: acm
+ target_repository: validatedpatterns/acm-chart
+ secrets: inherit
+
+ golang-external-secrets:
+ needs: changes
+ if: |
+      needs.changes.outputs.golang-external-secrets == 'true' &&
+ github.repository == 'validatedpatterns/common'
+ uses: validatedpatterns/common/.github/workflows/chart-split.yml@main
+ permissions:
+ actions: write
+ contents: write
+ with:
+ chart_name: golang-external-secrets
+ target_repository: validatedpatterns/golang-external-secrets-chart
+ secrets: inherit
+
+ hashicorp-vault:
+ needs: changes
+ if: |
+      needs.changes.outputs.hashicorp-vault == 'true' &&
+ github.repository == 'validatedpatterns/common'
+ uses: validatedpatterns/common/.github/workflows/chart-split.yml@main
+ permissions:
+ actions: write
+ contents: write
+ with:
+ chart_name: hashicorp-vault
+ target_repository: validatedpatterns/hashicorp-vault-chart
+ secrets: inherit
+
+ letsencrypt:
+ needs: changes
+ if: |
+      needs.changes.outputs.letsencrypt == 'true' &&
+ github.repository == 'validatedpatterns/common'
+ uses: validatedpatterns/common/.github/workflows/chart-split.yml@main
+ permissions:
+ actions: write
+ contents: write
+ with:
+ chart_name: letsencrypt
+ target_repository: validatedpatterns/letsencrypt-chart
+ secrets: inherit
+
+ clustergroup:
+ needs: changes
+ if: |
+      needs.changes.outputs.clustergroup == 'true' &&
+ github.repository == 'validatedpatterns/common'
+ uses: validatedpatterns/common/.github/workflows/chart-split.yml@main
+ permissions:
+ actions: write
+ contents: write
+ with:
+ chart_name: clustergroup
+ target_repository: validatedpatterns/clustergroup-chart
+ secrets: inherit
diff --git a/common/.github/workflows/chart-split.yml b/common/.github/workflows/chart-split.yml
new file mode 100644
index 00000000..2792d6ad
--- /dev/null
+++ b/common/.github/workflows/chart-split.yml
@@ -0,0 +1,38 @@
+---
+name: Split into chart repo branches
+
+on:
+ workflow_call:
+ inputs:
+ chart_name:
+ required: true
+ type: string
+ target_repository:
+ required: true
+ type: string
+
+jobs:
+ split_chart:
+ runs-on: ubuntu-latest
+ permissions:
+ actions: write
+ contents: write
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ token: ${{ secrets.CHARTS_REPOS_TOKEN }}
+
+ - name: Run git subtree split and push
+ env:
+ GITHUB_TOKEN: ${{ secrets.CHARTS_REPOS_TOKEN }}
+ run: |
+ set -e
+ N="${{ inputs.chart_name }}"
+ B="${N}-main-single-chart"
+ git push origin -d "${B}" || /bin/true
+ git subtree split -P "${N}" -b "${B}"
+ git push -f -u origin "${B}"
+ #git clone https://validatedpatterns:${GITHUB_TOKEN}@github.com/validatedpatterns/common.git -b "acm-main-single-chart" --single-branch
+ git push --force https://validatedpatterns:"${GITHUB_TOKEN}"@github.com/${{ inputs.target_repository }}.git "${B}:main"
diff --git a/common/.github/workflows/jsonschema.yaml b/common/.github/workflows/jsonschema.yaml
new file mode 100644
index 00000000..e47de928
--- /dev/null
+++ b/common/.github/workflows/jsonschema.yaml
@@ -0,0 +1,57 @@
+---
+name: Verify json schema
+
+#
+# Documentation:
+# https://help.github.com/en/articles/workflow-syntax-for-github-actions
+#
+
+#############################
+# Start the job on all push #
+#############################
+on: [push, pull_request]
+
+###############
+# Set the Job #
+###############
+jobs:
+ jsonschema_tests:
+ # Name the Job
+ name: Json Schema tests
+ strategy:
+ matrix:
+ python-version: [3.11.3]
+ # Set the agent to run on
+ runs-on: ubuntu-latest
+
+ ##################
+ # Load all steps #
+ ##################
+ steps:
+ ##########################
+ # Checkout the code base #
+ ##########################
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ # Full git history is needed to get a proper list of changed files within `super-linter`
+ fetch-depth: 0
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install check-jsonschema
+
+ - name: Verify secrets json schema
+ run: |
+ check-jsonschema --schemafile ./ansible/roles/vault_utils/values-secrets.v1.schema.json examples/secrets/values-secret.v1.yaml
+ check-jsonschema --schemafile ./ansible/roles/vault_utils/values-secrets.v2.schema.json examples/secrets/values-secret.v2.yaml
+
+ - name: Verify ClusterGroup values.schema.json
+ run: |
+ set -e; for i in examples/*yaml; do echo "$i"; check-jsonschema --schemafile ./clustergroup/values.schema.json "$i"; done
diff --git a/common/.github/workflows/linter.yml b/common/.github/workflows/linter.yml
new file mode 100644
index 00000000..39aa63cb
--- /dev/null
+++ b/common/.github/workflows/linter.yml
@@ -0,0 +1,65 @@
+---
+name: Unit test common
+
+#
+# Documentation:
+# https://help.github.com/en/articles/workflow-syntax-for-github-actions
+#
+
+#############################
+# Start the job on all push #
+#############################
+on: [push, pull_request]
+
+###############
+# Set the Job #
+###############
+jobs:
+ build:
+ # Name the Job
+ name: Unit common/ Code Base
+ # Set the agent to run on
+ runs-on: ubuntu-latest
+
+ ##################
+ # Load all steps #
+ ##################
+ steps:
+ ##########################
+ # Checkout the code base #
+ ##########################
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ # Full git history is needed to get a proper list of changed files within `super-linter`
+ fetch-depth: 0
+ - name: Setup helm
+ uses: azure/setup-helm@v3
+ with:
+ version: 'v3.13.2'
+
+
+ ################################
+ # Run Linter against code base #
+ ################################
+ # - name: Lint Code Base
+ # uses: github/super-linter@v4
+ # env:
+ # VALIDATE_ALL_CODEBASE: false
+ # DEFAULT_BRANCH: main
+ # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - name: Run make test
+ run: |
+ make test
+
+ - name: Run make helmlint
+ run: |
+ make helmlint
+
+ # For now disable this until we have a nice and simple process to update the schemas in our repo
+ # - name: Run make helm kubeconform
+ # run: |
+ # curl -L -O https://github.com/yannh/kubeconform/releases/download/v0.4.13/kubeconform-linux-amd64.tar.gz
+ # tar xf kubeconform-linux-amd64.tar.gz
+ # sudo mv -v kubeconform /usr/local/bin
+ # make kubeconform
diff --git a/common/.github/workflows/superlinter.yml b/common/.github/workflows/superlinter.yml
new file mode 100644
index 00000000..7430db09
--- /dev/null
+++ b/common/.github/workflows/superlinter.yml
@@ -0,0 +1,38 @@
+---
+name: Super linter
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ # Name the Job
+ name: Super linter
+ # Set the agent to run on
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v4
+ with:
+ # Full git history is needed to get a proper list of changed files within `super-linter`
+ fetch-depth: 0
+
+ ################################
+ # Run Linter against code base #
+ ################################
+ - name: Lint Code Base
+ uses: github/super-linter/slim@v5
+ env:
+ VALIDATE_ALL_CODEBASE: true
+ DEFAULT_BRANCH: main
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ # These are the validation we disable atm
+ VALIDATE_ANSIBLE: false
+ VALIDATE_BASH: false
+ VALIDATE_JSCPD: false
+ VALIDATE_KUBERNETES_KUBECONFORM: false
+ VALIDATE_YAML: false
+ # VALIDATE_DOCKERFILE_HADOLINT: false
+ # VALIDATE_MARKDOWN: false
+ # VALIDATE_NATURAL_LANGUAGE: false
+ # VALIDATE_TEKTON: false
diff --git a/common/.gitignore b/common/.gitignore
new file mode 100644
index 00000000..454efc9e
--- /dev/null
+++ b/common/.gitignore
@@ -0,0 +1,13 @@
+__pycache__/
+*.py[cod]
+*~
+*.swp
+*.swo
+values-secret.yaml
+.*.expected.yaml
+.vscode
+pattern-vault.init
+pattern-vault.init.bak
+super-linter.log
+golang-external-secrets/Chart.lock
+hashicorp-vault/Chart.lock
diff --git a/common/.gitleaks.toml b/common/.gitleaks.toml
new file mode 120000
index 00000000..c05303b9
--- /dev/null
+++ b/common/.gitleaks.toml
@@ -0,0 +1 @@
+.github/linters/.gitleaks.toml
\ No newline at end of file
diff --git a/common/Changes.md b/common/Changes.md
new file mode 100644
index 00000000..ed7d4bf6
--- /dev/null
+++ b/common/Changes.md
@@ -0,0 +1,145 @@
+# Changes
+
+## Sep 25, 2023
+
+* Upgraded ESO to v0.9.5
+
+## Aug 17, 2023
+
+* Introduced support for multisource applications via .chart + .chartVersion
+
+## Jul 8, 2023
+
+* Introduced a default of 20 for sync failures retries in argo applications (global override via global.options.applicationRetryLimit
+ and per-app override via .syncPolicy)
+
+## May 22, 2023
+
+* Upgraded ESO to 0.8.2
+* *Important* we now use the newly blessed sso config for argo. This means that gitops < 1.8 are *unsupported*
+
+## May 18, 2023
+
+* Introduce a EXTRA_HELM_OPTS env variable that will be passed to the helm invocations
+
+## April 21, 2023
+
+* Added labels and annotation support to namespaces.yaml template
+
+## Apr 11, 2023
+
+* Apply the ACM ocp-gitops-policy everywhere but the hub
+
+## Apr 7, 2023
+
+* Moved to gitops-1.8 channel by default (stable is unmaintained and will be dropped starting with ocp-4.13)
+
+## March 20, 2023
+
+* Upgraded ESO to 0.8.1
+
+## February 9, 2023
+
+* Add support for /values-.yaml and for /values--.yaml
+
+## January 29, 2023
+
+* Stop extracting the HUB's CA via an imperative job running on the imported cluster.
+ Just use ACM to push the HUB's CA out to the managed clusters.
+
+## January 23, 2023
+
+* Add initial support for running ESO on ACM-imported clusters
+
+## January 18, 2023
+
+* Add validate-schema target
+
+## January 13, 2023
+
+* Simplify the secrets paths when using argo hosted sites
+
+## January 10, 2023
+
+* vaultPrefixes is now optional in the v2 secret spec and defaults to ["hub"]
+
+## December 9, 2022
+
+* Dropped insecureUnsealVaultInsideCluster (and file_unseal) entirely. Now
+ vault is always unsealed via a cronjob in the cluster. It is recommended to
+ store the imperative/vaultkeys secret offline securely and then delete it.
+
+## December 8, 2022
+
+* Removed the legacy installation targets:
+ `deploy upgrade legacy-deploy legacy-upgrade`
+ Patterns must now use the operator-based installation
+
+## November 29, 2022
+
+* Upgraded vault-helm to 0.23.0
+* Enable vault-ssl by default
+
+## November 22, 2022
+
+* Implemented a new format for the values-secret.yaml. Example can be found in examples/ folder
+* Now the order of values-secret file lookup is the following:
+ 1. ~/values-secret-.yaml
+ 2. ~/values-secret.yaml
+ 3. /values-secret.yaml.template
+* Add support for ansible vault encrypted values-secret files. You can now encrypt your values-secret file
+ at rest with `ansible-vault encrypt ~/values-secret.yaml`. When running `make load-secrets` if an encrypted
+ file is encountered the user will be prompted automatically for the password to decrypt it.
+
+## November 6, 2022
+
+* Add support for /values--.yaml (e.g. /values-AWS-group-one.yaml)
+
+## October 28, 2022
+
+* Updated vault helm chart to v0.22.1 and vault containers to 1.12.0
+
+## October 25, 2022
+
+* Updated External Secrets Operator to v0.6.0
+* Moved to -UBI based ESO containers
+
+## October 13, 2022
+
+* Added global.clusterVersion as a new helm variable which represents the OCP
+ Major.Minor cluster version. By default now a user can add a
+ values--.yaml file to have specific cluster version
+ overrides (e.g. values-4.10-hub.yaml). Will need Validated Patterns Operator >= 0.0.6
+ when deploying with the operator. Note: When using the ArgoCD Hub and spoke model,
+ you cannot have spokes with a different version of OCP than the hub.
+
+## October 4, 2022
+
+* Extended the values-secret.yaml file to support multiple vault paths and re-wrote
+ the push_secrets feature as python module plugin. This requires the following line
+ in a pattern's ansible.cfg's '[defaults]' stanza:
+
+ `library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules`
+
+## October 3, 2022
+
+* Restore the ability to install a non-default site: `make TARGET_SITE=mysite install`
+* Revised tests (new output and filenames, requires adding new result files to git)
+* ACM 2.6 required for ACM-based managed sites
+* Introduced global.clusterDomain template variable (without the `apps.` prefix)
+* Removed the ability to send specific charts to another cluster, use hosted argo sites instead
+* Added the ability to have the hub host `values-{site}.yaml` for spoke clusters.
+
+ The following example would deploy the namespaces, subscriptions, and
+ applications defined in `values-group-one.yaml` to the `perth` cluster
+ directly from ArgoCD on the hub.
+
+ ```yaml
+ managedClusterGroups:
+ - name: group-one
+ hostedArgoSites:
+ - name: perth
+ domain: perth1.beekhof.net
+ bearerKeyPath: secret/data/hub/cluster_perth
+ caKeyPath: secret/data/hub/cluster_perth_ca
+ ```
diff --git a/common/LICENSE b/common/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/common/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/common/Makefile b/common/Makefile
new file mode 100644
index 00000000..0d5d0a36
--- /dev/null
+++ b/common/Makefile
@@ -0,0 +1,256 @@
+NAME ?= $(shell basename "`pwd`")
+ifneq ($(origin TARGET_SITE), undefined)
+ TARGET_SITE_OPT=--set main.clusterGroupName=$(TARGET_SITE)
+endif
+
+# This variable can be set in order to pass additional helm arguments from
+# the command line. I.e. we can set things without having to tweak values files
+EXTRA_HELM_OPTS ?=
+
+# INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248
+# or
+# INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248,registry-proxy.engineering.redhat.com/rh-osbs/iib:394249
+INDEX_IMAGES ?=
+
+TARGET_ORIGIN ?= origin
+# This is to ensure that whether we start with a git@ or https:// URL, we end up with an https:// URL
+# This is because we expect to use tokens for repo authentication as opposed to SSH keys
+TARGET_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN) | sed -e 's/.*URL:[[:space:]]*//' -e 's%^git@%%' -e 's%^https://%%' -e 's%:%/%' -e 's%^%https://%')
+# git branch --show-current is also available as of git 2.22, but we will use this for compatibility
+TARGET_BRANCH=$(shell git rev-parse --abbrev-ref HEAD)
+
+UUID_FILE ?= ~/.config/validated-patterns/pattern-uuid
+UUID_HELM_OPTS ?=
+
+# --set values always take precedence over the contents of -f
+ifneq ("$(wildcard $(UUID_FILE))","")
+ UUID := $(shell cat $(UUID_FILE))
+ UUID_HELM_OPTS := --set main.analyticsUUID=$(UUID)
+endif
+
+# Set the secret name *and* its namespace when deploying from private repositories
+# The format of said secret is documented here: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories
+TOKEN_SECRET ?=
+TOKEN_NAMESPACE ?=
+
+ifeq ($(TOKEN_SECRET),)
+ HELM_OPTS=-f values-global.yaml --set main.git.repoURL="$(TARGET_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS)
+else
+ # When we are working with a private repository we do not escape the git URL as it might be using an ssh secret which does not use https://
+ TARGET_CLEAN_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN))
+ HELM_OPTS=-f values-global.yaml --set main.tokenSecret=$(TOKEN_SECRET) --set main.tokenSecretNamespace=$(TOKEN_NAMESPACE) --set main.git.repoURL="$(TARGET_CLEAN_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS)
+endif
+
+
+##@ Pattern Common Tasks
+
+.PHONY: help
+help: ## This help message
+ @echo "Pattern: $(NAME)"
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^(\s|[a-zA-Z_0-9-])+:.*?##/ { printf "  \033[36m%-35s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+# Makefiles in the individual patterns should call these targets explicitly
+# e.g. from industrial-edge: make -f common/Makefile show
+.PHONY: show
+show: ## show the starting template without installing it
+ helm template common/operator-install/ --name-template $(NAME) $(HELM_OPTS)
+
+preview-all:
+ @common/scripts/preview-all.sh $(TARGET_REPO) $(TARGET_BRANCH)
+
+preview-%:
+ CLUSTERGROUP?=$(shell yq ".main.clusterGroupName" values-global.yaml)
+ @common/scripts/preview.sh $(CLUSTERGROUP) $* $(TARGET_REPO) $(TARGET_BRANCH)
+
+.PHONY: operator-deploy
+operator-deploy operator-upgrade: validate-prereq validate-origin validate-cluster ## runs helm install
+ @set -e -o pipefail
+ # Retry five times because the CRD might not be fully installed yet
+ for i in {1..5}; do \
+ helm template --include-crds --name-template $(NAME) common/operator-install/ $(HELM_OPTS) | oc apply -f- && break || sleep 10; \
+ done
+
+.PHONY: uninstall
+uninstall: ## runs helm uninstall
+ $(eval CSV := $(shell oc get subscriptions -n openshift-operators openshift-gitops-operator -ojsonpath={.status.currentCSV}))
+ helm uninstall $(NAME)
+ @oc delete csv -n openshift-operators $(CSV)
+
+.PHONY: load-secrets
+load-secrets: ## loads the secrets into the backend determined by values-global setting
+ common/scripts/process-secrets.sh $(NAME)
+
+.PHONY: legacy-load-secrets
+legacy-load-secrets: ## loads the secrets into vault (only)
+ common/scripts/vault-utils.sh push_secrets $(NAME)
+
+.PHONY: secrets-backend-vault
+secrets-backend-vault: ## Edits values files to use default Vault+ESO secrets config
+ common/scripts/set-secret-backend.sh vault
+ common/scripts/manage-secret-app.sh vault present
+ common/scripts/manage-secret-app.sh golang-external-secrets present
+ common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent
+ @git diff --exit-code || echo "Secrets backend set to vault, please review changes, commit, and push to activate in the pattern"
+
+.PHONY: secrets-backend-kubernetes
+secrets-backend-kubernetes: ## Edits values file to use Kubernetes+ESO secrets config
+ common/scripts/set-secret-backend.sh kubernetes
+ common/scripts/manage-secret-namespace.sh validated-patterns-secrets present
+ common/scripts/manage-secret-app.sh vault absent
+ common/scripts/manage-secret-app.sh golang-external-secrets present
+ @git diff --exit-code || echo "Secrets backend set to kubernetes, please review changes, commit, and push to activate in the pattern"
+
+.PHONY: secrets-backend-none
+secrets-backend-none: ## Edits values files to remove secrets manager + ESO
+ common/scripts/set-secret-backend.sh none
+ common/scripts/manage-secret-app.sh vault absent
+ common/scripts/manage-secret-app.sh golang-external-secrets absent
+ common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent
+ @git diff --exit-code || echo "Secrets backend set to none, please review changes, commit, and push to activate in the pattern"
+
+.PHONY: load-iib
+load-iib: ## CI target to install Index Image Bundles
+ @set -e; if [ x$(INDEX_IMAGES) != x ]; then \
+ for IIB in $(shell echo $(INDEX_IMAGES) | tr ',' '\n'); do \
+ INDEX_IMAGE="$${IIB}" ansible-playbook common/ansible/playbooks/iib-ci/iib-ci.yaml; \
+ done; \
+ else \
+ echo "No INDEX_IMAGES defined. Bailing out"; \
+ exit 1; \
+ fi
+
+
+##@ Validation Tasks
+
+# We only check the remote ssh git branch's existence if we're not running inside a container
+# as getting ssh auth working inside a container seems a bit brittle
+.PHONY: validate-origin
+validate-origin: ## verify the git origin is available
+ @echo "Checking repository:"
+ @echo -n " $(TARGET_REPO) - branch '$(TARGET_BRANCH)': "
+ @git ls-remote --exit-code --heads $(TARGET_REPO) $(TARGET_BRANCH) >/dev/null &&\
+ echo "OK" || (echo "NOT FOUND"; exit 1)
+
+.PHONY: validate-cluster
+validate-cluster: ## Do some cluster validations before installing
+ @echo "Checking cluster:"
+ @echo -n " cluster-info: "
+ @oc cluster-info >/dev/null && echo "OK" || (echo "Error"; exit 1)
+ @echo -n " storageclass: "
+ @if [ `oc get storageclass -o go-template='{{printf "%d\n" (len .items)}}'` -eq 0 ]; then\
+ echo "None Found"; exit 1;\
+ else\
+ echo "OK";\
+ fi
+
+
+.PHONY: validate-schema
+validate-schema: ## validates values files against schema in common/clustergroup
+ $(eval VAL_PARAMS := $(shell for i in ./values-*.yaml; do echo -n "$${i} "; done))
+ @echo -n "Validating clustergroup schema of: "
+ @set -e; for i in $(VAL_PARAMS); do echo -n " $$i"; helm template common/clustergroup $(HELM_OPTS) -f "$${i}" >/dev/null; done
+ @echo
+
+.PHONY: validate-prereq
+validate-prereq: ## verify pre-requisites
+ @if [ ! -f /run/.containerenv ]; then\
+ echo "Checking prerequisites:";\
+ for t in $(EXECUTABLES); do if ! which $$t > /dev/null 2>&1; then echo "No $$t in PATH"; exit 1; fi; done;\
+ echo " Check for '$(EXECUTABLES)': OK";\
+ echo -n " Check for python-kubernetes: ";\
+ if ! ansible -m ansible.builtin.command -a "{{ ansible_python_interpreter }} -c 'import kubernetes'" localhost > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\
+ echo "OK";\
+ echo -n " Check for kubernetes.core collection: ";\
+ if ! ansible-galaxy collection list | grep kubernetes.core > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\
+ echo "OK";\
+ else\
+ echo "Skipping prerequisites check as we're running inside a container";\
+ fi
+
+.PHONY: argo-healthcheck
+argo-healthcheck: ## Checks if all argo applications are synced
+ @echo "Checking argo applications"
+ $(eval APPS := $(shell oc get applications -A -o jsonpath='{range .items[*]}{@.metadata.namespace}{","}{@.metadata.name}{"\n"}{end}'))
+ @NOTOK=0; \
+ for i in $(APPS); do\
+ n=`echo "$${i}" | cut -f1 -d,`;\
+ a=`echo "$${i}" | cut -f2 -d,`;\
+ STATUS=`oc get -n "$${n}" application/"$${a}" -o jsonpath='{.status.sync.status}'`;\
+ if [[ $$STATUS != "Synced" ]]; then\
+ NOTOK=$$(( $${NOTOK} + 1));\
+ fi;\
+ HEALTH=`oc get -n "$${n}" application/"$${a}" -o jsonpath='{.status.health.status}'`;\
+ if [[ $$HEALTH != "Healthy" ]]; then\
+ NOTOK=$$(( $${NOTOK} + 1));\
+ fi;\
+ echo "$${n} $${a} -> Sync: $${STATUS} - Health: $${HEALTH}";\
+ done;\
+ if [ $${NOTOK} -gt 0 ]; then\
+ echo "Some applications are not synced or are unhealthy";\
+ exit 1;\
+ fi
+
+
+##@ Test and Linters Tasks
+
+CHARTS=$(shell find . -type f -iname 'Chart.yaml' -exec dirname "{}" \; | grep -v examples | sed -e 's/.\///')
+# Section related to tests and linting
+TEST_OPTS= -f values-global.yaml \
+ --set global.repoURL="https://github.com/pattern-clone/mypattern" \
+ --set main.git.repoURL="https://github.com/pattern-clone/mypattern" \
+ --set main.git.revision=main --set global.pattern="mypattern" \
+ --set global.namespace="pattern-namespace" \
+ --set global.hubClusterDomain=apps.hub.example.com \
+ --set global.localClusterDomain=apps.region.example.com \
+ --set global.clusterDomain=region.example.com \
+ --set global.clusterVersion="4.12" \
+ --set global.clusterPlatform=aws \
+ --set "clusterGroup.imperative.jobs[0].name"="test" \
+ --set "clusterGroup.imperative.jobs[0].playbook"="ansible/test.yml"
+PATTERN_OPTS=-f common/examples/values-example.yaml
+EXECUTABLES=git helm oc ansible
+
+.PHONY: test
+test: ## run helm tests
+ @for t in $(CHARTS); do common/scripts/test.sh $$t all "$(TEST_OPTS)"; if [ $$? != 0 ]; then exit 1; fi; done
+
+.PHONY: helmlint
+helmlint: ## run helm lint
+ @for t in $(CHARTS); do common/scripts/lint.sh $$t $(TEST_OPTS); if [ $$? != 0 ]; then exit 1; fi; done
+
+API_URL ?= https://raw.githubusercontent.com/hybrid-cloud-patterns/ocp-schemas/main/openshift/4.10/
+KUBECONFORM_SKIP ?= -skip 'CustomResourceDefinition,ClusterIssuer,CertManager,Certificate,ArgoCD'
+
+# We need to skip 'CustomResourceDefinition' as openapi2jsonschema seems to be unable to generate them ATM
+.PHONY: kubeconform
+kubeconform: ## run helm kubeconform
+ @for t in $(CHARTS); do helm template $(TEST_OPTS) $(PATTERN_OPTS) $$t | kubeconform -strict $(KUBECONFORM_SKIP) -verbose -schema-location $(API_URL); if [ $$? != 0 ]; then exit 1; fi; done
+
+.PHONY: super-linter
+super-linter: ## Runs super linter locally
+ rm -rf .mypy_cache
+ podman run -e RUN_LOCAL=true -e USE_FIND_ALGORITHM=true \
+ -e VALIDATE_BASH=false \
+ -e VALIDATE_JSCPD=false \
+ -e VALIDATE_KUBERNETES_KUBECONFORM=false \
+ -e VALIDATE_YAML=false \
+ -e VALIDATE_ANSIBLE=false \
+ -e VALIDATE_DOCKERFILE_HADOLINT=false \
+ -e VALIDATE_TEKTON=false \
+ $(DISABLE_LINTERS) \
+ -v $(PWD):/tmp/lint:rw,z \
+ -w /tmp/lint \
+ docker.io/github/super-linter:slim-v5
+
+.PHONY: ansible-lint
+ansible-lint: ## run ansible lint on ansible/ folder
+ podman run -it -v $(PWD):/workspace:rw,z --workdir /workspace --env ANSIBLE_CONFIG=./ansible/ansible.cfg \
+ --entrypoint "/usr/local/bin/ansible-lint" quay.io/ansible/creator-ee:latest "-vvv" "ansible/"
+
+.PHONY: ansible-unittest
+ansible-unittest: ## run ansible unit tests
+ pytest -r a --fulltrace --color yes ansible/tests/unit/test_*.py
+
+.PHONY: deploy upgrade legacy-deploy legacy-upgrade
+deploy upgrade legacy-deploy legacy-upgrade:
+ @echo "UNSUPPORTED TARGET: please switch to 'operator-deploy'"; exit 1
diff --git a/common/README.md b/common/README.md
new file mode 100644
index 00000000..568a2396
--- /dev/null
+++ b/common/README.md
@@ -0,0 +1,22 @@
+# Validated Patterns common/ repository
+
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
+
+## Start Here
+
+This repository is never used standalone. It is usually imported into each pattern as a subtree.
+In order to import common/ for the very first time you can use
+`https://github.com/validatedpatterns/multicloud-gitops/blob/main/common/scripts/make_common_subtree.sh`
+
+In order to update your common subtree inside your pattern repository you can either use
+`https://github.com/validatedpatterns/utilities/blob/main/scripts/update-common-everywhere.sh` or
+do it manually by doing the following:
+
+```sh
+git remote add -f upstream-common https://github.com/validatedpatterns/common.git
+git merge -s subtree -Xtheirs -Xsubtree=common upstream-common/ha-vault
+```
+
+## Secrets
+
+There are two different secret formats parsed by the ansible bits. Both are documented [here](https://github.com/validatedpatterns/common/tree/main/ansible/roles/vault_utils/README.md)
diff --git a/common/acm/.github/workflows/update-helm-repo.yml b/common/acm/.github/workflows/update-helm-repo.yml
new file mode 100644
index 00000000..c12af2b5
--- /dev/null
+++ b/common/acm/.github/workflows/update-helm-repo.yml
@@ -0,0 +1,29 @@
+# This invokes the workflow named 'publish-charts' in the umbrella repo
+# It expects to have a secret called CHARTS_REPOS_TOKEN which contains
+# the GitHub token that has permissions to invoke workflows and commit code
+# inside the umbrella-repo.
+# The following fine-grained permissions were used in testing and were limited
+# to the umbrella repo only:
+# - Actions: r/w
+# - Commit statuses: r/w
+# - Contents: r/w
+# - Deployments: r/w
+# - Pages: r/w
+
+name: vp-patterns/update-helm-repo
+on:
+ push:
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+'
+
+jobs:
+ helmlint:
+ uses: validatedpatterns/helm-charts/.github/workflows/helmlint.yml@985ba37e0eb50b1b35ec194fc999eae2d0ae1486
+ permissions:
+ contents: read
+
+ update-helm-repo:
+ needs: [helmlint]
+ uses: validatedpatterns/helm-charts/.github/workflows/update-helm-repo.yml@985ba37e0eb50b1b35ec194fc999eae2d0ae1486
+ permissions: read-all
+ secrets: inherit
diff --git a/common/acm/.helmignore b/common/acm/.helmignore
new file mode 100644
index 00000000..b25c15b8
--- /dev/null
+++ b/common/acm/.helmignore
@@ -0,0 +1 @@
+*~
diff --git a/common/acm/Chart.yaml b/common/acm/Chart.yaml
new file mode 100644
index 00000000..3bae9da5
--- /dev/null
+++ b/common/acm/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart to configure Advanced Cluster Manager for OpenShift.
+keywords:
+- pattern
+name: acm
+version: 0.0.1
diff --git a/common/acm/templates/_helpers.tpl b/common/acm/templates/_helpers.tpl
new file mode 100644
index 00000000..910b3970
--- /dev/null
+++ b/common/acm/templates/_helpers.tpl
@@ -0,0 +1,13 @@
+{{/*
+Default always defined valueFiles to be included when pushing the cluster wide argo application via acm
+*/}}
+{{- define "acm.app.policies.valuefiles" -}}
+- "/values-global.yaml"
+- "/values-{{ .name }}.yaml"
+- '/values-{{ `{{ (lookup "config.openshift.io/v1" "Infrastructure" "" "cluster").spec.platformSpec.type }}` }}.yaml'
+- '/values-{{ `{{ (lookup "config.openshift.io/v1" "Infrastructure" "" "cluster").spec.platformSpec.type }}` }}-{{ `{{ printf "%d.%d" ((semver (index (lookup "config.openshift.io/v1" "ClusterVersion" "" "version").status.history 0).version).Major) ((semver (index (lookup "config.openshift.io/v1" "ClusterVersion" "" "version").status.history 0).version).Minor) }}` }}.yaml'
+- '/values-{{ `{{ (lookup "config.openshift.io/v1" "Infrastructure" "" "cluster").spec.platformSpec.type }}` }}-{{ .name }}.yaml'
+# We cannot use $.Values.global.clusterVersion because that gets resolved to the
+# hub's cluster version, whereas we want to include the spoke cluster version
+- '/values-{{ `{{ printf "%d.%d" ((semver (index (lookup "config.openshift.io/v1" "ClusterVersion" "" "version").status.history 0).version).Major) ((semver (index (lookup "config.openshift.io/v1" "ClusterVersion" "" "version").status.history 0).version).Minor) }}` }}.yaml'
+{{- end }} {{- /*acm.app.policies.valuefiles */}}
diff --git a/common/acm/templates/multiclusterhub.yaml b/common/acm/templates/multiclusterhub.yaml
new file mode 100644
index 00000000..79ef9339
--- /dev/null
+++ b/common/acm/templates/multiclusterhub.yaml
@@ -0,0 +1,11 @@
+apiVersion: operator.open-cluster-management.io/v1
+kind: MultiClusterHub
+metadata:
+ name: multiclusterhub
+ namespace: open-cluster-management
+ annotations:
+ argocd.argoproj.io/sync-wave: "-1"
+ {{- if kindIs "map" .Values.clusterGroup.subscriptions }}
+ installer.open-cluster-management.io/mce-subscription-spec: '{"source": "{{ default "redhat-operators" .Values.clusterGroup.subscriptions.acm.source }}" }'
+ {{- end }}
+spec: {}
diff --git a/common/acm/templates/policies/acm-hub-ca-policy.yaml b/common/acm/templates/policies/acm-hub-ca-policy.yaml
new file mode 100644
index 00000000..890e6bae
--- /dev/null
+++ b/common/acm/templates/policies/acm-hub-ca-policy.yaml
@@ -0,0 +1,71 @@
+# This pushes out the HUB's Certificate Authorities on to the imported clusters
+{{ if .Values.clusterGroup.isHubCluster }}
+---
+apiVersion: policy.open-cluster-management.io/v1
+kind: Policy
+metadata:
+ name: acm-hub-ca-policy
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ argocd.argoproj.io/compare-options: IgnoreExtraneous
+spec:
+ remediationAction: enforce
+ disabled: false
+ policy-templates:
+ - objectDefinition:
+ apiVersion: policy.open-cluster-management.io/v1
+ kind: ConfigurationPolicy
+ metadata:
+ name: acm-hub-ca-config-policy
+ spec:
+ remediationAction: enforce
+ severity: medium
+ namespaceSelector:
+ include:
+ - default
+ object-templates:
+ - complianceType: mustonlyhave
+ objectDefinition:
+ kind: Secret
+ apiVersion: v1
+ type: Opaque
+ metadata:
+ name: hub-ca
+ namespace: imperative
+ data:
+ hub-kube-root-ca.crt: '{{ `{{hub fromConfigMap "" "kube-root-ca.crt" "ca.crt" | base64enc hub}}` }}'
+ hub-openshift-service-ca.crt: '{{ `{{hub fromConfigMap "" "openshift-service-ca.crt" "service-ca.crt" | base64enc hub}}` }}'
+---
+apiVersion: policy.open-cluster-management.io/v1
+kind: PlacementBinding
+metadata:
+ name: acm-hub-ca-policy-placement-binding
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+placementRef:
+ name: acm-hub-ca-policy-placement
+ kind: PlacementRule
+ apiGroup: apps.open-cluster-management.io
+subjects:
+ - name: acm-hub-ca-policy
+ kind: Policy
+ apiGroup: policy.open-cluster-management.io
+---
+apiVersion: apps.open-cluster-management.io/v1
+kind: PlacementRule
+metadata:
+ name: acm-hub-ca-policy-placement
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+spec:
+ clusterConditions:
+ - status: 'True'
+ type: ManagedClusterConditionAvailable
+ clusterSelector:
+ matchExpressions:
+ - key: local-cluster
+ operator: NotIn
+ values:
+ - 'true'
+{{ end }}
+
diff --git a/common/acm/templates/policies/application-policies.yaml b/common/acm/templates/policies/application-policies.yaml
new file mode 100644
index 00000000..01082e54
--- /dev/null
+++ b/common/acm/templates/policies/application-policies.yaml
@@ -0,0 +1,152 @@
+# TODO: Also create a GitOpsCluster.apps.open-cluster-management.io
+{{- range .Values.clusterGroup.managedClusterGroups }}
+{{- $group := . }}
+{{- if not .hostedArgoSites }}
+apiVersion: policy.open-cluster-management.io/v1
+kind: Policy
+metadata:
+ name: {{ .name }}-clustergroup-policy
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ argocd.argoproj.io/compare-options: IgnoreExtraneous
+spec:
+ remediationAction: enforce
+ disabled: false
+ policy-templates:
+ - objectDefinition:
+ apiVersion: policy.open-cluster-management.io/v1
+ kind: ConfigurationPolicy
+ metadata:
+ name: {{ .name }}-clustergroup-config
+ spec:
+ remediationAction: enforce
+ severity: medium
+ namespaceSelector:
+ include:
+ - default
+ object-templates:
+ - complianceType: mustonlyhave
+ objectDefinition:
+ apiVersion: argoproj.io/v1alpha1
+ kind: Application
+ metadata:
+ name: {{ $.Values.global.pattern }}-{{ .name }}
+ namespace: openshift-gitops
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io/foreground
+ spec:
+ project: default
+ source:
+ repoURL: {{ coalesce .repoURL $.Values.global.repoURL }}
+ targetRevision: {{ coalesce .targetRevision $.Values.global.targetRevision }}
+ path: {{ default "common/clustergroup" .path }}
+ helm:
+ ignoreMissingValueFiles: true
+ valueFiles:
+ {{- include "acm.app.policies.valuefiles" . | nindent 24 }}
+ {{- range $valueFile := $.Values.global.extraValueFiles }}
+ - {{ $valueFile | quote }}
+ {{- end }}
+ {{- range $valueFile := .extraValueFiles }}
+ - {{ $valueFile | quote }}
+ {{- end }}
+ parameters:
+ - name: global.repoURL
+ value: $ARGOCD_APP_SOURCE_REPO_URL
+ - name: global.targetRevision
+ value: $ARGOCD_APP_SOURCE_TARGET_REVISION
+ - name: global.namespace
+ value: $ARGOCD_APP_NAMESPACE
+ - name: global.pattern
+ value: {{ $.Values.global.pattern }}
+ - name: global.hubClusterDomain
+ value: {{ $.Values.global.hubClusterDomain }}
+ - name: global.localClusterDomain
+ value: '{{ `{{ (lookup "config.openshift.io/v1" "Ingress" "" "cluster").spec.domain }}` }}'
+ # Requires ACM 2.6 or higher
+ - name: global.clusterDomain
+ value: '{{ `{{ (lookup "config.openshift.io/v1" "Ingress" "" "cluster").spec.domain | replace "apps." "" }}` }}'
+ # Requires ACM 2.6 or higher (I could not come up with something less terrible to get maj.min)
+ - name: global.clusterVersion
+ value: '{{ `{{ printf "%d.%d" ((semver (index (lookup "config.openshift.io/v1" "ClusterVersion" "" "version").status.history 0).version).Major) ((semver (index (lookup "config.openshift.io/v1" "ClusterVersion" "" "version").status.history 0).version).Minor) }}` }}'
+ - name: global.localClusterName
+ value: '{{ `{{ (split "." (lookup "config.openshift.io/v1" "Ingress" "" "cluster").spec.domain)._1 }}` }}'
+ - name: global.clusterPlatform
+ value: {{ $.Values.global.clusterPlatform }}
+ - name: clusterGroup.name
+ value: {{ $group.name }}
+ {{- range .helmOverrides }}
+ - name: {{ .name }}
+ value: {{ .value | quote }}
+ {{- end }}
+ {{- if .fileParameters }}
+ fileParameters:
+ {{- range .fileParameters }}
+ - name: {{ .name }}
+ path: {{ .path }}
+ {{- end }}
+ {{- end }}
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: {{ $.Values.global.pattern }}-{{ .name }}
+ syncPolicy:
+ automated:
+ prune: false
+ selfHeal: true
+ retry:
+ limit: {{ default 20 $.Values.global.options.applicationRetryLimit }}
+ ignoreDifferences:
+ - group: apps
+ kind: Deployment
+ jsonPointers:
+ - /spec/replicas
+ - group: route.openshift.io
+ kind: Route
+ jsonPointers:
+ - /status
+---
+apiVersion: policy.open-cluster-management.io/v1
+kind: PlacementBinding
+metadata:
+ name: {{ .name }}-placement-binding
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+placementRef:
+ name: {{ .name }}-placement
+ kind: PlacementRule
+ apiGroup: apps.open-cluster-management.io
+subjects:
+ - name: {{ .name }}-clustergroup-policy
+ kind: Policy
+ apiGroup: policy.open-cluster-management.io
+---
+apiVersion: apps.open-cluster-management.io/v1
+kind: PlacementRule
+metadata:
+ name: {{ .name }}-placement
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+spec:
+ clusterConditions:
+ - status: 'True'
+ type: ManagedClusterConditionAvailable
+ {{- if .clusterSelector }}
+ clusterSelector: {{ .clusterSelector | toPrettyJson }}
+ {{- else if (not $group.acmlabels) }}
+ clusterSelector:
+ matchLabels:
+ clusterGroup: {{ $group.name }}
+ {{- else if eq (len $group.acmlabels) 0 }}
+ clusterSelector:
+ matchLabels:
+ clusterGroup: {{ $group.name }}
+ {{- else }}
+ clusterSelector:
+ matchLabels:
+ {{- range .acmlabels }}
+ {{ .name }}: {{ .value }}
+ {{- end }}
+ {{- end }}
+---
+{{- end }}
+{{- end }}
diff --git a/common/acm/templates/policies/ocp-gitops-policy.yaml b/common/acm/templates/policies/ocp-gitops-policy.yaml
new file mode 100644
index 00000000..a0ed611f
--- /dev/null
+++ b/common/acm/templates/policies/ocp-gitops-policy.yaml
@@ -0,0 +1,82 @@
+apiVersion: policy.open-cluster-management.io/v1
+kind: Policy
+metadata:
+ name: openshift-gitops-policy
+ annotations:
+ policy.open-cluster-management.io/standards: NIST-CSF
+ policy.open-cluster-management.io/categories: PR.DS Data Security
+ policy.open-cluster-management.io/controls: PR.DS-1 Data-at-rest
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ argocd.argoproj.io/compare-options: IgnoreExtraneous
+spec:
+ remediationAction: enforce
+ disabled: false
+ policy-templates:
+ - objectDefinition:
+ apiVersion: policy.open-cluster-management.io/v1
+ kind: ConfigurationPolicy
+ metadata:
+ name: openshift-gitops-config
+ spec:
+ remediationAction: enforce
+ severity: medium
+ namespaceSelector:
+ include:
+ - default
+ object-templates:
+ - complianceType: mustonlyhave
+ objectDefinition:
+ # This is an auto-generated file. DO NOT EDIT
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ name: openshift-gitops-operator
+ namespace: openshift-operators
+ labels:
+ operators.coreos.com/openshift-gitops-operator.openshift-operators: ''
+ spec:
+ channel: {{ default "gitops-1.11" .Values.main.gitops.channel }}
+ installPlanApproval: Automatic
+ name: openshift-gitops-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+ config:
+ env:
+ - name: ARGOCD_CLUSTER_CONFIG_NAMESPACES
+ value: "*"
+---
+apiVersion: policy.open-cluster-management.io/v1
+kind: PlacementBinding
+metadata:
+ name: openshift-gitops-placement-binding
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+placementRef:
+ name: openshift-gitops-placement
+ kind: PlacementRule
+ apiGroup: apps.open-cluster-management.io
+subjects:
+ - name: openshift-gitops-policy
+ kind: Policy
+ apiGroup: policy.open-cluster-management.io
+---
+apiVersion: apps.open-cluster-management.io/v1
+kind: PlacementRule
+metadata:
+ name: openshift-gitops-placement
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+spec:
+ clusterConditions:
+ - status: 'True'
+ type: ManagedClusterConditionAvailable
+ clusterSelector:
+ matchExpressions:
+ - key: vendor
+ operator: In
+ values:
+ - OpenShift
+ - key: local-cluster
+ operator: NotIn
+ values:
+ - 'true'
diff --git a/common/acm/templates/policies/private-repo-policies.yaml b/common/acm/templates/policies/private-repo-policies.yaml
new file mode 100644
index 00000000..0b7db0da
--- /dev/null
+++ b/common/acm/templates/policies/private-repo-policies.yaml
@@ -0,0 +1,161 @@
+# We copy the vp-private-repo-credentials from the "openshift-gitops" namespace
+# to the "open-cluster-management" namespace via the "private-hub-policy"
+#
+# Then we copy the secret from the "open-cluster-management" namespace to the
+# managed clusters "openshift-gitops" instance
+#
+# And we also copy the same secret to the namespaced argo's namespace
+{{ if $.Values.global.privateRepo }}
+{{ if .Values.clusterGroup.isHubCluster }}
+---
+apiVersion: policy.open-cluster-management.io/v1
+kind: Policy
+metadata:
+ name: private-hub-policy
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ argocd.argoproj.io/compare-options: IgnoreExtraneous
+spec:
+ remediationAction: enforce
+ disabled: false
+ policy-templates:
+ - objectDefinition:
+ apiVersion: policy.open-cluster-management.io/v1
+ kind: ConfigurationPolicy
+ metadata:
+ name: private-hub-config
+ spec:
+ remediationAction: enforce
+ severity: medium
+ namespaceSelector:
+ include:
+ - default
+ object-templates:
+ - complianceType: mustonlyhave
+ objectDefinition:
+ kind: Secret
+ apiVersion: v1
+ type: Opaque
+ metadata:
+ name: vp-private-repo-credentials
+ namespace: open-cluster-management
+ labels:
+ argocd.argoproj.io/secret-type: repository
+ data: '{{ `{{copySecretData "openshift-gitops" "vp-private-repo-credentials"}}` }}'
+---
+apiVersion: policy.open-cluster-management.io/v1
+kind: PlacementBinding
+metadata:
+ name: private-hub-placement-binding
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+placementRef:
+ name: private-hub-placement
+ kind: PlacementRule
+ apiGroup: apps.open-cluster-management.io
+subjects:
+ - name: private-hub-policy
+ kind: Policy
+ apiGroup: policy.open-cluster-management.io
+---
+apiVersion: apps.open-cluster-management.io/v1
+kind: PlacementRule
+metadata:
+ name: private-hub-placement
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+spec:
+ clusterConditions:
+ - status: 'True'
+ type: ManagedClusterConditionAvailable
+ clusterSelector:
+ matchExpressions:
+ - key: local-cluster
+ operator: In
+ values:
+ - 'true'
+---
+{{ end }}{{- /* if .Values.clusterGroup.isHubCluster */}}
+{{- range .Values.clusterGroup.managedClusterGroups }}
+{{- $group := . }}
+{{- if not .hostedArgoSites }}
+apiVersion: policy.open-cluster-management.io/v1
+kind: Policy
+metadata:
+ name: private-{{ .name }}-policy
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ argocd.argoproj.io/compare-options: IgnoreExtraneous
+spec:
+ remediationAction: enforce
+ disabled: false
+ policy-templates:
+ - objectDefinition:
+ apiVersion: policy.open-cluster-management.io/v1
+ kind: ConfigurationPolicy
+ metadata:
+ name: private-{{ .name }}-config
+ spec:
+ remediationAction: enforce
+ severity: medium
+ namespaceSelector:
+ include:
+ - default
+ object-templates:
+ - complianceType: mustonlyhave
+ objectDefinition:
+ kind: Secret
+ apiVersion: v1
+ type: Opaque
+ metadata:
+ name: vp-private-repo-credentials
+ namespace: openshift-gitops
+ labels:
+ argocd.argoproj.io/secret-type: repository
+ data: '{{ `{{hub copySecretData "open-cluster-management" "vp-private-repo-credentials" hub}}` }}'
+ - complianceType: mustonlyhave
+ objectDefinition:
+ kind: Secret
+ apiVersion: v1
+ type: Opaque
+ metadata:
+ name: vp-private-repo-credentials
+ namespace: {{ $.Values.global.pattern }}-{{ .name }}
+ labels:
+ argocd.argoproj.io/secret-type: repository
+ data: '{{ `{{hub copySecretData "open-cluster-management" "vp-private-repo-credentials" hub}}` }}'
+---
+apiVersion: policy.open-cluster-management.io/v1
+kind: PlacementBinding
+metadata:
+ name: private-{{ .name }}-placement-binding
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+placementRef:
+ name: private-{{ .name }}-placement
+ kind: PlacementRule
+ apiGroup: apps.open-cluster-management.io
+subjects:
+ - name: private-{{ .name }}-policy
+ kind: Policy
+ apiGroup: policy.open-cluster-management.io
+---
+apiVersion: apps.open-cluster-management.io/v1
+kind: PlacementRule
+metadata:
+ name: private-{{ .name }}-placement
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+spec:
+ clusterConditions:
+ - status: 'True'
+ type: ManagedClusterConditionAvailable
+ clusterSelector:
+ matchExpressions:
+ - key: local-cluster
+ operator: NotIn
+ values:
+ - 'true'
+{{- end }}{{- /* if not .hostedArgoSites */}}
+{{- end }}{{- /* range .Values.clusterGroup.managedClusterGroups */}}
+{{- end }}{{- /* if $.Values.global.privateRepo */}}
diff --git a/common/acm/templates/provision/_install-config.tpl b/common/acm/templates/provision/_install-config.tpl
new file mode 100644
index 00000000..b0336627
--- /dev/null
+++ b/common/acm/templates/provision/_install-config.tpl
@@ -0,0 +1,66 @@
+{{- define "cluster.install-config" -}}
+
+{{- $type := "None" }}
+{{- $cloud := "None" }}
+{{- $region := "None" }}
+
+{{- if .platform.aws }}
+{{- $cloud = "aws" }}
+{{- $region = .platform.aws.region }}
+{{- $type = "m5.xlarge" }}
+{{- else if .platform.azure }}
+{{- $cloud = "azure" }}
+{{- $region = .platform.azure.region }}
+{{- $type = "Standard_D8s_v3" }}
+{{- end }}
+
+apiVersion: v1
+metadata:
+ name: '{{ .name }}'
+baseDomain: {{ .baseDomain }}
+controlPlane:
+ architecture: amd64
+ hyperthreading: Enabled
+ name: controlPlane
+ {{- if .controlPlane }}
+ replicas: {{ default 3 .controlPlane.count }}
+ {{- if .controlPlane.platform }}
+ platform:
+ {{- toYaml .controlPlane.platform | nindent 4 }}
+ {{- end }}
+ {{- else }}
+ replicas: 3
+ platform:
+ {{ $cloud }}:
+ type: {{ $type }}
+ {{- end }}
+compute:
+- hyperthreading: Enabled
+ architecture: amd64
+ name: 'worker'
+ {{- if .workers }}
+ replicas: {{ default 0 .workers.count }}
+ {{- if .workers.platform }}
+ platform:
+ {{- toYaml .workers.platform | nindent 4 }}
+ {{- end }}
+ {{- else }}
+ replicas: 3
+ platform:
+ {{ $cloud }}:
+ type: {{ $type }}
+ {{- end }}
+networking:
+ clusterNetwork:
+ - cidr: 10.128.0.0/14
+ hostPrefix: 23
+ machineNetwork:
+ - cidr: 10.0.0.0/16
+ networkType: OVNKubernetes
+ serviceNetwork:
+ - 172.30.0.0/16
+platform:
+{{- toYaml .platform | nindent 2 }}
+pullSecret: "" # skip, hive will inject based on its secrets
+sshKey: "" # skip, hive will inject based on its secrets
+{{- end -}}
diff --git a/common/acm/templates/provision/clusterpool.yaml b/common/acm/templates/provision/clusterpool.yaml
new file mode 100644
index 00000000..e2f9d3d1
--- /dev/null
+++ b/common/acm/templates/provision/clusterpool.yaml
@@ -0,0 +1,95 @@
+{{- range .Values.clusterGroup.managedClusterGroups }}
+{{- $group := . }}
+{{- if .clusterPools }}{{- /* We only create ManagedClusterSets if there are clusterPools defined */}}
+apiVersion: cluster.open-cluster-management.io/v1beta1
+kind: ManagedClusterSet
+metadata:
+ annotations:
+ cluster.open-cluster-management.io/submariner-broker-ns: {{ .name }}-broker
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ name: {{ .name }}
+spec:
+ clusterSelector:
+ selectorType: LegacyClusterSetLabel
+---
+{{- range .clusterPools }}
+
+{{- $pool := . }}
+{{- $poolName := print .name "-" $group.name }}
+
+{{- $cloud := "None" }}
+{{- $region := "None" }}
+{{- $numClusters := 0 }}
+
+{{- if .platform.aws }}
+{{- $cloud = "aws" }}
+{{- $region = .platform.aws.region }}
+{{- else if .platform.azure }}
+{{- $cloud = "azure" }}
+{{- $region = .platform.azure.region }}
+{{- end }}
+
+{{- if .clusters }}
+{{- $numClusters = len .clusters }}
+{{- end }}
+
+apiVersion: hive.openshift.io/v1
+kind: ClusterPool
+metadata:
+ name: "{{ $poolName }}"
+ annotations:
+ argocd.argoproj.io/sync-wave: "10"
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ labels:
+ cloud: {{ $cloud }}
+ region: '{{ $region }}'
+ vendor: OpenShift
+ cluster.open-cluster-management.io/clusterset: {{ .name }}
+spec:
+ {{- if .size }}
+ size: {{ .size }}
+ {{- else }}
+ size: {{ $numClusters }}
+ {{- end }}
+ runningCount: {{ $numClusters }}
+ baseDomain: {{ .baseDomain }}
+ installConfigSecretTemplateRef:
+ name: {{ $poolName }}-install-config
+ imageSetRef:
+ name: img{{ .openshiftVersion }}-multi-appsub
+ pullSecretRef:
+ name: {{ $poolName }}-pull-secret
+ skipMachinePools: true # Disable MachinePool as using custom install-config
+ platform:
+ {{ $cloud }}:
+ credentialsSecretRef:
+ name: {{ $poolName }}-creds
+ region: {{ $region }}
+---
+{{- range .clusters }}
+apiVersion: hive.openshift.io/v1
+kind: ClusterClaim
+metadata:
+ name: '{{ lower . }}-{{ lower $group.name }}'
+ annotations:
+ argocd.argoproj.io/sync-wave: "20"
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ cluster.open-cluster-management.io/createmanagedcluster: "true"
+ labels:
+ clusterClaimName: {{ lower . }}-{{ lower $group.name }}
+ {{- if (not $group.acmlabels) }}
+ clusterGroup: {{ $group.name }}
+ {{- else if eq (len $group.acmlabels) 0 }}
+ clusterGroup: {{ $group.name }}
+ {{- else }}
+ {{- range $group.acmlabels }}
+ {{ .name }}: {{ .value }}
+ {{- end }}
+ {{- end }}
+spec:
+ clusterPoolName: {{ $poolName }}
+---
+{{- end }}{{- /* range .clusters */}}
+{{- end }}{{- /* range .clusterPools */}}
+{{- end }}{{- /* if .clusterPools */}}
+{{- end }}{{- /* range .Values.clusterGroup.managedClusterGroups */}}
diff --git a/common/acm/templates/provision/secrets-aws.yaml b/common/acm/templates/provision/secrets-aws.yaml
new file mode 100644
index 00000000..002c9247
--- /dev/null
+++ b/common/acm/templates/provision/secrets-aws.yaml
@@ -0,0 +1,84 @@
+{{- range .Values.clusterGroup.managedClusterGroups }}
+{{- $group := . }}
+{{- range .clusterPools }}
+{{- $poolName := print .name "-" $group.name }}
+{{- if .platform.aws }}
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: {{ $poolName }}-creds
+spec:
+ dataFrom:
+ - extract:
+ # Expects entries called: aws_access_key_id and aws_secret_access_key
+ key: {{ default "secret/data/hub/aws" .awsKeyPath }}
+ refreshInterval: 24h0m0s
+ secretStoreRef:
+ name: {{ $.Values.secretStore.name }}
+ kind: {{ $.Values.secretStore.kind }}
+ target:
+ name: {{ $poolName }}-creds
+ creationPolicy: Owner
+ template:
+ type: Opaque
+---
+# For use when manually creating clusters with ACM
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: {{ $poolName }}-infra-creds
+spec:
+ data:
+ - secretKey: openshiftPullSecret
+ remoteRef:
+ key: {{ default "secret/data/hub/openshiftPullSecret" .pullSecretKeyPath }}
+ property: content
+ - secretKey: awsKeyId
+ remoteRef:
+ key: {{ default "secret/data/hub/aws" .awsKeyPath }}
+ property: aws_access_key_id
+ - secretKey: awsAccessKey
+ remoteRef:
+ key: {{ default "secret/data/hub/aws" .awsKeyPath }}
+ property: aws_secret_access_key
+ - secretKey: sshPublicKey
+ remoteRef:
+ key: {{ default "secret/data/hub/publickey" .sshPublicKeyPath }}
+ property: content
+ - secretKey: sshPrivateKey
+ remoteRef:
+ key: {{ default "secret/data/hub/privatekey" .sshPrivateKeyPath }}
+ property: content
+ refreshInterval: 24h0m0s
+ secretStoreRef:
+ name: {{ $.Values.secretStore.name }}
+ kind: {{ $.Values.secretStore.kind }}
+ target:
+ name: {{ $poolName }}-infra-creds
+ creationPolicy: Owner
+ template:
+ type: Opaque
+ metadata:
+ labels:
+ cluster.open-cluster-management.io/credentials: ""
+ cluster.open-cluster-management.io/type: aws
+ data:
+ baseDomain: "{{ .baseDomain }}"
+ pullSecret: |-
+ {{ "{{ .openshiftPullSecret | toString }}" }}
+ aws_access_key_id: |-
+ {{ "{{ .awsKeyId | toString }}" }}
+ aws_secret_access_key: |-
+ {{ "{{ .awsAccessKey | toString }}" }}
+ ssh-privatekey: |-
+ {{ "{{ .sshPrivateKey | toString }}" }}
+ ssh-publickey: |-
+ {{ "{{ .sshPublicKey | toString }}" }}
+ httpProxy: ""
+ httpsProxy: ""
+ noProxy: ""
+ additionalTrustBundle: ""
+---
+{{- end }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/common/acm/templates/provision/secrets-azure.yaml b/common/acm/templates/provision/secrets-azure.yaml
new file mode 100644
index 00000000..7fe6271b
--- /dev/null
+++ b/common/acm/templates/provision/secrets-azure.yaml
@@ -0,0 +1,84 @@
+{{- range .Values.clusterGroup.managedClusterGroups }}
+{{- $group := . }}
+{{- range .clusterPools }}
+{{- $poolName := print .name "-" $group.name }}
+{{- if .platform.azure }}
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: {{ $poolName }}-creds
+spec:
+ data:
+ - secretKey: azureOsServicePrincipal
+ remoteRef:
+ key: {{ default "secret/data/hub/azureOsServicePrincipal" .azureKeyPath }}
+ property: content
+ refreshInterval: 24h0m0s
+ secretStoreRef:
+ name: {{ $.Values.secretStore.name }}
+ kind: {{ $.Values.secretStore.kind }}
+ target:
+ name: {{ $poolName }}-creds
+ creationPolicy: Owner
+ template:
+ type: Opaque
+ data:
+ osServicePrincipal.json: |-
+ {{ "{{ .azureOsServicePrincipal | toString }}" }}
+---
+# For use when manually creating clusters with ACM
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: {{ $poolName }}-infra-creds
+spec:
+ data:
+ - secretKey: openshiftPullSecret
+ remoteRef:
+ key: {{ default "secret/data/hub/openshiftPullSecret" .pullSecretKeyPath }}
+ property: content
+ - secretKey: sshPublicKey
+ remoteRef:
+ key: {{ default "secret/data/hub/publickey" .sshPublicKeyPath }}
+ property: content
+ - secretKey: sshPrivateKey
+ remoteRef:
+ key: {{ default "secret/data/hub/privatekey" .sshPrivateKeyPath }}
+ property: content
+ - secretKey: azureOsServicePrincipal
+ remoteRef:
+ key: {{ default "secret/data/hub/azureOsServicePrincipal" .azureKeyPath }}
+ property: content
+ refreshInterval: 24h0m0s
+ secretStoreRef:
+ name: {{ $.Values.secretStore.name }}
+ kind: {{ $.Values.secretStore.kind }}
+ target:
+ name: {{ $poolName }}-infra-creds
+ creationPolicy: Owner
+ template:
+ type: Opaque
+ metadata:
+ labels:
+ cluster.open-cluster-management.io/credentials: ""
+ cluster.open-cluster-management.io/type: aws
+ data:
+ cloudName: AzurePublicCloud
+ osServicePrincipal.json: |-
+ {{ "{{ .azureOsServicePrincipal | toString }}" }}
+ baseDomain: "{{ .baseDomain }}"
+ baseDomainResourceGroupName: "{{ .platform.azure.baseDomainResourceGroupName | toString }}"
+ pullSecret: |-
+ {{ "{{ .openshiftPullSecret | toString }}" }}
+ ssh-privatekey: |-
+ {{ "{{ .sshPrivateKey | toString }}" }}
+ ssh-publickey: |-
+ {{ "{{ .sshPublicKey | toString }}" }}
+ httpProxy: ""
+ httpsProxy: ""
+ noProxy: ""
+ additionalTrustBundle: ""
+---
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/common/acm/templates/provision/secrets-common.yaml b/common/acm/templates/provision/secrets-common.yaml
new file mode 100644
index 00000000..21a03b73
--- /dev/null
+++ b/common/acm/templates/provision/secrets-common.yaml
@@ -0,0 +1,61 @@
+{{- range .Values.clusterGroup.managedClusterGroups }}
+{{- $group := . }}
+{{- range .clusterPools }}
+{{- $poolName := print .name "-" $group.name }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $poolName }}-install-config
+data:
+ # Base64 encoding of install-config yaml
+ install-config.yaml: {{ include "cluster.install-config" . | b64enc }}
+type: Opaque
+---
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: {{ $poolName }}-pull-secret
+spec:
+ data:
+ - secretKey: openshiftPullSecret
+ remoteRef:
+ key: {{ default "secret/data/hub/openshiftPullSecret" .pullSecretKeyPath }}
+ property: content
+ refreshInterval: 24h0m0s
+ secretStoreRef:
+ name: {{ $.Values.secretStore.name }}
+ kind: {{ $.Values.secretStore.kind }}
+ target:
+ name: {{ $poolName }}-pull-secret
+ creationPolicy: Owner
+ template:
+ type: kubernetes.io/dockerconfigjson
+ data:
+ .dockerconfigjson: |-
+ {{ "{{ .openshiftPullSecret | toString }}" }}
+---
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: {{ $poolName }}-ssh-private-key
+spec:
+ data:
+ - secretKey: sshPrivateKey
+ remoteRef:
+ key: {{ default "secret/data/hub/privatekey" .sshPrivateKeyPath }}
+ property: content
+ refreshInterval: 24h0m0s
+ secretStoreRef:
+ name: {{ $.Values.secretStore.name }}
+ kind: {{ $.Values.secretStore.kind }}
+ target:
+ name: {{ $poolName }}-ssh-private-key
+ creationPolicy: Owner
+ template:
+ type: Opaque
+ data:
+ ssh-privatekey: |-
+ {{ "{{ .sshPrivateKey | toString }}" }}
+---
+{{- end }}
+{{- end }}
diff --git a/common/acm/test.yaml b/common/acm/test.yaml
new file mode 100644
index 00000000..669daf07
--- /dev/null
+++ b/common/acm/test.yaml
@@ -0,0 +1,35 @@
+clusterGroup:
+ managedClusterGroups:
+ exampleRegion:
+ name: region-one
+
+ # Before enabling cluster provisioning, ensure AWS/Azure credentials and OCP
+ # pull secrets are defined in Vault. See values-secret.yaml.template
+ #
+ clusterPools:
+ exampleAWSPool:
+ name: aws-ap
+ openshiftVersion: 4.10.18
+ baseDomain: blueprints.rhecoeng.com
+ platform:
+ aws:
+ region: ap-southeast-2
+ clusters:
+ - One
+ exampleAzurePool:
+ name: azure-us
+ openshiftVersion: 4.10.18
+ baseDomain: blueprints.rhecoeng.com
+ platform:
+ azure:
+ baseDomainResourceGroupName: dojo-dns-zones
+ region: eastus
+ clusters:
+ - Two
+ - Three
+ acmlabels:
+ - name: clusterGroup
+ value: region-one
+ helmOverrides:
+ - name: clusterGroup.isHubCluster
+ value: false
diff --git a/common/acm/values.yaml b/common/acm/values.yaml
new file mode 100644
index 00000000..fb7cb03a
--- /dev/null
+++ b/common/acm/values.yaml
@@ -0,0 +1,35 @@
+main:
+ gitops:
+ channel: "gitops-1.11"
+
+global:
+ extraValueFiles: []
+ pattern: none
+ repoURL: none
+ targetRevision: main
+ options:
+ applicationRetryLimit: 20
+
+clusterGroup:
+ subscriptions:
+ acm:
+ source: redhat-operators
+ managedClusterGroups:
+# testRegion:
+# name: region-one
+# clusterPools:
+# testPool:
+# name: spoke
+# openshiftVersion: 4.10.18
+# provider:
+# region: ap-southeast-2
+# baseDomain: blueprints.rhecoeng.com
+# clusters:
+# - spoke1
+# labels:
+# - name: clusterGroup
+# value: region-one
+
+secretStore:
+ name: vault-backend
+ kind: ClusterSecretStore
diff --git a/common/ansible/ansible.cfg b/common/ansible/ansible.cfg
new file mode 100644
index 00000000..652feb98
--- /dev/null
+++ b/common/ansible/ansible.cfg
@@ -0,0 +1,6 @@
+[defaults]
+localhost_warning=False
+library=./plugins/modules:~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
+roles_path=./roles:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
+module_utils=~/.ansible/plugins/module_utils:./plugins/module_utils:/usr/share/ansible/plugins/module_utils
+filter_plugins=~/.ansible/plugins/filter:./plugins/filter:/usr/share/ansible/plugins/filter
diff --git a/common/ansible/playbooks/acm/acmhub-get-ca.yaml b/common/ansible/playbooks/acm/acmhub-get-ca.yaml
new file mode 100644
index 00000000..770333ff
--- /dev/null
+++ b/common/ansible/playbooks/acm/acmhub-get-ca.yaml
@@ -0,0 +1,53 @@
+# This playbook fetches the hub cluster's CA bundle from ACM's objects
+# and puts it in a secret inside the imperative namespace
+---
+- name: ACM Get Hub CA
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ vars:
+ ns: imperative
+ tasks:
+ - name: Find hub cluster
+ kubernetes.core.k8s_info:
+ kind: Secret
+ name: hub-kubeconfig-secret
+ namespace: open-cluster-management-agent
+ register: hub_cluster
+
+ - name: Do nothing when no managed clusters are found
+ ansible.builtin.meta: end_play
+ when: hub_cluster['resources'][0]['data']['kubeconfig'] is not defined
+
+ # FIXME(bandini) The assumption here is that there is a single hub cluster for each managed cluster
+ #
+ # oc extract secret/hub-kubeconfig-secret --keys=kubeconfig --to=- -n open-cluster-management-agent
+ # apiVersion: v1
+ # clusters:
+ # - cluster:
+ # certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURNakNDQWhxZ0F3SU...
+ # server: https://api.bandini-dc.blueprints.rhecoeng.com:6443
+ # name: default-cluster
+ - name: Get hub cluster facts
+ ansible.builtin.set_fact:
+      # kubeconfig is just a b64-encoded yaml
+ hub_cluster_kubeconfig: "{{ hub_cluster['resources'][0]['data']['kubeconfig'] | b64decode | from_yaml }}"
+
+ - name: Set CA fact
+ ansible.builtin.set_fact:
+ # The .get() call is needed because the key has dashes in it
+ hub_cluster_ca: "{{ hub_cluster_kubeconfig.clusters[0].cluster.get('certificate-authority-data') }}"
+
+ - name: Create secret with managed cluster's CA
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ kind: Secret
+ apiVersion: v1
+ metadata:
+ name: "hub"
+ namespace: "{{ ns }}"
+ data:
+ caBundle: "{{ hub_cluster_ca }}"
+ type: Opaque
diff --git a/common/ansible/playbooks/hello-world/hello-world.yaml b/common/ansible/playbooks/hello-world/hello-world.yaml
new file mode 100644
index 00000000..c0a992a7
--- /dev/null
+++ b/common/ansible/playbooks/hello-world/hello-world.yaml
@@ -0,0 +1,23 @@
+# This playbook is a simple hello-world playbook to show capabilities
+# It creates a config-map inside the imperative namespace containing
+# the helm variable "global.clusterDomain"
+---
+- name: Hello World Example
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ vars:
+ ns: imperative
+ tasks:
+ - name: Create secret with managed cluster's CA
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "hello-world"
+ namespace: "{{ ns }}"
+ data:
+ hello-cluster-domain: "{{ global['clusterDomain'] }}"
diff --git a/common/ansible/playbooks/iib-ci/iib-ci.yaml b/common/ansible/playbooks/iib-ci/iib-ci.yaml
new file mode 100644
index 00000000..dc6e45cb
--- /dev/null
+++ b/common/ansible/playbooks/iib-ci/iib-ci.yaml
@@ -0,0 +1,8 @@
+# This playbook invokes the iib_ci role
+---
+- name: IIB CI playbook
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ roles:
+ - iib_ci
diff --git a/common/ansible/playbooks/iib-ci/lookup.yml b/common/ansible/playbooks/iib-ci/lookup.yml
new file mode 100644
index 00000000..f39b8ea3
--- /dev/null
+++ b/common/ansible/playbooks/iib-ci/lookup.yml
@@ -0,0 +1,46 @@
+---
+- name: IIB CI playbook
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ rh_url: "https://datagrepper.engineering.redhat.com/raw?topic=/topic/VirtualTopic.eng.ci.redhat-container-image.index.built&delta=15780000&contains=%s"
+ operator: "openshift-gitops-1-gitops-operator-bundle"
+ ocp_versions: {}
+ tasks:
+ - name: Set url fact
+ ansible.builtin.set_fact:
+ url: "{{ rh_url | format(operator) }}"
+
+ - name: Fetch URI
+ ansible.builtin.uri:
+ url: "{{ url }}"
+ return_content: true
+ register: jsoncontent
+
+ - name: Setting content
+ ansible.builtin.set_fact:
+ content: "{{ jsoncontent['content'] | from_json }}"
+
+ - name: Set messages fact
+ ansible.builtin.set_fact:
+ raw_messages: "{{ content.raw_messages }}"
+
+ # The when clause is because if we already have an IIB for an ocp version we do not
+ # want to override it (combine will always override existing keys)
+ # Reason for this is that the messages are sorted last first and we only want the
+ # last entries
+ - name: Set output
+ ansible.builtin.set_fact:
+ ocp_versions: "{{ ocp_versions | combine({item['msg']['index']['ocp_version']: {'indeximage': item['msg']['index']['index_image'], 'bundleimage': item['msg']['index']['added_bundle_images'][0]}}) }}"
+ loop: "{{ raw_messages }}"
+ when: item['msg']['index']['ocp_version'] is not in ocp_versions
+ loop_control:
+ label: "{{ item['msg']['index']['ocp_version'] }}"
+
+ - name: Print OCP versions for "{{ operator }}"
+ ansible.builtin.debug:
+ msg: "{{ item.key }} -> {{ item.value }}"
+ loop: "{{ ocp_versions | dict2items }}"
+ loop_control:
+ label: "{{ item.key }}"
diff --git a/common/ansible/playbooks/k8s_secrets/k8s_secrets.yml b/common/ansible/playbooks/k8s_secrets/k8s_secrets.yml
new file mode 100644
index 00000000..989a498a
--- /dev/null
+++ b/common/ansible/playbooks/k8s_secrets/k8s_secrets.yml
@@ -0,0 +1,9 @@
+---
+- name: Secrets parsing and direct loading
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ roles:
+ - find_vp_secrets
+ - cluster_pre_check
+ - k8s_secret_utils
diff --git a/common/ansible/playbooks/process_secrets/display_secrets_info.yml b/common/ansible/playbooks/process_secrets/display_secrets_info.yml
new file mode 100644
index 00000000..4d972359
--- /dev/null
+++ b/common/ansible/playbooks/process_secrets/display_secrets_info.yml
@@ -0,0 +1,29 @@
+---
+- name: Parse and display secrets
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  vars:
+    secrets_backing_store: "vault"
+  tasks:
+    # Set the VALUES_SECRET environment variable to the file to parse
+    - name: Find and decrypt secrets if needed
+      ansible.builtin.include_role:
+        name: find_vp_secrets
+
+    # find_vp_secrets will return a plaintext data structure called values_secrets_data
+    # This will allow us to determine schema version and which backend to use
+    # NOTE(review): secrets_yaml is computed here but not referenced later in
+    # this play — confirm whether it can be dropped.
+    - name: Determine how to load secrets
+      ansible.builtin.set_fact:
+        secrets_yaml: '{{ values_secrets_data | from_yaml }}'
+
+    # no_log keeps the plaintext secrets out of the task output;
+    # pass -e override_no_log=false to debug
+    - name: Parse secrets data
+      no_log: '{{ override_no_log | default(true) }}'
+      parse_secrets_info:
+        values_secrets_plaintext: "{{ values_secrets_data }}"
+        secrets_backing_store: "{{ secrets_backing_store }}"
+      register: secrets_results
+
+    - name: Display secrets data
+      ansible.builtin.debug:
+        var: secrets_results
diff --git a/common/ansible/playbooks/process_secrets/process_secrets.yml b/common/ansible/playbooks/process_secrets/process_secrets.yml
new file mode 100644
index 00000000..ecc1b565
--- /dev/null
+++ b/common/ansible/playbooks/process_secrets/process_secrets.yml
@@ -0,0 +1,50 @@
+---
+- name: Parse and load secrets
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  vars:
+    # Defaults target the vault backend; switched to the k8s loader below
+    # when the backing store is kubernetes/none
+    secrets_role: 'vault_utils'
+    pattern_name: 'common'
+    pattern_dir: '.'
+    secrets_backing_store: 'vault'
+    tasks_from: 'push_parsed_secrets'
+  tasks:
+    - name: "Run secret-loading pre-requisites"
+      ansible.builtin.include_role:
+        name: '{{ item }}'
+      loop:
+        - cluster_pre_check
+        - find_vp_secrets
+
+    # find_vp_secrets will return a plaintext data structure called values_secrets_data
+    # This will allow us to determine schema version and which backend to use
+    - name: Determine how to load secrets
+      ansible.builtin.set_fact:
+        secrets_yaml: '{{ values_secrets_data | from_yaml }}'
+
+    - name: Parse secrets data
+      no_log: '{{ override_no_log | default(true) }}'
+      parse_secrets_info:
+        values_secrets_plaintext: "{{ values_secrets_data }}"
+        secrets_backing_store: "{{ secrets_backing_store }}"
+      register: secrets_results
+
+    # Use the k8s secrets loader when explicitly requested
+    # NOTE(review): the version check is a lexicographic string comparison,
+    # so e.g. '10.0' would compare lower than '2.0' — confirm this is intended.
+    - name: Determine role to use to load secrets
+      ansible.builtin.set_fact:
+        secrets_role: 'k8s_secret_utils'
+        tasks_from: 'inject_k8s_secrets'
+      when:
+        - secrets_backing_store == "kubernetes" or secrets_backing_store == "none"
+        - secrets_yaml['version'] | default('2.0') >= '2.0'
+
+    # secrets_role will have been changed from the default if needed
+    - name: Load secrets using designated role and tasks
+      ansible.builtin.include_role:
+        name: '{{ secrets_role }}'
+        tasks_from: '{{ tasks_from }}'
+      vars:
+        kubernetes_secret_objects: "{{ secrets_results['kubernetes_secret_objects'] }}"
+        vault_policies: "{{ secrets_results['vault_policies'] }}"
+        parsed_secrets: "{{ secrets_results['parsed_secrets'] }}"
diff --git a/common/ansible/playbooks/vault/vault.yaml b/common/ansible/playbooks/vault/vault.yaml
new file mode 100644
index 00000000..b0da9405
--- /dev/null
+++ b/common/ansible/playbooks/vault/vault.yaml
@@ -0,0 +1,9 @@
+---
+# Find/decrypt the values-secret file, verify cluster access,
+# then initialize/load the in-cluster vault via the vault_utils role.
+- name: Vault initialization
+  hosts: localhost
+  connection: local
+  gather_facts: false
+  roles:
+    - find_vp_secrets
+    - cluster_pre_check
+    - vault_utils
diff --git a/common/ansible/plugins/__init__.py b/common/ansible/plugins/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/common/ansible/plugins/filter/parse_acm_secrets.py b/common/ansible/plugins/filter/parse_acm_secrets.py
new file mode 100644
index 00000000..0445d96d
--- /dev/null
+++ b/common/ansible/plugins/filter/parse_acm_secrets.py
@@ -0,0 +1,83 @@
+# This filter takes a bunch of acm secrets that represent the remote clusters
+# (Usually it is all secrets that are labeled with:
+# "apps.open-cluster-management.io/secret-type=acm-cluster")
+
+# These secrets are usually in the form of:
+# data:
+# config: ewogIC...
+# name: bWNnLW9uZQ==
+# server: aHR0cHM6Ly9hcGkubWNnLW9uZS5ibHVlcHJpbnRzLnJoZWNvZW5nLmNvbTo2NDQz
+
+# The filter parses the secret (name, server, config) and returns a dictionary of secrets in the
+# following form:
+# <cluster-name>:
+#   name: <cluster-name>
+#   cluster_fqdn: <cluster-fqdn>
+#   server_api: https://api.<cluster-fqdn>:6443
+#   bearerToken: <bearer-token>
+#   tlsClientConfig: <tls-client-config>
+#   vault_path: "hub" when it is the ACM hub or <cluster-fqdn> in the other cases
+
+import json
+from base64 import b64decode
+
+
+# These are the labels of an acm secret
+# labels:
+# apps.open-cluster-management.io/cluster-name: local-cluster
+# apps.open-cluster-management.io/cluster-server: api.mcg-hub.blueprints.rhecoeng.com
+# apps.open-cluster-management.io/secret-type: acm-cluster
+def get_cluster_name(secret):
+ if "metadata" in secret and "labels" in secret["metadata"]:
+ return secret["metadata"]["labels"].get(
+ "apps.open-cluster-management.io/cluster-name", None
+ )
+ return None
+
+
+def is_cluster_a_hub(name):
+ if name == "local-cluster":
+ return True
+ return False
+
+
+def get_cluster_fqdn(secret):
+ if "metadata" in secret and "labels" in secret["metadata"]:
+ server = secret["metadata"]["labels"].get(
+ "apps.open-cluster-management.io/cluster-server", None
+ )
+ # It is rather hard to override this in an OCP deployment so we are
+ # okay in just dropping 'api.'
+ return server.removeprefix("api.")
+ return None
+
+
+def parse_acm_secrets(secrets):
+ ret = {}
+ for secret in secrets:
+ cluster = get_cluster_name(secret)
+ if cluster is None:
+ continue
+
+ ret[cluster] = {}
+ name = b64decode(secret["data"]["name"])
+ ret[cluster]["name"] = name
+ ret[cluster]["server_api"] = b64decode(secret["data"]["server"])
+ fqdn = get_cluster_fqdn(secret)
+ ret[cluster]["cluster_fqdn"] = fqdn
+ if is_cluster_a_hub(name):
+ ret[cluster]["vault_path"] = "hub"
+ else:
+ ret[cluster]["vault_path"] = fqdn
+
+ config = b64decode(secret["data"]["config"])
+ parsed_config = json.loads(config)
+ ret[cluster]["bearerToken"] = parsed_config["bearerToken"]
+ ret[cluster]["tlsClientConfig"] = parsed_config["tlsClientConfig"]
+
+ return ret
+
+
+class FilterModule:
+    # Entry point Ansible uses to discover the filters exported by this plugin
+    def filters(self):
+        return {"parse_acm_secrets": parse_acm_secrets}
diff --git a/common/ansible/plugins/module_utils/load_secrets_common.py b/common/ansible/plugins/module_utils/load_secrets_common.py
new file mode 100644
index 00000000..b4ebc816
--- /dev/null
+++ b/common/ansible/plugins/module_utils/load_secrets_common.py
@@ -0,0 +1,124 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module that implements some common functions
+"""
+
+import configparser
+from collections.abc import MutableMapping
+
+
+def find_dupes(array):
+    """
+    Returns duplicate items in a list
+
+    Parameters:
+        array(list): Array to check for duplicate entries
+
+    Returns:
+        dupes(list): Array containing all the duplicates and [] if there are none
+    """
+    seen = set()
+    dupes = []
+    for x in array:
+        if x in seen:
+            dupes.append(x)
+        else:
+            seen.add(x)
+    return dupes
+
+
+def get_version(syaml):
+    """
+    Return the version: of the parsed yaml object. If it does not exist
+    return 1.0
+
+    Returns:
+        ret(str): The value of the top-level 'version:' key coerced to a
+        string ('1.0' when the key is absent)
+    """
+    return str(syaml.get("version", "1.0"))
+
+
+def flatten(dictionary, parent_key=False, separator="."):
+ """
+ Turn a nested dictionary into a flattened dictionary and also
+ drop any key that has 'None' as their value
+
+ Parameters:
+ dictionary(dict): The dictionary to flatten
+
+ parent_key(str): The string to prepend to dictionary's keys
+
+ separator(str): The string used to separate flattened keys
+
+ Returns:
+
+ dictionary: A flattened dictionary where the keys represent the
+ path to reach the leaves
+ """
+
+ items = []
+ for key, value in dictionary.items():
+ new_key = str(parent_key) + separator + key if parent_key else key
+ if isinstance(value, MutableMapping):
+ items.extend(flatten(value, new_key, separator).items())
+ elif isinstance(value, list):
+ for k, v in enumerate(value):
+ items.extend(flatten({str(k): v}, new_key).items())
+ else:
+ if value is not None:
+ items.append((new_key, value))
+ return dict(items)
+
+
+def get_ini_value(inifile, inisection, inikey):
+    """
+    Return a value from an ini-file or 'None' if it does not exist
+
+    Parameters:
+        inifile(str): The path to the ini-file
+
+        inisection(str): The section in the ini-file to look for the key
+
+        inikey(str): The key to look up inside the ini-file's section
+
+    Returns:
+
+        obj: The value of the key or None if it does not exist
+    """
+    config = configparser.ConfigParser()
+    # configparser.read() silently ignores a missing file, so the
+    # fallback=None below also covers the file-not-found case
+    config.read(inifile)
+    return config.get(inisection, inikey, fallback=None)
+
+
+def stringify_dict(input_dict):
+    """
+    Return a dict whose keys and values are all co-erced to strings, for creating labels and annotations in the
+    python Kubernetes module
+
+    Parameters:
+        input_dict(dict): A dictionary of keys and values
+
+    Returns:
+
+        obj: The same dict in the same order but with both keys and values
+        coerced to str
+    """
+    output_dict = {}
+
+    for key, value in input_dict.items():
+        output_dict[str(key)] = str(value)
+
+    return output_dict
diff --git a/common/ansible/plugins/module_utils/load_secrets_v1.py b/common/ansible/plugins/module_utils/load_secrets_v1.py
new file mode 100644
index 00000000..6478ac26
--- /dev/null
+++ b/common/ansible/plugins/module_utils/load_secrets_v1.py
@@ -0,0 +1,267 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module that implements V1 of the values-secret.yaml spec
+"""
+
+import base64
+import os
+import time
+
+import yaml
+from ansible.module_utils.load_secrets_common import flatten, get_version
+
+
+class LoadSecretsV1:
+    """Implements parsing and vault injection of the v1.0 values-secret.yaml spec."""
+
+    def __init__(
+        self,
+        module,
+        syaml,
+        basepath,
+        namespace,
+        pod,
+        values_secret_template,
+        check_missing_secrets,
+    ):
+        # module: the AnsibleModule instance (used for run_command/fail_json)
+        self.module = module
+        # basepath: vault path prefix under which secrets are written
+        self.basepath = basepath
+        # namespace/pod: where the vault pod runs ('oc exec' target)
+        self.namespace = namespace
+        self.pod = pod
+        # Template file used by check_for_missing_secrets()
+        self.values_secret_template = values_secret_template
+        self.check_missing_secrets = check_missing_secrets
+        # syaml: the parsed values-secret.yaml object
+        self.syaml = syaml
+
+    def _run_command(self, command, attempts=1, sleep=3):
+        """
+        Runs a command on the host ansible is running on. A failing command
+        makes the module exit with a failure (due to check_rc=True).
+
+        Parameters:
+            command(str): The command to be run.
+            attempts(int): Number of times to retry in case of Error (defaults to 1)
+            sleep(int): Number of seconds to wait in between retry attempts (defaults to 3s)
+
+        Returns:
+            ret(tuple): The (rc, stdout, stderr) tuple from module.run_command()
+        """
+        for attempt in range(attempts):
+            ret = self.module.run_command(
+                command,
+                check_rc=True,
+                use_unsafe_shell=True,
+                environ_update=os.environ.copy(),
+            )
+            if ret[0] == 0:
+                return ret
+            # Out of retries: return the failing result as-is
+            if attempt >= attempts - 1:
+                return ret
+            time.sleep(sleep)
+
+ def sanitize_values(self):
+ """
+ Sanitizes the secrets YAML object. If a specific secret key has
+ s3.accessKey and s3.secretKey but not s3Secret, the latter will be
+ generated as the base64 encoding of both s3.accessKey and s3.secretKey.
+
+ secrets:
+ test:
+ s3.accessKey: "1234"
+ s3.secretKey: "4321"
+
+ will push three secrets at 'secret/hub/test':
+
+ s3.accessKey: 1234
+ s3.secretKey: 4321
+ s3Secret: czMuYWNjZXNzS2V5OiAxMjM0CnMzLnNlY3JldEtleTogNDMyMQ==
+
+ Parameters:
+
+ Returns:
+ Nothing: Updates self.syaml(obj)
+ """
+ v = get_version(self.syaml)
+ if v != "1.0":
+ self.module.fail_json(f"Version is not 1.0: {v}")
+
+ if not ("secrets" in self.syaml or "files" in self.syaml):
+ self.module.fail_json(
+ f"Values secrets file does not contain 'secrets' or"
+ f"'files' keys: {self.syaml}"
+ )
+
+ if self.check_missing_secrets and self.values_secret_template == "":
+ self.module.fail_json(
+ "No values_secret_template defined and check_missing_secrets set to True"
+ )
+ # If the user specified check_for_missing_secrets then we read values_secret_template
+ # and check if there are any missing secrets. Makes sense only for v1.0
+ if self.check_missing_secrets:
+ self.check_for_missing_secrets()
+
+ secrets = self.syaml.get("secrets", {})
+ # We need to explicitely check for None because the file might contain the
+ # top-level 'secrets:' or 'files:' key but have nothing else under it which will
+ # return None and not {}
+ if secrets is None:
+ secrets = {}
+ files = self.syaml.get("files", {})
+ if files is None:
+ files = {}
+ if len(secrets) == 0 and len(files) == 0:
+ self.module.fail_json(
+ "Neither 'secrets' nor 'files have any secrets to be parsed"
+ )
+
+ if isinstance(secrets, list) or isinstance(files, list):
+ self.module.fail_json("Neither 'secrets' nor 'files can be lists")
+
+ for secret in secrets:
+ if not isinstance(secrets[secret], dict):
+ self.module.fail_json(
+ "Each key under 'secrets' needs to point to "
+ "a dictionary of key value pairs"
+ )
+
+ for file in files:
+ path = files[file]
+ if not os.path.isfile(os.path.expanduser(path)):
+ self.module.fail_json(f"File {path} does not exist")
+
+ # If s3Secret key does not exist but s3.accessKey and s3.secretKey do exist
+ # generate s3Secret so a user does not need to do it manually which tends to be error-prone
+ for secret in secrets:
+ tmp = secrets[secret]
+ if (
+ "s3.accessKey" in tmp
+ and "s3.secretKey" in tmp
+ and "s3Secret" not in tmp
+ ):
+ s3secret = (
+ f"s3.accessKey: {tmp['s3.accessKey']}\n"
+ f"s3.secretKey: {tmp['s3.secretKey']}"
+ )
+ s3secretb64 = base64.b64encode(s3secret.encode())
+ secrets[secret]["s3Secret"] = s3secretb64.decode("utf-8")
+
+    def get_secrets_vault_paths(self, keyname):
+        """
+        Walks a secrets yaml object to look for all top-level keys that start with
+        'keyname' and returns a list of tuples [(keyname1, path1), (keyname2, path2)...]
+        where the path is the relative vault path
+        For example, given a yaml with the following:
+            secrets:
+                foo: bar
+            secrets.region1:
+                foo: baz
+            secrets.region2:
+                foo: barbaz
+
+        a call with keyname set to 'secrets' will return the following:
+        [('secrets', 'hub'), ('secrets.region1', 'region1'), ('secrets.region2', 'region2')]
+
+        Parameters:
+            keyname(str): The keytypes to look for either usually 'secrets' or 'files'
+
+        Returns:
+            keys_paths(list): List of tuples containing (keyname, relative-vault-path)
+        """
+        all_keys = self.syaml.keys()
+        keys_paths = []
+        for key in all_keys:
+            # We skip any key that does not start with 'secrets' or 'files'
+            # (We should probably bail out in the presence of unexpected top-level keys)
+            if not key.startswith(keyname):
+                continue
+
+            # If there is no '.' after secrets or files, assume the secrets need to
+            # go to the hub vault path
+            if key == keyname:
+                keys_paths.append((key, "hub"))
+                continue
+
+            # We are in the presence of either 'secrets.region-one' or 'files.cluster1' top-level keys
+            tmp = key.split(".", 1)
+            if len(tmp) != 2:
+                self.module.fail_json(
+                    f"values-secrets.yaml key is non-conformant: {key}"
+                )
+
+            keys_paths.append((key, tmp[1]))
+
+        return keys_paths
+
+    # NOTE(bandini): we shell out to oc exec it because of
+    # https://github.com/ansible-collections/kubernetes.core/issues/506 and
+    # https://github.com/kubernetes/kubernetes/issues/89899. Until those are solved
+    # it makes little sense to invoke the APIs via the python wrappers
+    def inject_secrets(self):
+        """
+        Walks a secrets yaml object and injects all the secrets into the vault via 'oc exec' calls
+
+        Parameters:
+
+        Returns:
+            counter(int): The number of secrets injected
+        """
+        counter = 0
+        for i in self.get_secrets_vault_paths("secrets"):
+            path = f"{self.basepath}/{i[1]}"
+            # i[0] is the top-level key ('secrets' or 'secrets.<region>')
+            for secret in self.syaml[i[0]] or []:
+                properties = ""
+                # NOTE(review): values are single-quoted into a shell command;
+                # a value containing a single quote would break the command —
+                # confirm upstream validation rules this out.
+                for key, value in self.syaml[i[0]][secret].items():
+                    properties += f"{key}='{value}' "
+                properties = properties.rstrip()
+                cmd = (
+                    f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+                    f"\"vault kv put '{path}/{secret}' {properties}\""
+                )
+                self._run_command(cmd, attempts=3)
+                counter += 1
+
+        for i in self.get_secrets_vault_paths("files"):
+            path = f"{self.basepath}/{i[1]}"
+            for filekey in self.syaml[i[0]] or []:
+                file = os.path.expanduser(self.syaml[i[0]][filekey])
+                # Copy the file into the pod, then store it both verbatim
+                # (content=) and base64-encoded (b64content=)
+                cmd = (
+                    f"cat '{file}' | oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+                    f"'cat - > /tmp/vcontent'; "
+                    f"oc exec -n {self.namespace} {self.pod} -i -- sh -c 'base64 --wrap=0 /tmp/vcontent | "
+                    f"vault kv put {path}/{filekey} b64content=- content=@/tmp/vcontent; "
+                    f"rm /tmp/vcontent'"
+                )
+                self._run_command(cmd, attempts=3)
+                counter += 1
+        return counter
+
+    def check_for_missing_secrets(self):
+        """
+        Fail the module unless every (flattened) key present in the
+        values_secret_template file is also present in self.syaml.
+        """
+        with open(self.values_secret_template, "r", encoding="utf-8") as file:
+            template_yaml = yaml.safe_load(file.read())
+        if template_yaml is None:
+            self.module.fail_json(f"Template {self.values_secret_template} is empty")
+
+        syaml_flat = flatten(self.syaml)
+        template_flat = flatten(template_yaml)
+
+        syaml_keys = set(syaml_flat.keys())
+        template_keys = set(template_flat.keys())
+
+        # All template keys accounted for: nothing to do
+        if template_keys <= syaml_keys:
+            return
+
+        diff = template_keys - syaml_keys
+        self.module.fail_json(
+            f"Values secret yaml is missing needed secrets from the templates: {diff}"
+        )
diff --git a/common/ansible/plugins/module_utils/load_secrets_v2.py b/common/ansible/plugins/module_utils/load_secrets_v2.py
new file mode 100644
index 00000000..05a5917e
--- /dev/null
+++ b/common/ansible/plugins/module_utils/load_secrets_v2.py
@@ -0,0 +1,456 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module that implements V2 of the values-secret.yaml spec
+"""
+
+import base64
+import getpass
+import os
+import time
+
+from ansible.module_utils.load_secrets_common import (
+ find_dupes,
+ get_ini_value,
+ get_version,
+)
+
+# Built-in vault password policy injected alongside any user-defined
+# vaultPolicies (see _get_vault_policies); fields may reference it by name.
+default_vp_vault_policies = {
+    "validatedPatternDefaultPolicy": (
+        "length=20\n"
+        'rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\n'
+        'rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\n'
+        'rule "charset" { charset = "0123456789" min-chars = 1 }\n'
+        'rule "charset" { charset = "!@#%^&*" min-chars = 1 }\n'
+    )
+}
+
+
+class LoadSecretsV2:
+    """Implements parsing and vault injection of the v2.0 values-secret.yaml spec."""
+
+    def __init__(self, module, syaml, namespace, pod):
+        # module: the AnsibleModule instance (used for run_command/fail_json)
+        self.module = module
+        # namespace/pod: where the vault pod runs ('oc exec' target)
+        self.namespace = namespace
+        self.pod = pod
+        # syaml: the parsed values-secret.yaml object
+        self.syaml = syaml
+
+    def _run_command(self, command, attempts=1, sleep=3, checkrc=True):
+        """
+        Runs a command on the host ansible is running on. When checkrc is
+        True, a failing command makes the module exit with a failure.
+
+        Parameters:
+            command(str): The command to be run.
+            attempts(int): Number of times to retry in case of Error (defaults to 1)
+            sleep(int): Number of seconds to wait in between retry attempts (defaults to 3s)
+            checkrc(bool): Whether a non-zero return code fails the module (defaults to True)
+
+        Returns:
+            ret(tuple): The (rc, stdout, stderr) tuple from module.run_command()
+        """
+        for attempt in range(attempts):
+            ret = self.module.run_command(
+                command,
+                check_rc=checkrc,
+                use_unsafe_shell=True,
+                environ_update=os.environ.copy(),
+            )
+            if ret[0] == 0:
+                return ret
+            # Out of retries: return the failing result as-is
+            if attempt >= attempts - 1:
+                return ret
+            time.sleep(sleep)
+
+    def _get_backingstore(self):
+        """
+        Return the backingStore: of the parsed yaml object. If it does not exist
+        return 'vault'
+
+        Returns:
+            ret(str): The value of the top-level 'backingStore:' key
+        """
+        return str(self.syaml.get("backingStore", "vault"))
+
+    def _get_vault_policies(self, enable_default_vp_policies=True):
+        """
+        Return the dict of vault policies: the hard-coded default VP policy
+        (unless disabled) merged with any user-defined 'vaultPolicies:'.
+        User-defined entries win on name collisions.
+        """
+        # We start off with the hard-coded default VP policy and add the user-defined ones
+        if enable_default_vp_policies:
+            policies = default_vp_vault_policies.copy()
+        else:
+            policies = {}
+        policies.update(self.syaml.get("vaultPolicies", {}))
+        return policies
+
+    def _get_secrets(self):
+        # Top-level 'secrets:' list ({} when absent)
+        return self.syaml.get("secrets", {})
+
+    def _get_field_on_missing_value(self, f):
+        # By default if 'onMissingValue' is missing we assume we need to
+        # error out whenever the value is missing
+        return f.get("onMissingValue", "error")
+
+    def _get_field_value(self, f):
+        return f.get("value", None)
+
+    def _get_field_path(self, f):
+        return f.get("path", None)
+
+    def _get_field_ini_file(self, f):
+        return f.get("ini_file", None)
+
+    def _get_field_kind(self, f):
+        # value: null will be interpreted with None, so let's just
+        # check for the existence of the field, as we use 'value: null' to say
+        # "we want a value/secret and not a file path"
+        found = []
+        for i in ["value", "path", "ini_file"]:
+            if i in f:
+                found.append(i)
+
+        if len(found) > 1:  # you can only have one of value, path and ini_file
+            self.module.fail_json(f"Both '{found[0]}' and '{found[1]}' cannot be used")
+
+        # Empty string means "no kind specified at all"
+        if len(found) == 0:
+            return ""
+        return found[0]
+
+    def _get_field_prompt(self, f):
+        return f.get("prompt", None)
+
+    def _get_field_base64(self, f):
+        # 'base64' defaults to False when omitted
+        return bool(f.get("base64", False))
+
+    def _get_field_override(self, f):
+        # 'override' defaults to False when omitted
+        return bool(f.get("override", False))
+
+    # This function could use some rewriting and it should call a specific validation function
+    # for each type (value, path, ini_file)
+    def _validate_field(self, f):
+        """
+        Validate a single field dict against the rules of its
+        'onMissingValue' mode. Returns (ok(bool), error-message(str)).
+        """
+        # These fields are mandatory
+        try:
+            _ = f["name"]
+        except KeyError:
+            return (False, f"Field {f} is missing name")
+
+        on_missing_value = self._get_field_on_missing_value(f)
+        if on_missing_value not in ["error", "generate", "prompt"]:
+            return (False, f"onMissingValue: {on_missing_value} is invalid")
+
+        value = self._get_field_value(f)
+        path = self._get_field_path(f)
+        ini_file = self._get_field_ini_file(f)
+        kind = self._get_field_kind(f)
+        if kind == "ini_file":
+            # if we are using ini_file then at least ini_key needs to be defined
+            # ini_section defaults to 'default' when omitted
+            ini_key = f.get("ini_key", None)
+            if ini_key is None:
+                return (
+                    False,
+                    "ini_file requires at least ini_key to be defined",
+                )
+
+        # Test if base64 is a correct boolean (defaults to False)
+        _ = self._get_field_base64(f)
+        _ = self._get_field_override(f)
+
+        vault_policy = f.get("vaultPolicy", None)
+        if vault_policy is not None and vault_policy not in self._get_vault_policies():
+            return (
+                False,
+                f"Secret has vaultPolicy set to {vault_policy} but no such policy exists",
+            )
+
+        # 'error' mode: one of value/path/ini_file must be set and must exist
+        if on_missing_value in ["error"]:
+            if (
+                (value is None or len(value) < 1)
+                and (path is None or len(path) < 1)
+                and (ini_file is None or len(ini_file) < 1)
+            ):
+                return (
+                    False,
+                    "Secret has onMissingValue set to 'error' and has neither value nor path nor ini_file set",
+                )
+            if path is not None and not os.path.isfile(os.path.expanduser(path)):
+                return (False, f"Field has non-existing path: {path}")
+
+            if ini_file is not None and not os.path.isfile(
+                os.path.expanduser(ini_file)
+            ):
+                return (False, f"Field has non-existing ini_file: {ini_file}")
+
+            if "override" in f:
+                return (
+                    False,
+                    "'override' attribute requires 'onMissingValue' to be set to 'generate'",
+                )
+
+        # 'generate' mode: the vault produces the value, so no value/path allowed
+        if on_missing_value in ["generate"]:
+            if value is not None:
+                return (
+                    False,
+                    "Secret has onMissingValue set to 'generate' but has a value set",
+                )
+            if path is not None:
+                return (
+                    False,
+                    "Secret has onMissingValue set to 'generate' but has a path set",
+                )
+            if vault_policy is None:
+                return (
+                    False,
+                    "Secret has no vaultPolicy but onMissingValue is set to 'generate'",
+                )
+
+        if on_missing_value in ["prompt"]:
+            # When we prompt, the user needs to set one of the following:
+            # - value: null # prompt for a secret without a default value
+            # - value: 123 # prompt for a secret but use a default value
+            # - path: null # prompt for a file path without a default value
+            # - path: /tmp/ca.crt # prompt for a file path with a default value
+            if "value" not in f and "path" not in f:
+                return (
+                    False,
+                    "Secret has onMissingValue set to 'prompt' but has no value nor path fields",
+                )
+
+            if "override" in f:
+                return (
+                    False,
+                    "'override' attribute requires 'onMissingValue' to be set to 'generate'",
+                )
+
+        return (True, "")
+
+    def _validate_secrets(self):
+        """
+        Validate every secret entry (and its fields) in the parsed yaml.
+        Fails the module outright when no secrets exist; otherwise returns
+        (ok(bool), error-message(str)).
+        """
+        secrets = self._get_secrets()
+        if len(secrets) == 0:
+            self.module.fail_json("No secrets found")
+
+        names = []
+        for s in secrets:
+            # These fields are mandatory
+            for i in ["name"]:
+                try:
+                    _ = s[i]
+                except KeyError:
+                    return (False, f"Secret {s['name']} is missing {i}")
+            names.append(s["name"])
+
+            vault_prefixes = s.get("vaultPrefixes", ["hub"])
+            # This checks for the case when vaultPrefixes: is specified but empty
+            if vault_prefixes is None or len(vault_prefixes) == 0:
+                return (False, f"Secret {s['name']} has empty vaultPrefixes")
+
+            fields = s.get("fields", [])
+            if len(fields) == 0:
+                return (False, f"Secret {s['name']} does not have any fields")
+
+            field_names = []
+            for i in fields:
+                (ret, msg) = self._validate_field(i)
+                if not ret:
+                    return (False, msg)
+                field_names.append(i["name"])
+            field_dupes = find_dupes(field_names)
+            if len(field_dupes) > 0:
+                return (False, f"You cannot have duplicate field names: {field_dupes}")
+
+        dupes = find_dupes(names)
+        if len(dupes) > 0:
+            return (False, f"You cannot have duplicate secret names: {dupes}")
+        return (True, "")
+
+    def inject_vault_policies(self):
+        """
+        Copy each password policy into the vault pod and register it under
+        sys/policies/password/<name> via 'oc exec'.
+        """
+        for name, policy in self._get_vault_policies().items():
+            cmd = (
+                f"echo '{policy}' | oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+                f"'cat - > /tmp/{name}.hcl';"
+                f"oc exec -n {self.namespace} {self.pod} -i -- sh -c 'vault write sys/policies/password/{name} "
+                f" policy=@/tmp/{name}.hcl'"
+            )
+            self._run_command(cmd, attempts=3)
+
+    def sanitize_values(self):
+        """
+        Sanitizes the secrets YAML object version 2.0
+
+        Parameters:
+
+        Returns:
+            Nothing: Updates self.syaml(obj) if needed
+        """
+        v = get_version(self.syaml)
+        if v != "2.0":
+            self.module.fail_json(f"Version is not 2.0: {v}")
+
+        backing_store = self._get_backingstore()
+        if backing_store != "vault":  # we currently only support vault
+            self.module.fail_json(
+                f"Currently only the 'vault' backingStore is supported: {backing_store}"
+            )
+
+        (ret, msg) = self._validate_secrets()
+        if not ret:
+            self.module.fail_json(msg)
+
+ def _get_secret_value(self, name, field):
+ on_missing_value = self._get_field_on_missing_value(field)
+ # We cannot use match + case as RHEL8 has python 3.9 (it needs 3.10)
+ # We checked for errors in _validate_secrets() already
+ if on_missing_value == "error":
+ return field.get("value")
+ elif on_missing_value == "prompt":
+ prompt = self._get_field_prompt(field)
+ if prompt is None:
+ prompt = f"Type secret for {name}/{field['name']}: "
+ value = self._get_field_value(field)
+ if value is not None:
+ prompt += f" [{value}]"
+ prompt += ": "
+ return getpass.getpass(prompt)
+ return None
+
+    def _get_file_path(self, name, field):
+        """
+        Return the path of the file to upload: the literal 'path' in 'error'
+        mode, or a path prompted from the user (with 'path' as the default)
+        in 'prompt' mode. Fails the module when the chosen file is missing.
+        """
+        on_missing_value = self._get_field_on_missing_value(field)
+        if on_missing_value == "error":
+            return os.path.expanduser(field.get("path"))
+        elif on_missing_value == "prompt":
+            prompt = self._get_field_prompt(field)
+            path = self._get_field_path(field)
+            if path is None:
+                path = ""
+
+            if prompt is None:
+                text = f"Type path for file {name}/{field['name']} [{path}]: "
+            else:
+                text = f"{prompt} [{path}]: "
+
+            newpath = getpass.getpass(text)
+            if newpath == "":  # Set the default if no string was entered
+                newpath = path
+
+            if os.path.isfile(os.path.expanduser(newpath)):
+                return newpath
+            self.module.fail_json(f"File {newpath} not found, exiting")
+
+        # _validate_field() should make this unreachable
+        self.module.fail_json("File with wrong onMissingValue")
+
+    def _vault_secret_attr_exists(self, mount, prefix, secret_name, attribute):
+        """
+        Return True when the given attribute of prefix/secret_name already
+        exists in the vault (checked with 'vault kv get' inside the pod).
+        """
+        cmd = (
+            f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+            f'"vault kv get -mount={mount} -field={attribute} {prefix}/{secret_name}"'
+        )
+        # we ignore stdout and stderr
+        (ret, _, _) = self._run_command(cmd, attempts=1, checkrc=False)
+        if ret == 0:
+            return True
+
+        return False
+
+    def _inject_field(self, secret_name, f, mount, prefixes, first=False):
+        """
+        Push one field of a secret into the vault at every prefix, handling
+        the three kinds (value/generated, path, ini_file). 'first' selects
+        'vault kv put' (create) vs 'vault kv patch' (amend).
+        """
+        on_missing_value = self._get_field_on_missing_value(f)
+        override = self._get_field_override(f)
+        kind = self._get_field_kind(f)
+        # If we're generating the password then we just push the secret in the vault directly
+        verb = "put" if first else "patch"
+        b64 = self._get_field_base64(f)
+        if kind in ["value", ""]:
+            if on_missing_value == "generate":
+                # NOTE(review): kind was just computed as 'value' or '' in this
+                # branch, so this check looks unreachable — confirm.
+                if kind == "path":
+                    self.module.fail_json(
+                        "You cannot have onMissingValue set to 'generate' with a path"
+                    )
+                vault_policy = f.get("vaultPolicy")
+                gen_cmd = f"vault read -field=password sys/policies/password/{vault_policy}/generate"
+                if b64:
+                    gen_cmd += " | base64 --wrap=0"
+                for prefix in prefixes:
+                    # if the override field is False and the secret attribute exists at the prefix then we just
+                    # skip, as we do not want to overwrite the existing secret
+                    if not override and self._vault_secret_attr_exists(
+                        mount, prefix, secret_name, f["name"]
+                    ):
+                        continue
+                    cmd = (
+                        f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+                        f"\"{gen_cmd} | vault kv {verb} -mount={mount} {prefix}/{secret_name} {f['name']}=-\""
+                    )
+                    self._run_command(cmd, attempts=3)
+                return
+
+            # If we're not generating the secret inside the vault directly we either read it from the file ("error")
+            # or we are prompting the user for it
+            secret = self._get_secret_value(secret_name, f)
+            if b64:
+                secret = base64.b64encode(secret.encode()).decode("utf-8")
+            for prefix in prefixes:
+                cmd = (
+                    f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+                    f"\"vault kv {verb} -mount={mount} {prefix}/{secret_name} {f['name']}='{secret}'\""
+                )
+                self._run_command(cmd, attempts=3)
+
+        elif kind == "path":  # path. we upload files
+            # If we're generating the password then we just push the secret in the vault directly
+            verb = "put" if first else "patch"
+            path = self._get_file_path(secret_name, f)
+            for prefix in prefixes:
+                if b64:
+                    b64_cmd = "| base64 --wrap=0 "
+                else:
+                    b64_cmd = ""
+                # Copy the (optionally base64-ed) file into the pod, then feed it to vault
+                cmd = (
+                    f"cat '{path}' | oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+                    f"'cat - {b64_cmd}> /tmp/vcontent'; "
+                    f"oc exec -n {self.namespace} {self.pod} -i -- sh -c '"
+                    f"vault kv {verb} -mount={mount} {prefix}/{secret_name} {f['name']}=@/tmp/vcontent; "
+                    f"rm /tmp/vcontent'"
+                )
+                self._run_command(cmd, attempts=3)
+        elif kind == "ini_file":  # ini_file. we parse an ini_file
+            verb = "put" if first else "patch"
+            ini_file = os.path.expanduser(f.get("ini_file"))
+            ini_section = f.get("ini_section", "default")
+            ini_key = f.get("ini_key")
+            secret = get_ini_value(ini_file, ini_section, ini_key)
+            if b64:
+                secret = base64.b64encode(secret.encode()).decode("utf-8")
+            for prefix in prefixes:
+                cmd = (
+                    f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+                    f"\"vault kv {verb} -mount={mount} {prefix}/{secret_name} {f['name']}='{secret}'\""
+                )
+                self._run_command(cmd, attempts=3)
+
+    # This assumes that self.sanitize_values() has already been called
+    # so we do a lot less validation as it has already happened
+    def inject_secrets(self):
+        """
+        Inject every parsed secret field into the vault; returns the total
+        number of fields pushed.
+        """
+        # This must come first as some passwords might depend on vault policies to exist.
+        # It is a noop when no policies are defined
+        self.inject_vault_policies()
+        secrets = self._get_secrets()
+
+        total_secrets = 0  # Counter for all the secrets uploaded
+        for s in secrets:
+            counter = 0  # This counter is to use kv put on first secret and kv patch on latter
+            sname = s.get("name")
+            fields = s.get("fields", [])
+            mount = s.get("vaultMount", "secret")
+            vault_prefixes = s.get("vaultPrefixes", ["hub"])
+            for i in fields:
+                self._inject_field(sname, i, mount, vault_prefixes, counter == 0)
+                counter += 1
+                total_secrets += 1
+
+        return total_secrets
diff --git a/common/ansible/plugins/module_utils/parse_secrets_v2.py b/common/ansible/plugins/module_utils/parse_secrets_v2.py
new file mode 100644
index 00000000..512f75ef
--- /dev/null
+++ b/common/ansible/plugins/module_utils/parse_secrets_v2.py
@@ -0,0 +1,527 @@
+# Copyright 2022, 2023 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module that implements V2 of the values-secret.yaml spec
+"""
+
+import base64
+import getpass
+import os
+
+from ansible.module_utils.load_secrets_common import (
+ find_dupes,
+ get_ini_value,
+ get_version,
+ stringify_dict,
+)
+
+default_vp_vault_policies = {
+ "validatedPatternDefaultPolicy": (
+ "length=20\n"
+ 'rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\n'
+ 'rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\n'
+ 'rule "charset" { charset = "0123456789" min-chars = 1 }\n'
+ 'rule "charset" { charset = "!@#%^&*" min-chars = 1 }\n'
+ )
+}
+
+secret_store_namespace = "validated-patterns-secrets"
+
+
class ParseSecretsV2:
    # Parses a values-secret.yaml document (v2 schema) into the structures the
    # secret-loading backends consume: parsed_secrets, vault_policies and
    # (for kubernetes/none backends) rendered Kubernetes Secret objects.
    def __init__(self, module, syaml, secrets_backing_store):
        """
        Parameters:
            module(AnsibleModule): used only to report fatal errors via fail_json()
            syaml(dict): the already-parsed values-secret YAML document
            secrets_backing_store(str): backend selected by the pattern
                ('vault', 'kubernetes' or 'none')
        """
        self.module = module
        self.syaml = syaml
        self.secrets_backing_store = str(secrets_backing_store)
        # The attributes below are populated by parse():
        self.secret_store_namespace = None
        self.parsed_secrets = {}
        self.kubernetes_secret_objects = []
        self.vault_policies = {}
+
+ def _get_backingstore(self):
+ """
+ Backing store is now influenced by the caller more than the file. Setting
+ Return the backingStore: of the parsed yaml object. In most cases the file
+ key was not set anyway - since vault was the only supported option. Since
+ we are introducing new options now, this method of defining behavior is
+ deprecated, but if the file key is included it must match the option defined
+ by values-global in the pattern, or there is an error. The default remains
+ 'vault' if the key is unspecified.
+
+ Returns:
+ ret(str): The value of the top-level 'backingStore:' key
+ """
+ file_backing_store = str(self.syaml.get("backingStore", "unset"))
+
+ if file_backing_store == "unset":
+ pass
+ else:
+ if file_backing_store != self.secrets_backing_store:
+ self.module.fail_json(
+ f"Secrets file specifies '{file_backing_store}' backend but pattern config "
+ f"specifies '{self.secrets_backing_store}'."
+ )
+
+ return self.secrets_backing_store
+
+ def _get_vault_policies(self, enable_default_vp_policies=True):
+ # We start off with the hard-coded default VP policy and add the user-defined ones
+ if enable_default_vp_policies:
+ policies = default_vp_vault_policies.copy()
+ else:
+ policies = {}
+
+ # This is useful for embedded newlines, which occur with YAML
+ # flow-type scalars (|, |- for example)
+ for name, policy in self.syaml.get("vaultPolicies", {}).items():
+ policies[name] = self._sanitize_yaml_value(policy)
+
+ return policies
+
+ def _get_secrets(self):
+ return self.syaml.get("secrets", {})
+
+ def _get_field_on_missing_value(self, f):
+ # By default if 'onMissingValue' is missing we assume we need to
+ # error out whenever the value is missing
+ return f.get("onMissingValue", "error")
+
+ def _get_field_value(self, f):
+ return f.get("value", None)
+
+ def _get_field_path(self, f):
+ return f.get("path", None)
+
+ def _get_field_ini_file(self, f):
+ return f.get("ini_file", None)
+
    def _get_field_annotations(self, f):
        """Return the field's annotations mapping (empty dict when unset)."""
        return f.get("annotations", {})
+
    def _get_field_labels(self, f):
        """Return the field's labels mapping (empty dict when unset)."""
        return f.get("labels", {})
+
+ def _get_field_kind(self, f):
+ # value: null will be interpreted with None, so let's just
+ # check for the existence of the field, as we use 'value: null' to say
+ # "we want a value/secret and not a file path"
+ found = []
+ for i in ["value", "path", "ini_file"]:
+ if i in f:
+ found.append(i)
+
+ if len(found) > 1: # you can only have one of value, path and ini_file
+ self.module.fail_json(
+ f"Both '{found[0]}' and '{found[1]}' cannot be used "
+ f"in field {f['name']}"
+ )
+
+ if len(found) == 0:
+ return ""
+ return found[0]
+
+ def _get_field_prompt(self, f):
+ return f.get("prompt", None)
+
+ def _get_field_base64(self, f):
+ return bool(f.get("base64", False))
+
+ def _get_field_override(self, f):
+ return bool(f.get("override", False))
+
+ def _get_secret_store_namespace(self):
+ return str(self.syaml.get("secretStoreNamespace", secret_store_namespace))
+
+ def _get_vault_prefixes(self, s):
+ return list(s.get("vaultPrefixes", ["hub"]))
+
    def _get_default_labels(self):
        """Return document-wide default labels, applied when a secret defines none."""
        return self.syaml.get("defaultLabels", {})
+
    def _get_default_annotations(self):
        """Return document-wide default annotations, applied when a secret defines none."""
        return self.syaml.get("defaultAnnotations", {})
+
+ def _append_kubernetes_secret(self, secret_obj):
+ self.kubernetes_secret_objects.append(secret_obj)
+
+ def _sanitize_yaml_value(self, value):
+ # This is useful for embedded newlines, which occur with YAML
+ # flow-type scalars (|, |- for example)
+ if value is not None:
+ sanitized_value = bytes(value, "utf-8").decode("unicode_escape")
+ else:
+ sanitized_value = None
+
+ return sanitized_value
+
+ def _create_k8s_secret(self, sname, secret_type, namespace, labels, annotations):
+ return {
+ "type": secret_type,
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": sname,
+ "namespace": namespace,
+ "annotations": annotations,
+ "labels": labels,
+ },
+ "stringData": {},
+ }
+
+ # This does what inject_secrets used to (mostly)
+ def parse(self):
+ self.sanitize_values()
+ self.vault_policies = self._get_vault_policies()
+ self.secret_store_namespace = self._get_secret_store_namespace()
+ backing_store = self._get_backingstore()
+ secrets = self._get_secrets()
+
+ total_secrets = 0 # Counter for all the secrets uploaded
+ for s in secrets:
+ total_secrets += 1
+ counter = 0 # This counter is to use kv put on first secret and kv patch on latter
+ sname = s.get("name")
+ fields = s.get("fields", [])
+ vault_prefixes = self._get_vault_prefixes(s)
+ secret_type = s.get("type", "Opaque")
+ vault_mount = s.get("vaultMount", "secret")
+ target_namespaces = s.get("targetNamespaces", [])
+ labels = stringify_dict(s.get("labels", self._get_default_labels()))
+ annotations = stringify_dict(
+ s.get("annotations", self._get_default_annotations())
+ )
+
+ self.parsed_secrets[sname] = {
+ "name": sname,
+ "fields": {},
+ "vault_mount": vault_mount,
+ "vault_policies": {},
+ "vault_prefixes": vault_prefixes,
+ "override": [],
+ "generate": [],
+ "paths": {},
+ "base64": [],
+ "ini_file": {},
+ "type": secret_type,
+ "target_namespaces": target_namespaces,
+ "labels": labels,
+ "annotations": annotations,
+ }
+
+ for i in fields:
+ self._inject_field(sname, i)
+ counter += 1
+
+ if backing_store == "kubernetes":
+ k8s_namespaces = [self._get_secret_store_namespace()]
+ else:
+ k8s_namespaces = target_namespaces
+
+ for tns in k8s_namespaces:
+ k8s_secret = self._create_k8s_secret(
+ sname, secret_type, tns, labels, annotations
+ )
+ k8s_secret["stringData"] = self.parsed_secrets[sname]["fields"]
+ self.kubernetes_secret_objects.append(k8s_secret)
+
+ return total_secrets
+
    # This function could use some rewriting and it should call a specific validation function
    # for each type (value, path, ini_file)
    def _validate_field(self, f):
        """
        Validate a single field entry of a secret.

        Returns:
            (bool, str): (True, "") when valid, (False, reason) on the first
            validation failure.
        """
        # These fields are mandatory
        try:
            _ = f["name"]
        except KeyError:
            return (False, f"Field {f} is missing name")

        on_missing_value = self._get_field_on_missing_value(f)
        if on_missing_value not in ["error", "generate", "prompt"]:
            return (False, f"onMissingValue: {on_missing_value} is invalid")

        value = self._get_field_value(f)
        path = self._get_field_path(f)
        ini_file = self._get_field_ini_file(f)
        # _get_field_kind also fails the module when more than one source is set
        kind = self._get_field_kind(f)
        if kind == "ini_file":
            # if we are using ini_file then at least ini_key needs to be defined
            # ini_section defaults to 'default' when omitted
            ini_key = f.get("ini_key", None)
            if ini_key is None:
                return (
                    False,
                    "ini_file requires at least ini_key to be defined",
                )

        # Test if base64 is a correct boolean (defaults to False)
        _ = self._get_field_base64(f)
        _ = self._get_field_override(f)

        # A referenced vaultPolicy must exist among default + user-defined policies
        vault_policy = f.get("vaultPolicy", None)
        if vault_policy is not None and vault_policy not in self._get_vault_policies():
            return (
                False,
                f"Secret has vaultPolicy set to {vault_policy} but no such policy exists",
            )

        if on_missing_value in ["error"]:
            # 'error' needs a usable source: a non-empty inline value, an
            # existing file path, or an existing ini_file
            if (
                (value is None or len(value) < 1)
                and (path is None or len(path) < 1)
                and (ini_file is None or len(ini_file) < 1)
            ):
                return (
                    False,
                    "Secret has onMissingValue set to 'error' and has neither value nor path nor ini_file set",
                )
            if path is not None and not os.path.isfile(os.path.expanduser(path)):
                return (False, f"Field has non-existing path: {path}")

            if ini_file is not None and not os.path.isfile(
                os.path.expanduser(ini_file)
            ):
                return (False, f"Field has non-existing ini_file: {ini_file}")

        if on_missing_value in ["prompt"]:
            # When we prompt, the user needs to set one of the following:
            # - value: null # prompt for a secret without a default value
            # - value: 123 # prompt for a secret but use a default value
            # - path: null # prompt for a file path without a default value
            # - path: /tmp/ca.crt # prompt for a file path with a default value
            if "value" not in f and "path" not in f:
                return (
                    False,
                    "Secret has onMissingValue set to 'prompt' but has no value nor path fields",
                )

            if "override" in f:
                return (
                    False,
                    "'override' attribute requires 'onMissingValue' to be set to 'generate'",
                )

        return (True, "")
+
+ def _validate_secrets(self):
+ backing_store = self._get_backingstore()
+ secrets = self._get_secrets()
+ if len(secrets) == 0:
+ self.module.fail_json("No secrets found")
+
+ names = []
+ for s in secrets:
+ # These fields are mandatory
+ for i in ["name"]:
+ try:
+ _ = s[i]
+ except KeyError:
+ return (False, f"Secret {s['name']} is missing {i}")
+ names.append(s["name"])
+
+ vault_prefixes = s.get("vaultPrefixes", ["hub"])
+ # This checks for the case when vaultPrefixes: is specified but empty
+ if vault_prefixes is None or len(vault_prefixes) == 0:
+ return (False, f"Secret {s['name']} has empty vaultPrefixes")
+
+ namespaces = s.get("targetNamespaces", [])
+ if not isinstance(namespaces, list):
+ return (False, f"Secret {s['name']} targetNamespaces must be a list")
+
+ if backing_store == "none" and namespaces == []:
+ return (
+ False,
+ f"Secret {s['name']} targetNamespaces cannot be empty for secrets backend {backing_store}",
+ ) # noqa: E501
+
+ labels = s.get("labels", {})
+ if not isinstance(labels, dict):
+ return (False, f"Secret {s['name']} labels must be a dictionary")
+
+ annotations = s.get("annotations", {})
+ if not isinstance(annotations, dict):
+ return (False, f"Secret {s['name']} annotations must be a dictionary")
+
+ fields = s.get("fields", [])
+ if len(fields) == 0:
+ return (False, f"Secret {s['name']} does not have any fields")
+
+ field_names = []
+ for i in fields:
+ (ret, msg) = self._validate_field(i)
+ if not ret:
+ return (False, msg)
+ field_names.append(i["name"])
+ field_dupes = find_dupes(field_names)
+ if len(field_dupes) > 0:
+ return (False, f"You cannot have duplicate field names: {field_dupes}")
+
+ dupes = find_dupes(names)
+ if len(dupes) > 0:
+ return (False, f"You cannot have duplicate secret names: {dupes}")
+ return (True, "")
+
    def sanitize_values(self):
        """
        Sanitizes the secrets YAML object version 2.0

        Fails the module on a wrong schema version, an unsupported backing
        store, or any secret/field validation error.

        Parameters:

        Returns:
            Nothing: Updates self.syaml(obj) if needed
        """
        v = get_version(self.syaml)
        if v not in ["2.0"]:
            self.module.fail_json(f"Version is not 2.0: {v}")

        backing_store = self._get_backingstore()
        if backing_store not in [
            "kubernetes",
            "vault",
            "none",
        ]:  # these are the only backing stores this parser supports
            self.module.fail_json(
                f"Currently only the 'vault', 'kubernetes' and 'none' backingStores are supported: {backing_store}"
            )

        (ret, msg) = self._validate_secrets()
        if not ret:
            self.module.fail_json(msg)
+
+ def _get_secret_value(self, name, field):
+ on_missing_value = self._get_field_on_missing_value(field)
+ # We cannot use match + case as RHEL8 has python 3.9 (it needs 3.10)
+ # We checked for errors in _validate_secrets() already
+ if on_missing_value == "error":
+ return self._sanitize_yaml_value(field.get("value"))
+ elif on_missing_value == "prompt":
+ prompt = self._get_field_prompt(field)
+ if prompt is None:
+ prompt = f"Type secret for {name}/{field['name']}: "
+ value = self._get_field_value(field)
+ if value is not None:
+ prompt += f" [{value}]"
+ prompt += ": "
+ return getpass.getpass(prompt)
+ return None
+
    def _get_file_path(self, name, field):
        """
        Resolve the file path for a field of kind 'path'.

        With onMissingValue == 'error' the configured path is returned
        (tilde-expanded). With 'prompt' the user is asked for a path, the
        configured one (if any) being the default; a non-existing path is a
        fatal module error. Any other policy is fatal too.
        """
        on_missing_value = self._get_field_on_missing_value(field)
        if on_missing_value == "error":
            return os.path.expanduser(field.get("path"))
        elif on_missing_value == "prompt":
            prompt = self._get_field_prompt(field)
            path = self._get_field_path(field)
            if path is None:
                path = ""

            if prompt is None:
                text = f"Type path for file {name}/{field['name']} [{path}]: "
            else:
                text = f"{prompt} [{path}]: "

            # NOTE(review): getpass hides what the user types even though this
            # is a file path, not a secret -- confirm this is intended.
            newpath = getpass.getpass(text)
            if newpath == "":  # Set the default if no string was entered
                newpath = path

            if os.path.isfile(os.path.expanduser(newpath)):
                return newpath
            # fail_json() raises/exits, so the function effectively ends here.
            self.module.fail_json(f"File {newpath} not found, exiting")

        self.module.fail_json("File with wrong onMissingValue")
+
+ def _inject_field(self, secret_name, f):
+ on_missing_value = self._get_field_on_missing_value(f)
+ override = self._get_field_override(f)
+ kind = self._get_field_kind(f)
+ b64 = self._get_field_base64(f)
+
+ if kind in ["value", ""]:
+ if on_missing_value == "generate":
+ self.parsed_secrets[secret_name]["generate"].append(f["name"])
+ if self._get_backingstore() != "vault":
+ self.module.fail_json(
+ "You cannot have onMissingValue set to 'generate' unless using vault backingstore "
+ f"for secret {secret_name} field {f['name']}"
+ )
+ else:
+ if kind in ["path", "ini_file"]:
+ self.module.fail_json(
+ "You cannot have onMissingValue set to 'generate' with a path or ini_file"
+ f" for secret {secret_name} field {f['name']}"
+ )
+
+ vault_policy = f.get("vaultPolicy", "validatedPatternDefaultPolicy")
+
+ if override:
+ self.parsed_secrets[secret_name]["override"].append(f["name"])
+
+ if b64:
+ self.parsed_secrets[secret_name]["base64"].append(f["name"])
+
+ self.parsed_secrets[secret_name]["fields"][f["name"]] = None
+ self.parsed_secrets[secret_name]["vault_policies"][
+ f["name"]
+ ] = vault_policy
+
+ return
+
+ # If we're not generating the secret inside the vault directly we either read it from the file ("error")
+ # or we are prompting the user for it
+ secret = self._get_secret_value(secret_name, f)
+ if b64:
+ secret = base64.b64encode(secret.encode()).decode("utf-8")
+ self.parsed_secrets[secret_name]["base64"].append(f["name"])
+
+ self.parsed_secrets[secret_name]["fields"][f["name"]] = secret
+
+ elif kind == "path": # path. we upload files
+ path = self._get_file_path(secret_name, f)
+ self.parsed_secrets[secret_name]["paths"][f["name"]] = path
+
+ binfile = False
+
+ # Default to UTF-8
+ try:
+ secret = open(path, encoding="utf-8").read()
+ except UnicodeDecodeError:
+ secret = open(path, "rb").read()
+ binfile = True
+
+ if b64:
+ self.parsed_secrets[secret_name]["base64"].append(f["name"])
+ if binfile:
+ secret = base64.b64encode(bytes(secret)).decode("utf-8")
+ else:
+ secret = base64.b64encode(secret.encode()).decode("utf-8")
+
+ self.parsed_secrets[secret_name]["fields"][f["name"]] = secret
+ elif kind == "ini_file": # ini_file. we parse an ini_file
+ ini_file = os.path.expanduser(f.get("ini_file"))
+ ini_section = f.get("ini_section", "default")
+ ini_key = f.get("ini_key")
+ secret = get_ini_value(ini_file, ini_section, ini_key)
+ if b64:
+ self.parsed_secrets[secret_name]["base64"].append(f["name"])
+ secret = base64.b64encode(secret.encode()).decode("utf-8")
+
+ self.parsed_secrets[secret_name]["ini_file"][f["name"]] = {
+ "ini_file": ini_file,
+ "ini_section": ini_section,
+ "ini_key": ini_key,
+ }
+ self.parsed_secrets[secret_name]["fields"][f["name"]] = secret
+
+ return
diff --git a/common/ansible/plugins/modules/parse_secrets_info.py b/common/ansible/plugins/modules/parse_secrets_info.py
new file mode 100644
index 00000000..b962271a
--- /dev/null
+++ b/common/ansible/plugins/modules/parse_secrets_info.py
@@ -0,0 +1,149 @@
+# Copyright 2022,2023 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Ansible plugin module that parses a values-secret.yaml file (v2 schema) and
+returns the resulting data structures: the parsed secrets with their fields
+and metadata, the vault password policies to create, and any Kubernetes
+Secret objects to emit.
+
+This module only reads and interprets the file; the actual loading into a
+secrets backend is performed by backend-specific modules (for example
+vault_load_parsed_secrets for the vault backend).
+"""
+
+import yaml
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parse_secrets_v2 import ParseSecretsV2
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.2",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: parse_secrets_info
+short_description: Parses a Validated Patterns Secrets file for later loading
+version_added: "2.50"
+author: "Martin Jackson"
+description:
+ - Takes a values-secret.yaml file, parses and returns values for secrets loading. The goal here is to do all the
+ work of reading and interpreting the file and resolving the content pointers (that is, creating content where it
+ is given) such that that content is then available for secrets vaults to load. It does not attempt to load the
+ content or interpret the content beyond the conventions of the file format. (So, it knows how to retrieve
+ ini-keys, about paths, and about base64 but leaves interaction with backends to backend-specific code.
+options:
+ values_secrets_plaintext:
+ description:
+ - The unencrypted content of the values-secrets file
+ required: true
+ type: str
+ secrets_backing_store:
+ description:
+ - The secrets backing store that will be used for parsed secrets (i.e. vault, kubernetes, none)
+ required: false
+ default: vault
+ type: str
+"""
+
+RETURN = """
+"""
+
+EXAMPLES = """
+- name: Parse secrets file into objects - backingstore defaults to vault
+ parse_secrets_info:
+ values_secrets_plaintext: '{{ }}'
+ register: secrets_info
+
+- name: Parse secrets file into data structures
+ parse_secrets_info:
+ values_secrets_plaintext: '{{ }}'
+ secrets_backing_store: 'kubernetes'
+ register: secrets_info
+
+- name: Parse secrets file into data structures
+ parse_secrets_info:
+ values_secrets_plaintext: '{{ }}'
+ secrets_backing_store: 'none'
+ register: secrets_info
+"""
+
+
def run(module):
    """Main ansible module entry point"""
    results = dict(changed=False)

    args = module.params
    values_secrets_plaintext = args.get("values_secrets_plaintext", "")
    secrets_backing_store = args.get("secrets_backing_store", "vault")

    syaml = yaml.safe_load(values_secrets_plaintext)

    # An empty plaintext document parses to None; normalize to an empty dict
    # so the parser can fail with a meaningful validation message instead.
    if syaml is None:
        syaml = {}

    parsed_secret_obj = ParseSecretsV2(module, syaml, secrets_backing_store)
    parsed_secret_obj.parse()

    # Parsing is read-only: never report a change.
    results["failed"] = False
    results["changed"] = False

    results["vault_policies"] = parsed_secret_obj.vault_policies
    results["parsed_secrets"] = parsed_secret_obj.parsed_secrets
    results["kubernetes_secret_objects"] = parsed_secret_obj.kubernetes_secret_objects
    results["secret_store_namespace"] = parsed_secret_obj.secret_store_namespace

    module.exit_json(**results)
+
+
def main():
    """Main entry point where the AnsibleModule class is instantiated"""
    # The argument spec is derived directly from the DOCUMENTATION string so
    # the two can never drift apart.
    module = AnsibleModule(
        argument_spec=yaml.safe_load(DOCUMENTATION)["options"],
        supports_check_mode=True,
    )
    run(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/common/ansible/plugins/modules/vault_load_parsed_secrets.py b/common/ansible/plugins/modules/vault_load_parsed_secrets.py
new file mode 100644
index 00000000..cfcf9732
--- /dev/null
+++ b/common/ansible/plugins/modules/vault_load_parsed_secrets.py
@@ -0,0 +1,302 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Ansible plugin module that loads secrets and policies once parsed and pushes them
+into a HashiCorp Vault in an OCP cluster. The values-secrets.yaml file is
+expected to be in the following format:
+---
+# version is optional. When not specified it is assumed it is 2.0
+version: 2.0
+
+"""
+
+import os
+import time
+
+import yaml
+from ansible.module_utils.basic import AnsibleModule
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: vault_load_parsed_secrets
+short_description: Loads secrets into the HashiCorp Vault
+version_added: "2.50"
+author: "Martin Jackson"
+description:
+ - Takes parsed secrets objects and vault policies (as delivered by parse_secrets_info) and runs the commands to
+    load them into a vault instance. The relevant metadata will exist in the parsed secrets object. Returns count
+ of secrets injected.
+options:
+ parsed_secrets:
+ description:
+ - A structure containing the secrets, fields, and their metadata
+ required: true
+ type: dict
+ vault_policies:
+ description:
+ - Vault policies to inject into the instance.
+ required: true
+ type: dict
+ namespace:
+ description:
+ - Namespace where the vault is running
+ required: false
+ type: str
+ default: vault
+ pod:
+ description:
+ - Name of the vault pod to use to inject secrets
+ required: false
+ type: str
+ default: vault-0
+"""
+
+RETURN = """
+"""
+
+EXAMPLES = """
+- name: Loads secrets file into the vault of a cluster
+ vault_load_parsed_secrets:
+ parsed_secrets: "{{ parsed_secrets_structure_from_parse_secrets_info }}"
+ vault_policies: "{{ parsed_vault_policies_structure_from_parse_secrets_info }}"
+"""
+
+
class VaultSecretLoader:
    # Loads pre-parsed secrets (as produced by parse_secrets_info) and vault
    # password policies into a vault pod via 'oc exec'.
    def __init__(
        self,
        module,
        parsed_secrets,
        vault_policies,
        namespace,
        pod,
    ):
        """
        Parameters:
            module(AnsibleModule): used to run commands and report failures
            parsed_secrets(dict): per-secret structures from parse_secrets_info
            vault_policies(dict): policy name -> policy document to create
            namespace(str): namespace where the vault pod runs
            pod(str): name of the vault pod to exec into
        """
        self.module = module
        self.parsed_secrets = parsed_secrets
        self.vault_policies = vault_policies
        self.namespace = namespace
        self.pod = pod
+
    def _run_command(self, command, attempts=1, sleep=3, checkrc=True):
        """
        Runs a command on the host ansible is running on, retrying on failure.

        Parameters:
            command(str): The command to be run.
            attempts(int): Number of times to retry in case of Error (defaults to 1)
            sleep(int): Number of seconds to wait in between retry attempts (defaults to 3s)
            checkrc(bool): passed through as check_rc to AnsibleModule.run_command;
                when True a non-zero return code fails the module (defaults to True)

        Returns:
            ret(tuple): The (rc, stdout, stderr) tuple from
            AnsibleModule.run_command; when every attempt fails, the result of
            the last attempt.
        """
        for attempt in range(attempts):
            ret = self.module.run_command(
                command,
                check_rc=checkrc,
                use_unsafe_shell=True,
                environ_update=os.environ.copy(),
            )
            if ret[0] == 0:
                return ret
            # last attempt: return the failing result rather than sleeping again
            if attempt >= attempts - 1:
                return ret
            time.sleep(sleep)
+
+ def _vault_secret_attr_exists(self, mount, prefix, secret_name, attribute):
+ cmd = (
+ f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
+ f'"vault kv get -mount={mount} -field={attribute} {prefix}/{secret_name}"'
+ )
+ # we ignore stdout and stderr
+ (ret, _, _) = self._run_command(cmd, attempts=1, checkrc=False)
+ if ret == 0:
+ return True
+
+ return False
+
+ def load_vault(self):
+ injected_secret_count = 0
+
+ self.inject_vault_policies()
+
+ for secret_name, secret in self.parsed_secrets.items():
+ self.inject_secret(secret_name, secret)
+ injected_secret_count += 1
+
+ return injected_secret_count
+
    def inject_field(
        self,
        secret_name,
        soverride,
        sbase64,
        sgenerate,
        spaths,
        svault_policies,
        fieldname,
        fieldvalue,
        mount,
        vault_prefixes,
        first=False,
    ):
        """
        Inject one field of a secret into the vault under every vault prefix.

        'first' selects 'vault kv put' (create) over 'vault kv patch' (append).
        The s* parameters are the per-secret metadata lists/dicts produced by
        parse_secrets_info, keyed or membership-tested by field name.
        """
        # Special cases:
        # generate w|wo override
        # path (w|wo b64)
        #
        # inifile secrets will be resolved by parser
        # values (including base64'd ones) will be resolved by parser
        # And we just ignore k8s or other fields

        override = True if fieldname in soverride else False
        b64 = True if fieldname in sbase64 else False
        generate = True if fieldname in sgenerate else False
        path = spaths.get(fieldname, False)
        prefixes = vault_prefixes
        verb = "put" if first else "patch"
        policy = svault_policies.get(fieldname, False)

        # "generate" secrets are created with policies and may be overridden or not
        if generate:
            gen_cmd = (
                f"vault read -field=password sys/policies/password/{policy}/generate"
            )
            if b64:
                gen_cmd += " | base64 --wrap=0"
            for prefix in prefixes:
                # if the override field is False and the secret attribute exists at the prefix then we just
                # skip, as we do not want to overwrite the existing secret
                if not override and self._vault_secret_attr_exists(
                    mount, prefix, secret_name, fieldname
                ):
                    continue
                cmd = (
                    f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
                    f'"{gen_cmd} | vault kv {verb} -mount={mount} {prefix}/{secret_name} {fieldname}=-"'
                )
                self._run_command(cmd, attempts=3)
            return

        # File-backed fields: stream the file into the pod, store it from
        # there, then remove the temporary copy.
        if path:
            for prefix in prefixes:
                if b64:
                    b64_cmd = "| base64 --wrap=0"
                else:
                    b64_cmd = ""
                cmd = (
                    f"cat '{path}' | oc exec -n {self.namespace} {self.pod} -i -- sh -c "
                    f"'cat - {b64_cmd}> /tmp/vcontent'; "
                    f"oc exec -n {self.namespace} {self.pod} -i -- sh -c '"
                    f"vault kv {verb} -mount={mount} {prefix}/{secret_name} {fieldname}=@/tmp/vcontent; "
                    f"rm /tmp/vcontent'"
                )
                self._run_command(cmd, attempts=3)
            return

        # Plain values (already resolved by the parser, base64 included)
        for prefix in prefixes:
            cmd = (
                f"oc exec -n {self.namespace} {self.pod} -i -- sh -c "
                f"\"vault kv {verb} -mount={mount} {prefix}/{secret_name} {fieldname}='{fieldvalue}'\""
            )
            self._run_command(cmd, attempts=3)
        return
+
+ def inject_secret(self, secret_name, secret):
+ mount = secret.get("vault_mount", "secret")
+ vault_prefixes = secret.get("vault_prefixes", ["hub"])
+
+ counter = 0
+ # In this structure, each field will have one value
+ for fname, fvalue in secret.get("fields").items():
+ self.inject_field(
+ secret_name=secret_name,
+ soverride=secret["override"],
+ sbase64=secret["base64"],
+ sgenerate=secret["generate"],
+ spaths=secret["paths"],
+ svault_policies=secret["vault_policies"],
+ fieldname=fname,
+ fieldvalue=fvalue,
+ mount=mount,
+ vault_prefixes=vault_prefixes,
+ first=counter == 0,
+ )
+ counter += 1
+ return
+
    def inject_vault_policies(self):
        """Create every configured password policy in the vault."""
        for name, policy in self.vault_policies.items():
            # Stream the policy document into a temp file inside the pod, then
            # register it with 'vault write sys/policies/password/<name>'.
            cmd = (
                f"echo '{policy}' | oc exec -n {self.namespace} {self.pod} -i -- sh -c "
                f"'cat - > /tmp/{name}.hcl';"
                f"oc exec -n {self.namespace} {self.pod} -i -- sh -c 'vault write sys/policies/password/{name} "
                f" policy=@/tmp/{name}.hcl'"
            )
            self._run_command(cmd, attempts=3)
+
+
def run(module):
    """Main ansible module entry point"""
    results = dict(changed=False)

    args = module.params

    vault_policies = args.get("vault_policies", {})
    parsed_secrets = args.get("parsed_secrets", {})
    namespace = args.get("namespace", "vault")
    pod = args.get("pod", "vault-0")

    # Both structures come from parse_secrets_info; an empty one means the
    # pipeline was mis-wired, so fail early with a clear message.
    if vault_policies == {}:
        results["failed"] = True
        module.fail_json("Must pass vault_policies")

    if parsed_secrets == {}:
        results["failed"] = True
        module.fail_json("Must pass parsed_secrets")

    loader = VaultSecretLoader(
        module,
        parsed_secrets,
        vault_policies,
        namespace,
        pod,
    )

    nr_secrets = loader.load_vault()

    results["failed"] = False
    results["changed"] = True
    results["msg"] = f"{nr_secrets} secrets injected"
    module.exit_json(**results)
+
+
def main():
    """Main entry point where the AnsibleModule class is instantiated"""
    # The argument spec is derived directly from the DOCUMENTATION string so
    # the two can never drift apart.
    module = AnsibleModule(
        argument_spec=yaml.safe_load(DOCUMENTATION)["options"],
        supports_check_mode=False,
    )
    run(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/common/ansible/plugins/modules/vault_load_secrets.py b/common/ansible/plugins/modules/vault_load_secrets.py
new file mode 100644
index 00000000..725b69b4
--- /dev/null
+++ b/common/ansible/plugins/modules/vault_load_secrets.py
@@ -0,0 +1,209 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Ansible plugin module that loads secrets from a yaml file and pushes them
+inside the HashiCorp Vault in an OCP cluster. The values-secrets.yaml file is
+expected to be in the following format:
+---
+# version is optional. When not specified it is assumed it is 1.0
+version: 1.0
+
+# These secrets will be pushed in the vault at secret/hub/test The vault will
+# have secret/hub/test with secret1 and secret2 as keys with their associated
+# values (secrets)
+secrets:
+ test:
+ secret1: foo
+ secret2: bar
+
+# This will create the vault key secret/hub/testfoo which will have two
+# properties 'b64content' and 'content' which will be the base64-encoded
+# content and the normal content respectively
+files:
+ testfoo: ~/ca.crt
+
+# These secrets will be pushed in the vault at secret/region1/test The vault will
+# have secret/region1/test with secret1 and secret2 as keys with their associated
+# values (secrets)
+secrets.region1:
+ test:
+ secret1: foo1
+ secret2: bar1
+
+# This will create the vault key secret/region2/testbar which will have two
+# properties 'b64content' and 'content' which will be the base64-encoded
+# content and the normal content respectively
+files.region2:
+ testbar: ~/ca.crt
+"""
+
+import os
+
+import yaml
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.load_secrets_common import get_version
+from ansible.module_utils.load_secrets_v1 import LoadSecretsV1
+from ansible.module_utils.load_secrets_v2 import LoadSecretsV2
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: vault_load_secrets
+short_description: Loads secrets into the HashiCorp Vault
+version_added: "2.11"
+author: "Michele Baldessari"
+description:
+ - Takes a values-secret.yaml file and uploads the secrets into the HashiCorp Vault
+options:
+ values_secrets:
+ description:
+ - Path to the values-secrets file (only one of values_secrets and
+ values_secrets_plaintext can be passed)
+ required: false
+ default: ''
+ type: str
+ values_secrets_plaintext:
+ description:
+ - The content of the values-secrets file (only one of values_secrets and
+ values_secrets_plaintext can be passed)
+ required: false
+ default: ''
+ type: str
+ namespace:
+ description:
+ - Namespace where the vault is running
+ required: false
+ type: str
+ default: vault
+ pod:
+ description:
+ - Name of the vault pod to use to inject secrets
+ required: false
+ type: str
+ default: vault-0
+ basepath:
+ description:
+ - Vault's kv initial part of the path. This is only supported on version 1.0 of the
+ secret format
+ required: false
+ type: str
+ default: secret
+ check_missing_secrets:
+ description:
+ - Validate the ~/values-secret.yaml file against the top-level
+ values-secret-template.yaml and error out if secrets are missing
+ required: false
+ type: bool
+ default: False
+ values_secret_template:
+ description:
+ - Path of the values-secret-template.yaml file of the pattern
+ required: false
+ type: str
+ default: ""
+"""
+
+RETURN = """
+"""
+
+EXAMPLES = """
+- name: Loads secrets file into the vault of a cluster
+ vault_load_secrets:
+ values_secrets: ~/values-secret.yaml
+"""
+
+
def run(module):
    """Main ansible module entry point.

    Reads the secrets either from a file on disk (``values_secrets``) or
    from an already-decrypted string (``values_secrets_plaintext``), parses
    them as yaml and hands them to the version-specific loader
    (``LoadSecretsV1`` or ``LoadSecretsV2``) which injects them into the
    HashiCorp Vault.

    Parameters:
        module(AnsibleModule): the ansible module instance

    Calls ``module.fail_json`` (which exits) on invalid parameter
    combinations, unparseable yaml or unsupported versions; otherwise
    ``module.exit_json`` with the number of secrets injected.
    """
    results = dict(changed=False)

    args = module.params
    values_secrets = os.path.expanduser(args.get("values_secrets", ""))
    values_secrets_plaintext = args.get("values_secrets_plaintext", "")
    # Exactly one of the two input mechanisms may be used.
    # (Error message fixed to match the actual parameter names.)
    if values_secrets != "" and values_secrets_plaintext != "":
        module.fail_json(
            "Cannot pass both values_secrets and values_secrets_plaintext"
        )

    basepath = args.get("basepath")
    namespace = args.get("namespace")
    pod = args.get("pod")
    check_missing_secrets = args.get("check_missing_secrets")
    values_secret_template = args.get("values_secret_template")

    if values_secrets != "" and not os.path.exists(values_secrets):
        results["failed"] = True
        results["error"] = f"Missing {values_secrets} file"
        results["msg"] = f"Values secrets file does not exist: {values_secrets}"
        module.exit_json(**results)

    # We were passed a filename (aka the unencrypted path)
    if values_secrets != "":
        with open(values_secrets, "r", encoding="utf-8") as file:
            syaml = yaml.safe_load(file.read())
        if syaml is None:
            syaml = {}
        elif isinstance(syaml, str):
            module.fail_json(f"Could not parse {values_secrets} file as yaml")
    elif values_secrets_plaintext != "":
        syaml = yaml.safe_load(values_secrets_plaintext)
        if syaml is None:
            syaml = {}
        elif isinstance(syaml, str):
            module.fail_json("Could not parse values_secrets_plaintext as yaml")
    else:
        module.fail_json("Both values_secrets and values_secrets_plaintext are unset")

    version = get_version(syaml)
    if version == "2.0":
        secret_obj = LoadSecretsV2(module, syaml, namespace, pod)
    elif version == "1.0":
        secret_obj = LoadSecretsV1(
            module,
            syaml,
            basepath,
            namespace,
            pod,
            values_secret_template,
            check_missing_secrets,
        )
    else:
        module.fail_json(f"Version {version} is currently not supported")

    secret_obj.sanitize_values()
    nr_secrets = secret_obj.inject_secrets()
    results["failed"] = False
    results["changed"] = True
    results["msg"] = f"{nr_secrets} secrets injected"
    module.exit_json(**results)
+
+
def main():
    """Build the AnsibleModule from the DOCUMENTATION options and run it."""
    argument_spec = yaml.safe_load(DOCUMENTATION)["options"]
    run(AnsibleModule(argument_spec=argument_spec, supports_check_mode=False))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/common/ansible/roles/cluster_pre_check/defaults/main.yml b/common/ansible/roles/cluster_pre_check/defaults/main.yml
new file mode 100644
index 00000000..fd6cdd5c
--- /dev/null
+++ b/common/ansible/roles/cluster_pre_check/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
+kubeconfig_backup: "{{ lookup('env', 'HOME') }}/.kube/config"
diff --git a/common/ansible/roles/cluster_pre_check/tasks/main.yml b/common/ansible/roles/cluster_pre_check/tasks/main.yml
new file mode 100644
index 00000000..1dc5f445
--- /dev/null
+++ b/common/ansible/roles/cluster_pre_check/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- name: Check if the kubernetes python module is usable from ansible
+ ansible.builtin.command: "{{ ansible_python_interpreter }} -c 'import kubernetes'"
+ changed_when: false
+
+- name: Check if KUBECONFIG is correctly set
+ ansible.builtin.debug:
+ msg: "KUBECONFIG is not set, falling back to ~/.kube/config"
+ when: kubeconfig is not defined or kubeconfig | length == 0
+
+- name: Check if ~/.kube/config exists
+ ansible.builtin.stat:
+ path: "{{ kubeconfig_backup }}"
+ register: kubeconfig_result
+
+- name: Check if we're running inside an OCP cluster directly
+ ansible.builtin.set_fact:
+    running_in_ocp: "{{ (lookup('env', 'KUBERNETES_SERVICE_HOST') | length > 0) | bool }}"
+
+- name: Fail if both KUBECONFIG and ~/.kube/config do not exist but only when not running in a cluster
+ ansible.builtin.fail:
+ msg: "{{ kubeconfig_backup }} not found and KUBECONFIG unset. Bailing out."
+ failed_when:
+ - not running_in_ocp
+ - not kubeconfig_result.stat.exists
+ - kubeconfig is not defined or kubeconfig | length == 0
diff --git a/common/ansible/roles/find_vp_secrets/tasks/main.yml b/common/ansible/roles/find_vp_secrets/tasks/main.yml
new file mode 100644
index 00000000..ce847a01
--- /dev/null
+++ b/common/ansible/roles/find_vp_secrets/tasks/main.yml
@@ -0,0 +1,87 @@
+---
+# Once V1 support is dropped we can remove the whole secret_template support
+- name: Set secret_template fact
+ no_log: "{{ override_no_log | default(true) }}"
+ ansible.builtin.set_fact:
+ secret_template: "{{ pattern_dir }}/values-secret.yaml.template"
+
+- name: Is a VALUES_SECRET env variable set?
+ ansible.builtin.set_fact:
+ custom_env_values_secret: "{{ lookup('ansible.builtin.env', 'VALUES_SECRET') }}"
+
+- name: Check if VALUES_SECRET file exists
+ ansible.builtin.stat:
+ path: "{{ custom_env_values_secret }}"
+ register: custom_file_values_secret
+ when: custom_env_values_secret | default('') | length > 0
+
+- name: Set values-secret yaml file to {{ custom_file_values_secret.stat.path }}
+ ansible.builtin.set_fact:
+ found_file: "{{ custom_file_values_secret.stat.path }}"
+ when:
+ - custom_env_values_secret | default('') | length > 0
+ - custom_file_values_secret.stat.exists
+
+# FIXME(bandini): Eventually around end of 2023(?) we should drop
+# ~/values-secret-{{ pattern_name }}.yaml and ~/values-secret.yaml
+- name: Find first existing values-secret yaml file
+ ansible.builtin.set_fact:
+ found_file: "{{ lookup('ansible.builtin.first_found', findme) }}"
+ vars:
+ findme:
+ - "~/.config/hybrid-cloud-patterns/values-secret-{{ pattern_name }}.yaml"
+ - "~/.config/validated-patterns/values-secret-{{ pattern_name }}.yaml"
+ - "~/values-secret-{{ pattern_name }}.yaml"
+ - "~/values-secret.yaml"
+ - "{{ pattern_dir }}/values-secret.yaml.template"
+ when: custom_env_values_secret | default('') | length == 0
+
+- name: Is found values secret file encrypted
+ no_log: "{{ override_no_log | default(true) }}"
+ ansible.builtin.shell: |
+ set -o pipefail
+ head -1 "{{ found_file }}" | grep -q \$ANSIBLE_VAULT
+ changed_when: false
+ register: encrypted
+ failed_when: (encrypted.rc not in [0, 1])
+
+# When HOME is set we replace it with '~' in this debug message
+# because when run from inside the container the HOME is /pattern-home
+# which is confusing for users
+- name: Is found values secret file encrypted
+ ansible.builtin.debug:
+ msg: "Using {{ (lookup('env', 'HOME') | length > 0) | ternary(found_file | regex_replace('^' + lookup('env', 'HOME'), '~'), found_file) }} to parse secrets"
+
+- name: Set encryption bool fact
+ no_log: "{{ override_no_log | default(true) }}"
+ ansible.builtin.set_fact:
+    is_encrypted: "{{ (encrypted.rc == 0) | bool }}"
+
+- name: Get password for "{{ found_file }}"
+ ansible.builtin.pause:
+ prompt: "Input the password for {{ found_file }}"
+ echo: false
+ when: is_encrypted
+ register: vault_pass
+
+- name: Get decrypted content if {{ found_file }} was encrypted
+ no_log: "{{ override_no_log | default(true) }}"
+ ansible.builtin.shell:
+ ansible-vault view --vault-password-file <(cat <<<"{{ vault_pass.user_input }}") "{{ found_file }}"
+ register: values_secret_plaintext
+ when: is_encrypted
+ changed_when: false
+
+- name: Normalize secrets format (un-encrypted)
+ no_log: '{{ override_no_log | default(true) }}'
+ ansible.builtin.set_fact:
+ values_secrets_data: "{{ lookup('file', found_file) | from_yaml }}"
+ when: not is_encrypted
+ changed_when: false
+
+- name: Normalize secrets format (encrypted)
+ no_log: '{{ override_no_log | default(true) }}'
+ ansible.builtin.set_fact:
+ values_secrets_data: "{{ values_secret_plaintext.stdout | from_yaml }}"
+ when: is_encrypted
+ changed_when: false
diff --git a/common/ansible/roles/iib_ci/README.md b/common/ansible/roles/iib_ci/README.md
new file mode 100644
index 00000000..98355ad9
--- /dev/null
+++ b/common/ansible/roles/iib_ci/README.md
@@ -0,0 +1,97 @@
+# IIB Utilities
+
+A set of ansible plays to fetch an IIB (Image Index Bundle, aka a container created by the operator sdk
+that contains a bunch of references to operators that can be installed in an OpenShift cluster)
+
+Run `ansible-playbook common/ansible/playbooks/iib-ci/lookup.yml` to see which IIBs are available (defaults to
+openshift-gitops). If you want to look up IIBs for a different operator run:
+`ansible-playbook -e operator=acm-operator common/ansible/playbooks/iib-ci/lookup.yml`
+
+You can also try running curl manually via:
+`curl -sSL "https://datagrepper.engineering.redhat.com/raw?topic=/topic/VirtualTopic.eng.ci.redhat-container-image.index.built&delta=15780000&contains=acm-operator" | jq ".raw_messages[].msg"`
+
+Typically IIB are prerelease stuff that lives on some internal boxes. What these scripts do is fetch
+the IIB internally, mirror it to the registry inside the cluster, parse all the needed images and mirror
+those to the internal cluster registry and then set up the registries.conf files on all nodes so
+that the images used are the ones pointing to the internal cluster.
+
+## Usage
+
+By default the operator to be installed from the IIB is `openshift-gitops-operator`. You can override this through the `OPERATOR` env variable.
+For example, to mirror an operator into an existing cluster you would do the following:
+
+```sh
+export KUBECONFIG=/tmp/foo/kubeconfig
+export OPERATOR=openshift-gitops-operator
+export IIB=492329
+export INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:${IIB}
+export KUBEADMINPASS="11111-22222-33333-44444"
+# This will push the IIB and all the needed images for the default openshift-gitops-operator into the cluster
+make load-iib
+# This will install the pattern using the gitops operator from the IIB
+```
+
+***NOTE:*** When using an SNO without shared storage in a non-production environment, the enablement of the internal registry will fail. You need to run the following to enable it:
+
+```sh
+oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"managementState":"Managed"}}'
+oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}}}'
+```
+
+Then in case of the `openshift-gitops-operator` we would install with:
+
+```sh
+export CHANNEL=$(oc get -n openshift-marketplace packagemanifests -l "catalog=iib-${IIB}" --field-selector "metadata.name=${OPERATOR}" -o jsonpath='{.items[0].status.defaultChannel}')
+make EXTRA_HELM_OPTS="--set main.gitops.operatorSource=iib-${IIB} --set main.gitops.channel=${CHANNEL}" install
+```
+
+To install ACM (`export OPERATOR=advanced-cluster-management`) or any other
+operator (except the gitops one) from an IIB we would call the following as a
+final step:
+
+```sh
+export CHANNEL=$(oc get -n openshift-marketplace packagemanifests -l "catalog=iib-${IIB}" --field-selector "metadata.name=${OPERATOR}" -o jsonpath='{.items[0].status.defaultChannel}')
+make EXTRA_HELM_OPTS="--set main.extraParameters[0].name=clusterGroup.subscriptions.acm.source --set main.extraParameters[0].value=iib-${IIB} --set main.extraParameters[1].name=clusterGroup.subscriptions.acm.channel --set main.extraParameters[1].value=${CHANNEL}" install
+```
+
+*Note*: In this case `acm` is the name of the subscription in `values-hub.yaml`
+
+### OCP 4.13 and onwards
+
+Since 4.13 supports an internal registry that can cope with v2 docker manifests, we
+use that. Run `make iib` with the following environment variables set:
+
+* `INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:492329`
+* `KUBEADMINPASS="11111-22222-33333-44444"`
+
+### OCP 4.12 and previous versions
+
+Due to the lack of v2 manifest support on the internal registry, we use an external
+registry. Run `make iib` with the following environment variables set:
+
+* `INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:492329`
+* `REGISTRY=quay.io/rhn_support_mbaldess/iib`
+* `REGISTRY_TOKEN=:`
+
+*Note*: For the REGISTRY_TOKEN go to your quay repository, add a robot with "Write" permissions. The robot created will have a "username" and "password" fields. Set the REGISTRY_TOKEN environment variable to that value.
+
+## Useful commands
+
+* List IIBs for an operator:
+
+```sh
+ansible-playbook common/ansible/playbooks/iib-ci/lookup.yml
+...
+ok: [localhost] => (item=v4.13) => {
+ "msg": "v4.13 -> {'indeximage': 'registry-proxy.engineering.redhat.com/rh-osbs/iib:509435', 'bundleimage': 'registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-gitops-operator-bundle:v99.9.0-106'}"
+}
+...
+```
+
+Override the `operator` value with the desired bundle name to figure out the last IIBs for it.
+
+* List all images uploaded to the internal registry:
+
+```sh
+oc exec -it -n openshift-image-registry $(oc get pods -n openshift-image-registry -o json | jq -r '.items[].metadata.name | select(. | test("^image-registry-"))' | head -n1) -- bash -c "curl -k -u kubeadmin:$(oc whoami -t) https://localhost:5000/v2/_catalog"
+```
diff --git a/common/ansible/roles/iib_ci/defaults/main.yml b/common/ansible/roles/iib_ci/defaults/main.yml
new file mode 100644
index 00000000..7605dba5
--- /dev/null
+++ b/common/ansible/roles/iib_ci/defaults/main.yml
@@ -0,0 +1,17 @@
+rh_internal_registry: registry-proxy.engineering.redhat.com
+iib_image: "{{ lookup('env', 'INDEX_IMAGE') }}"  # NOTE(review): README/Makefile export INDEX_IMAGES (plural) — confirm the intended env var name
+
+external_registry: "{{ lookup('env', 'REGISTRY') }}"
+external_registry_token: "{{ lookup('env', 'REGISTRY_TOKEN') }}"
+external_registry_email: noemail@localhost
+
+kubeadminpass: "{{ lookup('env', 'KUBEADMINPASS') }}"
+
+internal_registry_ns: openshift-marketplace
+internal_registry_email: noemail@localhost
+internal_registry_user: registry-custom-user
+internal_registry_pass: "{{ lookup('env', 'INTERNAL_REGISTRY_USER') }}"  # NOTE(review): password read from a *_USER env var — likely should be a *_PASS variable; confirm
+
+# We can use default(, true) below because OPERATOR is a string and not
+# a boolean
+operator: "{{ lookup('env', 'OPERATOR') | default('openshift-gitops-operator', true) }}"
diff --git a/common/ansible/roles/iib_ci/handlers/main.yml b/common/ansible/roles/iib_ci/handlers/main.yml
new file mode 100644
index 00000000..a983544d
--- /dev/null
+++ b/common/ansible/roles/iib_ci/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for iib_ci
diff --git a/common/ansible/roles/iib_ci/meta/main.yml b/common/ansible/roles/iib_ci/meta/main.yml
new file mode 100644
index 00000000..c9d7005d
--- /dev/null
+++ b/common/ansible/roles/iib_ci/meta/main.yml
@@ -0,0 +1,29 @@
+galaxy_info:
+ author: Validated Patterns Team https://github.com/hybrid-cloud-patterns/
+ description: Internal module to work with IIBs (Image Index Bundles)
+
+ issue_tracker_url: https://github.com/hybrid-cloud-patterns/common/issues
+ license: Apache-2.0
+ min_ansible_version: "2.1"
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ platforms:
+ - name: Fedora
+ versions:
+ - all
+ - name: Ubuntu
+ versions:
+ - all
+ - name: Debian
+ versions:
+ - all
+ - name: EL
+ versions:
+ - "8"
+ - "9"
+
+ galaxy_tags: []
+
+dependencies: []
diff --git a/common/ansible/roles/iib_ci/tasks/fetch-operator-images.yml b/common/ansible/roles/iib_ci/tasks/fetch-operator-images.yml
new file mode 100644
index 00000000..11df26cc
--- /dev/null
+++ b/common/ansible/roles/iib_ci/tasks/fetch-operator-images.yml
@@ -0,0 +1,98 @@
+# This task fetches all the images given an operator name
+# the operator name is defined in the variable "item". This
+# set of tasks is to be included in a loop that goes over the
+# needed operators
+- name: Get default channel in the IIB for "{{ item }}"
+ ansible.builtin.shell: |
+ oc get -n "{{ internal_registry_ns }}" packagemanifests -l "catalog=iib-{{ iib }}" --field-selector "metadata.name={{ item }}" \
+ -o jsonpath='{.items[0].status.defaultChannel}'
+ register: default_channel_raw
+ retries: 10
+ delay: 10
+ until: default_channel_raw is not failed
+
+- name: Set default channel fact
+ ansible.builtin.set_fact:
+ default_channel: "{{ default_channel_raw.stdout }}"
+
+- name: Get all related images in the IIB for "{{ item }}"
+ ansible.builtin.shell: |
+ oc get packagemanifests -l "catalog=iib-{{ iib }}" --field-selector "metadata.name={{ item }}" \
+ -o jsonpath="{.items[0].status.channels[?(@.name==\"{{ default_channel }}\")].currentCSVDesc.relatedImages}"
+ register: related_images_raw
+ retries: 5
+ delay: 10
+ until: related_images_raw is not failed
+
+- name: Set related_images fact
+ ansible.builtin.set_fact:
+ related_images: "{{ related_images_raw.stdout }}"
+
+# NOTE(bandini)
+# The following code is here to find out what the operator bundle image is and to make
+# sure it is on the internal registry.
+# This is all potentially hacky, but so far I could not find a single place in the cluster
+# where the olm.bundle image is available. The info is in there in the IIB, but it certainly
+# is not in any package manifest nor catalogsource. This is why we resort to invoking opm
+# alpha commands inside the IIB image locally
+- name: Pull the IIB locally
+ ansible.builtin.command:
+ podman pull "{{ iib_image }}"
+
+# $ opm alpha list channels /configs advanced-cluster-management
+# PACKAGE CHANNEL HEAD
+# advanced-cluster-management release-2.7 advanced-cluster-management.v2.7.4
+# advanced-cluster-management release-2.8 advanced-cluster-management.v2.8.0-130
+- name: Read the operator bundle from the default channel
+ ansible.builtin.shell: |
+ set -o pipefail
+ podman run -it --rm "{{ iib_image }}" alpha list channels /configs "{{ item }}" | grep -E "(\s){{ default_channel }}(\s)" | awk '{ print $3 }'
+ register: bundle_channel_raw
+
+- name: Set bundle fact
+ ansible.builtin.set_fact:
+ bundle_channel: "{{ bundle_channel_raw.stdout }}"
+
+- name: Fail if bundle_channel is empty
+ ansible.builtin.fail:
+ msg: "Failed to find bundle from channel: {{ bundle_channel_raw }}"
+ when: >
+ (bundle_channel is not defined) or (bundle_channel | length == 0)
+
+# $ opm alpha list bundles /configs advanced-cluster-management
+# PACKAGE CHANNEL BUNDLE REPLACES SKIPS SKIP RANGE IMAGE
+# advanced-cluster-management release-2.7 advanced-cluster-management.v2.7.0 >=2.6.0 <2.7.0 registry.stage.redhat.io/rhacm2/acm-operator-bundle@sha256:f63d0a9a0e3dc9d86e84279c50e9c613d8430e71a3821d418e168250ca3b747c
+# advanced-cluster-management release-2.7 advanced-cluster-management.v2.7.1 advanced-cluster-management.v2.7.0 >=2.6.0 <2.7.1 registry.stage.redhat.io/rhacm2/acm-operator-bundle@sha256:a81a574f2f22d37681c44fe0c3b958074408705415de333de54d120145537533
+# advanced-cluster-management release-2.7 advanced-cluster-management.v2.7.2 advanced-cluster-management.v2.7.1 >=2.6.0 <2.7.2 registry.stage.redhat.io/rhacm2/acm-operator-bundle@sha256:8a2c758689eaebe6a287315ca18fd9122f323e195ea3410db005b6a449060fad
+# advanced-cluster-management release-2.7 advanced-cluster-management.v2.7.3 advanced-cluster-management.v2.7.2 >=2.6.0 <2.7.3 registry.stage.redhat.io/rhacm2/acm-operator-bundle@sha256:208f4d9473a923817c102bb7e5f138d3e1e8ed3057a23a220ffa8fe9c0c27128
+# advanced-cluster-management release-2.7 advanced-cluster-management.v2.7.4 advanced-cluster-management.v2.7.3 >=2.6.0 <2.7.4 registry.stage.redhat.io/rhacm2/acm-operator-bundle@sha256:75b6438e08800b2e3608aeb01c1c0a68810108d9905fff35916afd21e6d32685
+# advanced-cluster-management release-2.8 advanced-cluster-management.v2.8.0-130 >=2.7.0 <2.8.0-130 registry.stage.redhat.io/rhacm2/acm-operator-bundle@sha256:6c385aa69256cdd964ae9e79e52ce52e1048391f0557af59843326c4ebe9bec0
+- name: Get bundle image
+ ansible.builtin.shell: |
+ set -o pipefail
+ podman run -it --rm "{{ iib_image }}" alpha list bundles /configs "{{ item }}" | grep -e "{{ default_channel }}\s\+{{ bundle_channel }}" | awk '{ print $NF }'
+ register: bundle_image_raw
+
+- name: Set bundle image fact
+ ansible.builtin.set_fact:
+ bundle_image: "{{ bundle_image_raw.stdout }}"
+
+- name: Fail if bundle_image is empty
+ ansible.builtin.fail:
+ msg: "Failed to find bundle image: {{ bundle_image_raw }}"
+ when: >
+ (bundle_image is not defined) or (bundle_image | length == 0)
+
+# all_images will be a list as follows:
+# [ "registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:d5829e880db4b82a50a4962d61ea148522a93644174931b256d7ad866eadcf40",
+# "registry.redhat.io/openshift-gitops-1/gitops-rhel8@sha256:5ff915a399c1cc12d4f932652b410bf7399850934833e755267bdd409f4ce11b",
+# "registry.redhat.io/openshift-gitops-1/argocd-rhel8@sha256:81e0574159c6aaabe7125d27782a5e6e5e72383a4a0ba76b44d465f3a3098759",
+# "registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3",
+# "registry.redhat.io/openshift-gitops-1/gitops-rhel8-operator@sha256:efbfb010f24894f715a50832a4b3d2cdc221f283cbbdca05e388850586e9d792",
+# "registry.redhat.io/openshift4/ose-haproxy-router@sha256:edf7ce748b703e195220b7bd7b42fa2caa4cdfd96840445e096036a0d85f1ff2",
+# "registry.redhat.io/openshift-gitops-1/kam-delivery-rhel8@sha256:10c5a1b6a0858a812117e6fb2b28d37617d9eb83da5e4fb647059ff740a14461",
+# "registry.redhat.io/openshift-gitops-1/dex-rhel8@sha256:6a3eaee6a4f8cb9a35363bf4c7f83a7fa2042ae62bdaa700ecd0893dd52276f5",
+# "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-gitops-operator-bundle@sha256:e463314596098a4e774e0dda..." ]
+- name: Set all images fact (related images + operator bundles)
+ ansible.builtin.set_fact:
+ all_images: "{{ all_images + related_images + [bundle_image] }}"
diff --git a/common/ansible/roles/iib_ci/tasks/install-iib-in-cluster.yml b/common/ansible/roles/iib_ci/tasks/install-iib-in-cluster.yml
new file mode 100644
index 00000000..4b39184c
--- /dev/null
+++ b/common/ansible/roles/iib_ci/tasks/install-iib-in-cluster.yml
@@ -0,0 +1,52 @@
+- name: Remove manifest folder "{{ iib_local_folder }}"
+ ansible.builtin.file:
+ path: "{{ iib_local_folder }}"
+ state: absent
+
+- name: Create manifest folder "{{ iib_local_folder }}"
+ ansible.builtin.file:
+ path: "{{ iib_local_folder }}"
+ state: directory
+ mode: "0755"
+
+# This generates files in /tmp/manifest-IIB:
+# - mapping.txt
+# - catalogSource.yaml
+# - imageContentSourcePolicy.yaml
+- name: Mirror catalog manifests only to "{{ iib_local_folder }}"
+ ansible.builtin.shell: |
+ oc adm catalog mirror --insecure --manifests-only --to-manifests=. \
+ "{{ iib_image }}" "{{ rh_internal_registry }}/rh-osbs" > catalog.log 2>&1
+ args:
+ chdir: "{{ iib_local_folder }}"
+
+- name: Mirror IIB to "{{ mirror_iib }}"
+ ansible.builtin.shell: |
+ oc image mirror -a "{{ pull_secrets_tempfolder.path }}/.dockerconfigjson" \
+ "{{ iib_image }}={{ mirror_iib }}" --insecure --keep-manifest-list 2>&1
+ args:
+ chdir: "{{ iib_local_folder }}"
+ register: oc_mirror_result
+ retries: 10
+ delay: 5
+ until: oc_mirror_result is not failed
+
+- name: Template mirrored catalogsource
+ ansible.builtin.template:
+ src: ./templates/catalogSource.yaml.j2
+ dest: "{{ iib_local_folder }}/mirrored-catalogsource.yaml"
+ mode: "0644"
+
+- name: Apply mirrored catalogsource
+ ansible.builtin.shell: |
+ oc apply -f "{{ iib_local_folder }}/mirrored-catalogsource.yaml"
+
+- name: Wait for catalogsource to show up
+ ansible.builtin.shell: |
+ oc get -n "{{ internal_registry_ns }}" packagemanifests -l "catalog=iib-{{ iib }}" --field-selector "metadata.name={{ operator }}" \
+ -o jsonpath='{.items[0].status.defaultChannel}'
+ register: oc_catalogsource_result
+ retries: 30
+ delay: 10
+ until: oc_catalogsource_result is not failed
+ changed_when: false
diff --git a/common/ansible/roles/iib_ci/tasks/main.yml b/common/ansible/roles/iib_ci/tasks/main.yml
new file mode 100644
index 00000000..ba6eb7c8
--- /dev/null
+++ b/common/ansible/roles/iib_ci/tasks/main.yml
@@ -0,0 +1,43 @@
+- name: Check that INDEX_IMAGE env variable is set
+ ansible.builtin.fail:
+ msg: "INDEX_IMAGE: '{{ iib_image }}' is not set"
+ failed_when:
+ (iib_image is not defined or iib_image | length == 0)
+
+- name: Set IIB fact
+ ansible.builtin.set_fact:
+ iib: "{{ iib_image.split(':')[1] }}"
+
+- name: Set IIB local folder fact
+ ansible.builtin.set_fact:
+ iib_local_folder: "/tmp/manifest-{{ iib }}"
+
+- name: Get cluster version
+ # E.g. 4.13.0-rc.6 or 4.12.16
+ ansible.builtin.shell: |
+ oc get openshiftcontrollermanager/cluster -o yaml -o jsonpath='{.status.version}'
+ register: oc_version_raw
+ changed_when: false
+
+- name: Is OCP version 4.13 or newer? (aka registry supports v2 manifests)
+ ansible.builtin.set_fact:
+ use_internal_registry: "{{ oc_version_raw.stdout is version('4.13', '>=') }}"
+
+- name: Set up internal registry (OCP >= 4.13)
+ ansible.builtin.include_tasks: setup-internal-registry.yml
+ when: use_internal_registry
+
+- name: Set up external registry (OCP < 4.13)
+ ansible.builtin.include_tasks: setup-external-registry.yml
+ when: not use_internal_registry
+
+- name: Install new IIB in cluster
+ ansible.builtin.include_tasks: install-iib-in-cluster.yml
+
+- name: Mirror all related images
+ ansible.builtin.include_tasks: mirror-related-images.yml
+
+- name: Remove pullsecrets tempfolder
+ ansible.builtin.file:
+ path: "{{ pull_secrets_tempfolder.path }}"
+ state: absent
diff --git a/common/ansible/roles/iib_ci/tasks/mirror-related-images.yml b/common/ansible/roles/iib_ci/tasks/mirror-related-images.yml
new file mode 100644
index 00000000..32a36c07
--- /dev/null
+++ b/common/ansible/roles/iib_ci/tasks/mirror-related-images.yml
@@ -0,0 +1,226 @@
+# This is needed because some operators like "advanced-cluster-management"
+# install a second operator "multicluster-engine"
+- name: Set operators list
+ ansible.builtin.set_fact:
+ operator_list: "{{ [operator] + (operator == 'advanced-cluster-management') | ternary(['multicluster-engine'], []) }}"
+
+- name: Set all images to empty list
+ ansible.builtin.set_fact:
+ all_images: []
+
+- name: Fetch operator images tasks
+ ansible.builtin.include_tasks: fetch-operator-images.yml
+ loop: "{{ operator_list }}"
+
+- name: Print all_images
+ ansible.builtin.debug:
+ msg: "{{ all_images }}"
+
+# A mapping.txt file will have lines like the following. Note how the image to the right of '='
+# does have a shortened hash! :
+# registry.redhat.io/openshift-gitops-1/gitops-rhel8@sha256:5ff...=registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-gitops-rhel8:8256cca6
+# registry.redhat.io/openshift4/ose-haproxy-router@sha256:edf..=registry-proxy.engineering.redhat.com/rh-osbs/openshift4-ose-haproxy-router:a636cbea
+#
+# Now what we are doing here is the following:
+# 1. For every image we get from the bundle (contained in all_images var) we check if it exists. If it does great, skip to the next image
+# 2. If the image was not found above, we take the corresponding URL on the right hand side of the '=' sign in mapping.txt
+# except that we drop the hash that exists on the right hand-side and just use the one we were given with the image.
+# If the image is found, great. If not we need to error out because we have no idea where we can fetch it from
+- name: Find out which images really exist by consulting mapping.txt
+ ansible.builtin.shell: |
+ set -o pipefail
+ left_sha=$(echo "{{ image }}" | sed -e 's/^.*@//')
+ right=$(grep "{{ image }}" "{{ iib_local_folder }}/mapping.txt" | cut -f2 -d=)
+ right_base=$(echo $right | sed -e 's/:.*$//' -e 's/@.*$//')
+ right_log=$(echo "${right_base}@${left_sha}" | sed -e 's/\//-/g')
+ if skopeo inspect --authfile "{{ pull_secrets_tempfolder.path }}/.dockerconfigjson" --no-tags docker://"{{ image }}" &> /tmp/skopeo-"{{ image | regex_replace('/', '-') }}".log; then
+ echo "{{ image }}"
+ elif skopeo inspect --authfile "{{ pull_secrets_tempfolder.path }}/.dockerconfigjson" --no-tags docker://"${right_base}@${left_sha}" &> "/tmp/skopeo-${right_log}.log"; then
+ echo "${right_base}@${left_sha}"
+ else
+ echo "ERROR: both {{ image }} and echo ${right_base}@${left_sha} could not be found"
+ exit 1
+ fi
+ register: all_existing_images
+ with_items: "{{ all_images }}"
+ loop_control:
+ loop_var: image
+
+# The dictionary below will be in the following form:
+# {
+# "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-gitops-operator-bundle@sha256:e463314596098a4e774e0ddaed0009bfdad4d79b664e28fef219c796679ee6a0": {
+# "source": "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-gitops-operator-bundle@sha256:e463314596098a4e774e0ddaed0009bfdad4d79b664e28fef219c796679ee6a0"
+# },
+# "registry.redhat.io/openshift-gitops-1/argocd-rhel8@sha256:81e0574159c6aaabe7125d27782a5e6e5e72383a4a0ba76b44d465f3a3098759": {
+# "source": "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-argocd-rhel8@sha256:81e0574159c6aaabe7125d27782a5e6e5e72383a4a0ba76b44d465f3a3098759"
+# },
+# "registry.redhat.io/openshift-gitops-1/dex-rhel8@sha256:6a3eaee6a4f8cb9a35363bf4c7f83a7fa2042ae62bdaa700ecd0893dd52276f5": {
+# "source": "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-dex-rhel8@sha256:6a3eaee6a4f8cb9a35363bf4c7f83a7fa2042ae62bdaa700ecd0893dd52276f5"
+# },
+# "registry.redhat.io/openshift-gitops-1/gitops-rhel8-operator@sha256:efbfb010f24894f715a50832a4b3d2cdc221f283cbbdca05e388850586e9d792": {
+# "source": "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-gitops-rhel8-operator@sha256:efbfb010f24894f715a50832a4b3d2cdc221f283cbbdca05e388850586e9d792"
+# },
+# "registry.redhat.io/openshift-gitops-1/gitops-rhel8@sha256:5ff915a399c1cc12d4f932652b410bf7399850934833e755267bdd409f4ce11b": {
+# "source": "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-gitops-rhel8@sha256:5ff915a399c1cc12d4f932652b410bf7399850934833e755267bdd409f4ce11b"
+# },
+# "registry.redhat.io/openshift-gitops-1/kam-delivery-rhel8@sha256:10c5a1b6a0858a812117e6fb2b28d37617d9eb83da5e4fb647059ff740a14461": {
+# "source": "registry-proxy.engineering.redhat.com/rh-osbs/openshift-gitops-1-kam-delivery-rhel8@sha256:10c5a1b6a0858a812117e6fb2b28d37617d9eb83da5e4fb647059ff740a14461"
+# },
+# "registry.redhat.io/openshift4/ose-haproxy-router@sha256:edf7ce748b703e195220b7bd7b42fa2caa4cdfd96840445e096036a0d85f1ff2": {
+# "source": "registry.redhat.io/openshift4/ose-haproxy-router@sha256:edf7ce748b703e195220b7bd7b42fa2caa4cdfd96840445e096036a0d85f1ff2"
+# },
+# "registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:d5829e880db4b82a50a4962d61ea148522a93644174931b256d7ad866eadcf40": {
+# "source": "registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:d5829e880db4b82a50a4962d61ea148522a93644174931b256d7ad866eadcf40"
+# },
+# "registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3": {
+# "source": "registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3"
+# }
+# }
+- name: Create dict with full image name+sha -> url where we will fetch it from
+ ansible.builtin.set_fact:
+ image_urls: "{{ image_urls | default({}) | combine({item: {'source': all_existing_images.results[counter].stdout,
+ 'source_nosha': all_existing_images.results[counter].stdout | regex_replace('@.*$', '')}}, recursive=true) }}"
+ loop: "{{ all_images }}"
+ loop_control:
+ index_var: counter
+
+- name: Create dict with full image name+sha -> mirror destination (OCP >= 4.13)
+ ansible.builtin.set_fact:
+ image_urls: "{{ image_urls | default({}) | combine({item:
+ {'mirrordest': mirror_dest + item | basename,
+ 'mirrordest_nosha': (mirror_dest + item | basename) | regex_replace('@.*$', ''),
+ 'mirrordest_tag': 'tag-' + item | basename | regex_replace('^.*@sha256:', '')}}, recursive=true) }}"
+ loop: "{{ all_images }}"
+ when: use_internal_registry
+
+- name: Create dict with full image name+sha -> mirror destination (OCP < 4.13)
+ ansible.builtin.set_fact:
+ image_urls: "{{ image_urls | default({}) | combine({item:
+ {'mirrordest': mirror_dest + '@' + item | basename | regex_replace('^.*@', ''),
+ 'mirrordest_nosha': mirror_dest,
+ 'mirrordest_tag': 'tag-' + item | basename | regex_replace('^.*@sha256:', '')}}, recursive=true) }}"
+ loop: "{{ all_images }}"
+ when: not use_internal_registry
+
+- name: Create dict with full image name+sha -> image key without sha
+ ansible.builtin.set_fact:
+ image_urls: "{{ image_urls | default({}) | combine({item: {'image_nosha': item | regex_replace('@.*$', '')}}, recursive=true) }}"
+ loop: "{{ all_images }}"
+
+# At this point the dictionary looks as follows:
+# "registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3": {
+# "mirrordest": "default-route-openshift-image-registry.apps.mcg-hub.blueprints.rhecoeng.com/openshift-marketplace/redis-6@sha256:535...
+# "mirrordest_nosha": "default-route-openshift-image-registry.apps.mcg-hub.blueprints.rhecoeng.com/openshift-marketplace/redis-6",
+# "source": "registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3",
+# "source_nosha": "registry.redhat.io/rhel8/redis-6"
+# }
+- name: Print dict with full images
+ ansible.builtin.debug:
+ msg: "{{ image_urls }}"
+
+# OCP 4.13 uses the new fangled "ImageDigestMirrorSet", older OCPs use "ImageContentSourcePolicy"
+- name: Template out imageMirror.yaml (OCP >= 4.13)
+ ansible.builtin.template:
+ src: ./templates/imageDigestMirror.yaml.j2
+ dest: "{{ iib_local_folder }}/imageMirror.yaml"
+ mode: "0644"
+ when: use_internal_registry
+
+- name: Template out imageMirror.yaml (OCP < 4.13)
+ ansible.builtin.template:
+ src: ./templates/imageContentSourcePolicy.yaml.j2
+ dest: "{{ iib_local_folder }}/imageMirror.yaml"
+ mode: "0644"
+ when: not use_internal_registry
+
+- name: Template out mirror.map
+ ansible.builtin.template:
+ src: ./templates/mirror.map.j2
+ dest: "{{ iib_local_folder }}/mirror.map"
+ mode: "0644"
+
+# NOTE(bandini): mirror.map *must* have a tag (we use the IIB number) on the image on the right side
+# otherwise, the image will be uploaded and will exist in S3 but it won't exist in the registry's catalog!!
+- name: Mirror all the needed images
+ ansible.builtin.shell: |
+ set -o pipefail
+ oc image mirror -a "{{ pull_secrets_tempfolder.path }}/.dockerconfigjson" -f mirror.map --insecure --keep-manifest-list 2>&1 | tee -a image-mirror.log
+ args:
+ chdir: "{{ iib_local_folder }}"
+ retries: 5
+ delay: 2
+ register: oc_mirror
+ until: oc_mirror is not failed
+
+- name: Fetch MCP observedGeneration worker
+ ansible.builtin.shell:
+ oc get mcp/worker -o jsonpath='{.status.observedGeneration}'
+ register: worker_observed_generation_raw
+
+- name: Fetch MCP machineCount worker
+ ansible.builtin.shell:
+ oc get mcp/worker -o jsonpath='{.status.machineCount}'
+ register: worker_machinecount_raw
+
+- name: Fetch MCP observedGeneration master
+ ansible.builtin.shell:
+ oc get mcp/master -o jsonpath='{.status.observedGeneration}'
+ register: master_observed_generation_raw
+
+- name: Fetch MCP machineCount master
+ ansible.builtin.shell:
+ oc get mcp/master -o jsonpath='{.status.machineCount}'
+ register: master_machinecount_raw
+
+- name: Will the imageMirror trigger any changes
+ ansible.builtin.command:
+ oc diff -f "{{ iib_local_folder }}/imageMirror.yaml"
+ failed_when: false
+ register: oc_mirror_diff
+
+# We only run this piece if there is an actual change in the mirror digest for images
+# cannot use 'is failed' as that is always false when setting failed_when: false above
+- name: Apply imageMirror and wait for MCP to complete
+ when: oc_mirror_diff.rc != 0
+ block:
+ - name: Apply imageMirror
+ ansible.builtin.command:
+ oc apply -f "{{ iib_local_folder }}/imageMirror.yaml"
+
+ # NOTE(bandini): The reason to not fail on these two observedGeneration waiting
+ # tasks, is to make this idempotent: If the 'oc apply' above does *not* trigger
+ # any changes, the observed generation tasks will just timeout. And then we still
+ # wait to make sure that the readyworker count is correct.
+ - name: Wait for MCP new observedGeneration worker
+ ansible.builtin.shell:
+ oc get mcp/worker -o jsonpath='{.status.observedGeneration}'
+ register: worker_current_observed_generation_raw
+ retries: 10
+ delay: 20
+ until: worker_current_observed_generation_raw.stdout != worker_observed_generation_raw.stdout
+ failed_when: false
+
+ - name: Wait for MCP new observedGeneration master
+ ansible.builtin.shell:
+ oc get mcp/master -o jsonpath='{.status.observedGeneration}'
+ register: master_current_observed_generation_raw
+ retries: 10
+ delay: 20
+ until: master_current_observed_generation_raw.stdout != master_observed_generation_raw.stdout
+ failed_when: false
+
+ - name: Wait for MCP readyMachineCount to be the same as before applying the digest (worker)
+ ansible.builtin.shell:
+ oc get mcp/worker -o jsonpath='{.status.readyMachineCount}'
+ register: worker_current_ready_machinecount_raw
+ retries: 30
+ delay: 10
+ until: worker_current_ready_machinecount_raw.stdout == worker_machinecount_raw.stdout
+
+ - name: Wait for MCP readyMachineCount to be the same as before applying the digest (master)
+ ansible.builtin.shell:
+ oc get mcp/master -o jsonpath='{.status.readyMachineCount}'
+ register: master_current_ready_machinecount_raw
+ retries: 30
+ delay: 10
+ until: master_current_ready_machinecount_raw.stdout == master_machinecount_raw.stdout
diff --git a/common/ansible/roles/iib_ci/tasks/setup-external-registry.yml b/common/ansible/roles/iib_ci/tasks/setup-external-registry.yml
new file mode 100644
index 00000000..a9a9b10a
--- /dev/null
+++ b/common/ansible/roles/iib_ci/tasks/setup-external-registry.yml
@@ -0,0 +1,45 @@
+- name: Check that we can push to the external registry
+ ansible.builtin.fail:
+ msg: "REGISTRY: '{{ external_registry }}' and REGISTRY_TOKEN: '{{ external_registry_token }}'. Both need to be set"
+ failed_when: >
+ (external_registry is not defined or external_registry | length == 0) or
+ (external_registry_token is not defined or external_registry_token | length == 0)
+
+- name: Get current cluster pull secrets
+ ansible.builtin.command:
+ oc extract secret/pull-secret -n openshift-config --to=-
+ register: pull_secrets_raw
+
+- name: Add external registry to pull secrets and set auth fact
+ ansible.builtin.set_fact:
+ pull_secrets_new: "{{ pull_secrets_raw.stdout | from_json }}"
+ external_registry_auth: "{{ external_registry_token | b64encode }}"
+
+- name: Add local registry to pull secrets
+ ansible.builtin.set_fact:
+ pull_secrets: "{{ pull_secrets_new | combine({'auths': {external_registry.split('/')[0]: {'email': external_registry_email, 'auth': external_registry_auth}}}, recursive=true) }}"
+
+- name: Get a tempfile for the pull secrets
+ ansible.builtin.tempfile:
+ state: directory
+ register: pull_secrets_tempfolder
+
+- name: Store pull secrets in tempfile
+ ansible.builtin.copy:
+ dest: "{{ pull_secrets_tempfolder.path }}/.dockerconfigjson"
+ content: "{{ pull_secrets | to_nice_json }}"
+ mode: "0644"
+
+# We cannot store the logins back in the cluster, because quay.io would be overwritten and not have
+# access to the images openshift needs. See:
+# https://github.com/moby/moby/issues/37569
+# - name: Update pull-secret in the cluster
+# ansible.builtin.shell: |
+# oc set data secret/pull-secret -n openshift-config --from-file="{{ pull_secrets_tempfolder.path }}/.dockerconfigjson"
+- name: Set Mirror URL fact for external mirror IIB
+ ansible.builtin.set_fact:
+ mirror_iib: "{{ external_registry }}"
+
+- name: Set Mirror URL fact for external mirror
+ ansible.builtin.set_fact:
+ mirror_dest: "{{ external_registry }}"
diff --git a/common/ansible/roles/iib_ci/tasks/setup-internal-registry.yml b/common/ansible/roles/iib_ci/tasks/setup-internal-registry.yml
new file mode 100644
index 00000000..e45def74
--- /dev/null
+++ b/common/ansible/roles/iib_ci/tasks/setup-internal-registry.yml
@@ -0,0 +1,108 @@
+- name: Check KUBEADMINPASS is set
+ ansible.builtin.fail:
+ msg: "KUBEADMINPASS: '{{ kubeadminpass }}' is not set"
+ failed_when: kubeadminpass is not defined or kubeadminpass | length == 0
+
+- name: Get kubeadmin api endpoint
+ ansible.builtin.command:
+ oc whoami --show-server=true
+ register: kubeadminapi_raw
+
+- name: Set kubeadminapi fact
+ ansible.builtin.set_fact:
+ kubeadminapi: "{{ kubeadminapi_raw.stdout }}"
+
+- name: Login via kubeadmin
+ ansible.builtin.command: |
+ oc login -u kubeadmin -p "{{ kubeadminpass }}" "{{ kubeadminapi }}" --insecure-skip-tls-verify=true
+
+- name: Get kubeadmin token
+ ansible.builtin.command: |
+ oc whoami -t
+ register: oc_whoami_raw
+
+- name: Set kubeadmin token
+ ansible.builtin.set_fact:
+ kubeadmin_token: "{{ oc_whoami_raw.stdout }}"
+
+- name: Expose internal registry route
+ ansible.builtin.shell: |
+ oc patch configs.imageregistry.operator.openshift.io/cluster --patch '{"spec":{"defaultRoute":true}}' --type=merge
+
+- name: Fetch internal registry route value
+ ansible.builtin.command:
+ oc registry info --public=true
+ register: registry_route_raw
+ retries: 20
+ delay: 20
+ until:
+ - registry_route_raw is not failed
+ - registry_route_raw.stdout | length > 0
+
+- name: Set route fact
+ ansible.builtin.set_fact:
+ registry_route: "{{ registry_route_raw.stdout }}"
+
+- name: Set registry allowedRegistries
+ ansible.builtin.shell: >
+ oc patch image.config.openshift.io/cluster --patch "{\"spec\":{\"registrySources\":{\"allowedRegistries\":[ \"registry.stage.redhat.io\", \"registry.access.redhat.com\", \"registry.connect.redhat.com\", \"ghcr.io\", \"gcr.io\", \"quay.io\", \"registry.redhat.io\", \"docker.io\",
+ \"registry-proxy.engineering.redhat.com\", \"image-registry.openshift-image-registry.svc:5000\", \"{{ registry_route }}\"]}}}" --type=merge
+
+- name: Set registry insecureRegistries
+ ansible.builtin.shell: >
+ oc patch image.config.openshift.io/cluster --patch "{\"spec\":{\"registrySources\":{\"insecureRegistries\":[ \"registry-proxy.engineering.redhat.com\",
+ \"image-registry.openshift-image-registry.svc:5000\", \"{{ registry_route }}\"]}}}" --type=merge
+
+- name: Get current cluster pull secrets
+ ansible.builtin.command:
+ oc extract secret/pull-secret -n openshift-config --to=-
+ register: pull_secrets_raw
+
+- name: Add local registry to pull secrets and set auth fact
+ ansible.builtin.set_fact:
+ pull_secrets_new: "{{ pull_secrets_raw.stdout | from_json }}"
+ internal_registry_auth: "{{ ('kubeadmin:' + kubeadmin_token) | b64encode }}"
+
+- name: Add local registry to pull secrets
+ ansible.builtin.set_fact:
+ pull_secrets: "{{ pull_secrets_new | combine({'auths': {registry_route: {'email': internal_registry_email, 'auth': internal_registry_auth}}}, recursive=true) }}"
+
+- name: Get a tempfile for the pull secrets
+ ansible.builtin.tempfile:
+ state: directory
+ register: pull_secrets_tempfolder
+
+- name: Store pull secrets in tempfile
+ ansible.builtin.copy:
+ dest: "{{ pull_secrets_tempfolder.path }}/.dockerconfigjson"
+ content: "{{ pull_secrets | to_nice_json }}"
+ mode: "0644"
+
+- name: Update pull-secret in the cluster
+ ansible.builtin.shell: |
+ oc set data secret/pull-secret -n openshift-config --from-file="{{ pull_secrets_tempfolder.path }}/.dockerconfigjson"
+
+- name: Before proceeding here we need to make sure that the MCPs have all settled
+ ansible.builtin.shell: |
+ if [ $(oc get mcp/master -o jsonpath='{.status.readyMachineCount}') != $(oc get mcp/master -o jsonpath='{.status.machineCount}') ]; then
+ exit 1
+ fi
+ if [ $(oc get mcp/worker -o jsonpath='{.status.readyMachineCount}') != $(oc get mcp/worker -o jsonpath='{.status.machineCount}') ]; then
+ exit 1
+ fi
+ retries: 30
+ delay: 20
+ register: mcp_ready
+ until: mcp_ready is not failed
+
+- name: Login the internal registry with podman
+ ansible.builtin.command:
+ podman login --tls-verify=false --username unused --password "{{ kubeadmin_token }}" "{{ registry_route }}"
+
+- name: Set Mirror URL fact for internal mirror IIB
+ ansible.builtin.set_fact:
+ mirror_iib: "{{ registry_route }}/{{ internal_registry_ns }}/iib"
+
+- name: Set Mirror URL fact for internal mirror
+ ansible.builtin.set_fact:
+ mirror_dest: "{{ registry_route }}/{{ internal_registry_ns }}/"
diff --git a/common/ansible/roles/iib_ci/templates/catalogSource.yaml.j2 b/common/ansible/roles/iib_ci/templates/catalogSource.yaml.j2
new file mode 100644
index 00000000..99087603
--- /dev/null
+++ b/common/ansible/roles/iib_ci/templates/catalogSource.yaml.j2
@@ -0,0 +1,9 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: CatalogSource
+metadata:
+ name: iib-{{ iib }}
+ namespace: {{ internal_registry_ns }}
+spec:
+ image: {{ mirror_iib }}:{{ iib }}
+ sourceType: grpc
+ displayName: IIB {{ iib }}
diff --git a/common/ansible/roles/iib_ci/templates/htpasswd-oauth.yaml b/common/ansible/roles/iib_ci/templates/htpasswd-oauth.yaml
new file mode 100644
index 00000000..8fc41821
--- /dev/null
+++ b/common/ansible/roles/iib_ci/templates/htpasswd-oauth.yaml
@@ -0,0 +1,14 @@
+apiVersion: config.openshift.io/v1
+kind: OAuth
+metadata:
+ name: cluster
+spec:
+ identityProviders:
+ - name: my_htpasswd_provider
+ mappingMethod: claim
+ type: HTPasswd
+ challenge: true
+ login: true
+ htpasswd:
+ fileData:
+ name: htpass-secret
diff --git a/common/ansible/roles/iib_ci/templates/imageContentSourcePolicy.yaml.j2 b/common/ansible/roles/iib_ci/templates/imageContentSourcePolicy.yaml.j2
new file mode 100644
index 00000000..d0f417ec
--- /dev/null
+++ b/common/ansible/roles/iib_ci/templates/imageContentSourcePolicy.yaml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: operator.openshift.io/v1alpha1
+kind: ImageContentSourcePolicy
+metadata:
+ labels:
+ operators.openshift.org/catalog: "true"
+ name: iib-{{ iib }}
+spec:
+ repositoryDigestMirrors:
+{% for item in image_urls.values() %}
+ - mirrors:
+ - {{ item.mirrordest_nosha }}
+ source: {{ item.source_nosha }}
+ mirrorSourcePolicy: NeverContactSource
+ - mirrors:
+ - {{ item.mirrordest_nosha }}
+ source: {{ item.image_nosha }}
+ mirrorSourcePolicy: NeverContactSource
+{% endfor %}
diff --git a/common/ansible/roles/iib_ci/templates/imageDigestMirror.yaml.j2 b/common/ansible/roles/iib_ci/templates/imageDigestMirror.yaml.j2
new file mode 100644
index 00000000..1b04f321
--- /dev/null
+++ b/common/ansible/roles/iib_ci/templates/imageDigestMirror.yaml.j2
@@ -0,0 +1,18 @@
+apiVersion: config.openshift.io/v1
+kind: ImageDigestMirrorSet
+metadata:
+ labels:
+ operators.openshift.org/catalog: "true"
+ name: iib-{{ iib }}
+spec:
+ imageDigestMirrors:
+{% for item in image_urls.values() %}
+ - mirrors:
+ - {{ item.mirrordest_nosha }}
+ source: {{ item.source_nosha }}
+ mirrorSourcePolicy: AllowContactingSource
+ - mirrors:
+ - {{ item.mirrordest_nosha }}
+ source: {{ item.image_nosha }}
+ mirrorSourcePolicy: AllowContactingSource
+{% endfor %}
diff --git a/common/ansible/roles/iib_ci/templates/mirror.map.j2 b/common/ansible/roles/iib_ci/templates/mirror.map.j2
new file mode 100644
index 00000000..ecef721c
--- /dev/null
+++ b/common/ansible/roles/iib_ci/templates/mirror.map.j2
@@ -0,0 +1,3 @@
+{% for item in image_urls.values() %}
+{{ item.source }}={{ item.mirrordest_nosha }}:{{ item.mirrordest_tag }}
+{% endfor %}
diff --git a/common/ansible/roles/iib_ci/vars/main.yml b/common/ansible/roles/iib_ci/vars/main.yml
new file mode 100644
index 00000000..56894088
--- /dev/null
+++ b/common/ansible/roles/iib_ci/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for iib_ci
diff --git a/common/ansible/roles/k8s_secret_utils/defaults/main.yml b/common/ansible/roles/k8s_secret_utils/defaults/main.yml
new file mode 100644
index 00000000..7ebda207
--- /dev/null
+++ b/common/ansible/roles/k8s_secret_utils/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+secrets_ns: 'validated-patterns-secrets'
diff --git a/common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secret.yml b/common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secret.yml
new file mode 100644
index 00000000..283fb6a2
--- /dev/null
+++ b/common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secret.yml
@@ -0,0 +1,15 @@
+---
+- name: Check for secrets namespace
+ no_log: false
+ kubernetes.core.k8s_info:
+ kind: Namespace
+ name: "{{ item['metadata']['namespace'] }}"
+ register: secrets_ns_rc
+ until: secrets_ns_rc.resources | length > 0
+ retries: 20
+ delay: 45
+
+- name: Inject k8s secret
+ no_log: '{{ override_no_log | default(True) }}'
+ kubernetes.core.k8s:
+ definition: '{{ item }}'
diff --git a/common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secrets.yml b/common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secrets.yml
new file mode 100644
index 00000000..a2299734
--- /dev/null
+++ b/common/ansible/roles/k8s_secret_utils/tasks/inject_k8s_secrets.yml
@@ -0,0 +1,5 @@
+---
+- name: Inject secrets
+ no_log: '{{ override_no_log | default(True) }}'
+ ansible.builtin.include_tasks: inject_k8s_secret.yml
+ loop: '{{ kubernetes_secret_objects }}'
diff --git a/common/ansible/roles/k8s_secret_utils/tasks/main.yml b/common/ansible/roles/k8s_secret_utils/tasks/main.yml
new file mode 100644
index 00000000..d72de7ae
--- /dev/null
+++ b/common/ansible/roles/k8s_secret_utils/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Parse and extract k8s secrets from values-secret file
+ ansible.builtin.include_tasks: parse_secrets.yml
+
+- name: Inject k8s secrets
+ ansible.builtin.include_tasks: inject_k8s_secrets.yml
diff --git a/common/ansible/roles/k8s_secret_utils/tasks/parse_secrets.yml b/common/ansible/roles/k8s_secret_utils/tasks/parse_secrets.yml
new file mode 100644
index 00000000..b1755cc2
--- /dev/null
+++ b/common/ansible/roles/k8s_secret_utils/tasks/parse_secrets.yml
@@ -0,0 +1,12 @@
+---
+- name: Parse secrets data
+ # no_log: '{{ override_no_log | default(true) }}'
+ parse_secrets_info:
+ values_secrets_plaintext: "{{ values_secrets_data }}"
+ secrets_backing_store: "{{ secrets_backing_store }}"
+ register: secrets_results
+
+- name: Return kubernetes objects
+ no_log: '{{ override_no_log | default(true) }}'
+ ansible.builtin.set_fact:
+ kubernetes_secret_objects: "{{ secrets_results['kubernetes_secret_objects'] }}"
diff --git a/common/ansible/roles/vault_utils/README.md b/common/ansible/roles/vault_utils/README.md
new file mode 100644
index 00000000..7198752c
--- /dev/null
+++ b/common/ansible/roles/vault_utils/README.md
@@ -0,0 +1,230 @@
+# Role Name
+
+Bunch of utilities to manage the vault inside k8s imperatively
+
+## Requirements
+
+ansible-galaxy collection install kubernetes.core (formerly known as community.kubernetes)
+
+## Role Variables
+
+Defaults as to where the values-secret.yaml file is and the two ways to connect to a kubernetes cluster
+(KUBERCONFIG and ~/.kube/config respectively):
+
+```yaml
+values_secret: "{{ lookup('env', 'HOME') }}/values-secret.yaml"
+kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
+kubeconfig_backup: "{{ lookup('env', 'HOME') }}/.kube/config"
+```
+
+Default values for vault configuration:
+
+```yaml
+vault_ns: "vault"
+vault_pod: "vault-0"
+vault_hub: "hub"
+vault_hub_kubernetes_host: https://$KUBERNETES_PORT_443_TCP_ADDR:443
+# Needs extra escaping due to how it gets injected via shell in the vault
+vault_hub_capabilities: '[\\\"read\\\"]'
+vault_base_path: "secret"
+vault_path: "{{ vault_base_path }}/{{ vault_hub }}"
+vault_hub_ttl: "15m"
+vault_pki_max_lease_ttl: "8760h"
+external_secrets_ns: golang-external-secrets
+external_secrets_sa: golang-external-secrets
+unseal_secret: "vaultkeys"
+unseal_namespace: "imperative"
+```
+
+## Dependencies
+
+This relies on [kubernetes.core](https://docs.ansible.com/ansible/latest/collections/kubernetes/core/k8s_module.html)
+
+## Values secret file format
+
+Currently this role supports two formats: version 1.0 (which is the assumed
+default when not specified) and version 2.0. The latter is more featureful and
+supports generating secrets directly into the vault and also prompting the user
+for a secret.
+
+By default, the first file that will be looked up is
+`~/.config/hybrid-cloud-patterns/values-secret-<pattern-name>.yaml`, then
+`~/.config/validated-patterns/values-secret-<pattern-name>.yaml`,
+`~/values-secret-<pattern-name>.yaml` and should that not exist it will look for
+`~/values-secret.yaml`.
+The paths can be overridden by setting the environment variable `VALUES_SECRET` to the path of the
+secret file.
+
+The values secret yaml files can be encrypted with `ansible-vault`. If the role detects they are encrypted, the password to
+decrypt them will be prompted when needed.
+
+### Version 1.0
+
+Here is a well-commented example of a version 1.0 file:
+
+```yaml
+---
+# By default when a top-level 'version: 1.0' is missing it is assumed to be '1.0'
+# NEVER COMMIT THESE VALUES TO GIT
+
+secrets:
+ # These secrets will be pushed in the vault at secret/hub/test The vault will
+ # have secret/hub/test with secret1 and secret2 as keys with their associated
+ # values (secrets)
+ test:
+ secret1: foo
+ secret2: bar
+
+ # This ends up as the s3Secret attribute to the path secret/hub/aws
+ aws:
+ s3Secret: test-secret
+
+# This will create the vault key secret/hub/testfoo which will have two
+# properties 'b64content' and 'content' which will be the base64-encoded
+# content and the normal content respectively
+files:
+ testfoo: ~/ca.crt
+# These secrets will be pushed in the vault at secret/region1/test The vault will
+# have secret/region1/test with secret1 and secret2 as keys with their associated
+# values (secrets)
+secrets.region1:
+ test:
+ secret1: foo1
+ secret2: bar1
+# This will create the vault key secret/region2/testbar which will have two
+# properties 'b64content' and 'content' which will be the base64-encoded
+# content and the normal content respectively
+files.region2:
+ testbar: ~/ca.crt
+```
+
+### Version 2.0
+
+Here is a version 2.0 example file (specifying `version: 2.0` is mandatory in this case):
+
+```yaml
+# NEVER COMMIT THESE VALUES TO GIT (unless your file only uses generated
+# passwords or only points to files)
+
+# Needed to specify the new format (missing version means old version: 1.0 by default)
+version: 2.0
+
+backingStore: vault # 'vault' is the default when omitted
+
+# These are the vault policies to be created in the vault
+# these are used when we let the vault generate the passwords
+# by setting the 'onMissingValue' attribute to 'generate'
+# See https://developer.hashicorp.com/vault/docs/concepts/password-policies
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#$%^&*" min-chars = 1 }
+
+# This is the mandatory top-level secrets entry
+secrets:
+ # This will create the following keys + attributes:
+ # - secret/region-one/config-demo:
+ # secret: ......
+ # secretprompt: ......
+ # secretprompt2: ......
+ # secretfile: ......
+ # ca_crt_b64: ......
+ # - secret/snowflake.blueprints.rhecoeng.com:
+ # secret: ......
+ # secretprompt: ......
+ # secretprompt2: ......
+ # secretfile: ......
+ # ca_crt_b64: ......
+ - name: config-demo
+ # This is the default and passes the -mount=secret option to the vault commands
+ vaultMount: secret
+ # These represent the paths inside the vault mount
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate # One of: error,generate,prompt (generate is only valid for normal secrets)
+ # This override attribute is false by default. The attribute is only valid with 'generate'. If the secret already exists in the
+ # vault it won't be changed unless override is set to true
+ override: true
+ vaultPolicy: basicPolicy
+ - name: secretprompt
+ value: null
+ onMissingValue: prompt # when prompting for something you need to set either value: null or path: null as
+ # we need to know if it is a secret plaintext or a file path
+ description: "Please specify the password for application ABC"
+ - name: secretprompt2
+ value: defaultvalue
+ onMissingValue: prompt
+ description: "Please specify the API key for XYZ"
+ - name: secretprompt3
+ onMissingValue: generate
+ vaultPolicy: validatedPatternDefaultPolicy # This is an always-existing hard-coded policy
+ - name: secretfile
+ path: /tmp/ca.crt
+ onMissingValue: prompt
+ description: "Insert path to Certificate Authority"
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: error # One of error, prompt (for path). generate makes no sense for file
+ - name: ca_crt_b64
+ path: /tmp/ca.crt
+ base64: true # defaults to false
+ onMissingValue: prompt # One of error, prompt (for path). generate makes no sense for file
+
+ - name: config-demo2
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: ca_crt2
+ path: /tmp/ca.crt # this will be the default shown when prompted
+ description: "Specify the path for ca_crt2"
+ onMissingValue: prompt # One of error, prompt (for path). generate makes no sense for file
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: error # One of error, prompt (for path). generate makes no sense for file
+
+ # The following will read the ini-file at ~/.aws/credentials and place the ini_key "[default]/aws_access_key_id"
+ # in the aws_access_key_id_test vault attribute in the secret/hub/awsexample path
+ - name: awsexample
+ fields:
+ - name: aws_access_key_id_test
+ ini_file: ~/.aws/credentials
+ ini_section: default
+ ini_key: aws_access_key_id
+ - name: aws_secret_access_key_test
+ ini_file: ~/.aws/credentials
+ ini_key: aws_secret_access_key
+```
+
+Internals
+---------
+
+Here is the rough high-level algorithm used to unseal the vault:
+
+1. Check vault status. If vault is not initialized go to 2. If initialized go to 3.
+2. Initialize vault and store unseal keys + login token inside a secret in k8s
+3. Check vault status. If vault is unsealed go to 5. else go to 4.
+4. Unseal the vault using the secrets read from the k8s secret
+5. Configure the vault (should be idempotent)
+
+## License
+
+Apache
+
+## Author Information
+
+Michele Baldessari
diff --git a/common/ansible/roles/vault_utils/defaults/main.yml b/common/ansible/roles/vault_utils/defaults/main.yml
new file mode 100644
index 00000000..4d263223
--- /dev/null
+++ b/common/ansible/roles/vault_utils/defaults/main.yml
@@ -0,0 +1,24 @@
+---
+# defaults file for vault_utils
+values_secret: "{{ lookup('env', 'HOME') }}/values-secret.yaml"
+kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
+kubeconfig_backup: "{{ lookup('env', 'HOME') }}/.kube/config"
+vault_ns: "vault"
+vault_pod: "vault-0"
+vault_hub: "hub"
+vault_pvc: "data-vault-0"
+vault_hub_kubernetes_host: https://$KUBERNETES_PORT_443_TCP_ADDR:443
+# Needs extra escaping due to how it gets injected via shell in the vault
+vault_hub_capabilities: '[\\\"read\\\"]'
+vault_base_path: "secret"
+vault_path: "{{ vault_base_path }}/{{ vault_hub }}"
+vault_hub_ttl: "15m"
+vault_spoke_capabilities: '[\\\"read\\\"]'
+vault_spoke_ttl: "15m"
+vault_global_policy: global
+vault_global_capabilities: '[\\\"read\\\"]'
+external_secrets_ns: golang-external-secrets
+external_secrets_sa: golang-external-secrets
+external_secrets_secret: golang-external-secrets
+unseal_secret: "vaultkeys"
+unseal_namespace: "imperative"
diff --git a/common/ansible/roles/vault_utils/handlers/main.yml b/common/ansible/roles/vault_utils/handlers/main.yml
new file mode 100644
index 00000000..a983544d
--- /dev/null
+++ b/common/ansible/roles/vault_utils/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for vault_utils
diff --git a/common/ansible/roles/vault_utils/meta/main.yml b/common/ansible/roles/vault_utils/meta/main.yml
new file mode 100644
index 00000000..c99eb3a9
--- /dev/null
+++ b/common/ansible/roles/vault_utils/meta/main.yml
@@ -0,0 +1,31 @@
+galaxy_info:
+ author: Validated Patterns Team https://github.com/hybrid-cloud-patterns/
+ description: Utilities to manage vault in kubernetes (init, unseal, etc)
+
+ issue_tracker_url: https://github.com/hybrid-cloud-patterns/common/issues
+ license: Apache-2.0
+ min_ansible_version: "2.1"
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ platforms:
+ - name: Fedora
+ versions:
+ - all
+ - name: Ubuntu
+ versions:
+ - all
+ - name: Debian
+ versions:
+ - all
+ - name: EL
+ versions:
+ - "8"
+ - "9"
+
+ galaxy_tags: []
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/common/ansible/roles/vault_utils/tasks/main.yml b/common/ansible/roles/vault_utils/tasks/main.yml
new file mode 100644
index 00000000..1072e6b7
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: Run vault init tasks
+ ansible.builtin.import_tasks: vault_init.yaml
+ tags: vault_init
+
+- name: Unseal vault
+ ansible.builtin.import_tasks: vault_unseal.yaml
+ tags: vault_unseal
+
+- name: Vault secrets init
+ ansible.builtin.import_tasks: vault_secrets_init.yaml
+ tags: vault_secrets_init
+
+- name: Vault spoke backend init
+ ansible.builtin.import_tasks: vault_spokes_init.yaml
+ tags: vault_spokes_init
+
+- name: Load secrets
+ ansible.builtin.import_tasks: push_secrets.yaml
+ tags: push_secrets
diff --git a/common/ansible/roles/vault_utils/tasks/push_parsed_secrets.yaml b/common/ansible/roles/vault_utils/tasks/push_parsed_secrets.yaml
new file mode 100644
index 00000000..cbca15e0
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/push_parsed_secrets.yaml
@@ -0,0 +1,43 @@
+---
+- name: "Do pre-checks for Vault"
+ ansible.builtin.include_role:
+ name: vault_utils
+ tasks_from: vault_status
+
+# Unfortunately we cannot loop vault_status and just check if the vault is unsealed
+# https://github.com/ansible/proposals/issues/136
+# So here we keep running the 'vault status' command until sealed is set to false
+- name: If the vault is still sealed we need to retry
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: vault status -format=json
+ register: vault_status_json
+ until: "'stdout' in vault_status_json and (not (vault_status_json.stdout | from_json)['sealed'] | bool)"
+ retries: 20
+ delay: 45
+ failed_when: "'stdout_lines' not in vault_status_json"
+
+# This step is not really needed when running make vault-init + load-secrets as
+# everything is sequential
+# It is needed when the vault is unsealed/configured inside the cluster and load-secrets
+# gets run *while* the cronjob configures the vault. I.e. it might be half configured and return
+# errors
+- name: Make sure that the vault auth policy exists
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command:
+ sh -c "vault list auth/{{ vault_hub }}/role | grep '{{ vault_hub }}-role'"
+ register: vault_role_cmd
+ until:
+ - vault_role_cmd.rc is defined
+ - vault_role_cmd.rc == 0
+ retries: 20
+ delay: 45
+ changed_when: false
+
+- name: Load parsed secrets into cluster vault
+ vault_load_parsed_secrets:
+ vault_policies: "{{ vault_policies }}"
+ parsed_secrets: "{{ parsed_secrets }}"
diff --git a/common/ansible/roles/vault_utils/tasks/push_secrets.yaml b/common/ansible/roles/vault_utils/tasks/push_secrets.yaml
new file mode 100644
index 00000000..7954dc47
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/push_secrets.yaml
@@ -0,0 +1,125 @@
+---
+- name: Vault status check
+ ansible.builtin.include_tasks: vault_status.yaml
+
+# Unfortunately we cannot loop vault_status and just check if the vault is unsealed
+# https://github.com/ansible/proposals/issues/136
+# So here we keep running the 'vault status' command until sealed is set to false
+- name: If the vault is still sealed we need to retry
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: vault status -format=json
+ register: vault_status_json
+ until: "'stdout' in vault_status_json and (not (vault_status_json.stdout | from_json)['sealed'] | bool)"
+ retries: 20
+ delay: 45
+ failed_when: "'stdout_lines' not in vault_status_json"
+
+# This step is not really needed when running make vault-init + load-secrets as
+# everything is sequential
+# It is needed when the vault is unsealed/configured inside the cluster and load-secrets
+# gets run *while* the cronjob configures the vault. I.e. it might be half configured and return
+# errors
+- name: Make sure that the vault auth policy exists
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command:
+ sh -c "vault list auth/{{ vault_hub }}/role | grep '{{ vault_hub }}-role'"
+ register: vault_role_cmd
+ until:
+ - vault_role_cmd.rc is defined
+ - vault_role_cmd.rc == 0
+ retries: 20
+ delay: 45
+ changed_when: false
+
+# Once V1 support is dropped we can remove the whole secret_template support
+- name: Set secret_template fact
+ no_log: true
+ ansible.builtin.set_fact:
+ secret_template: "{{ pattern_dir }}/values-secret.yaml.template"
+
+- name: Is a VALUES_SECRET env variable set?
+ ansible.builtin.set_fact:
+ custom_env_values_secret: "{{ lookup('ansible.builtin.env', 'VALUES_SECRET') }}"
+
+- name: Check if VALUES_SECRET file exists
+ ansible.builtin.stat:
+ path: "{{ custom_env_values_secret }}"
+ register: custom_file_values_secret
+ when: custom_env_values_secret | default('') | length > 0
+
+- name: Set values-secret yaml file to {{ custom_file_values_secret.stat.path }}
+ ansible.builtin.set_fact:
+ found_file: "{{ custom_file_values_secret.stat.path }}"
+ when:
+ - custom_env_values_secret | default('') | length > 0
+ - custom_file_values_secret.stat.exists
+
+# FIXME(bandini): Eventually around end of 2023(?) we should drop
+# ~/values-secret-{{ pattern_name }}.yaml and ~/values-secret.yaml
+- name: Find first existing values-secret yaml file
+ ansible.builtin.set_fact:
+ found_file: "{{ lookup('ansible.builtin.first_found', findme) }}"
+ vars:
+ findme:
+ - "~/.config/hybrid-cloud-patterns/values-secret-{{ pattern_name }}.yaml"
+ - "~/.config/validated-patterns/values-secret-{{ pattern_name }}.yaml"
+ - "~/values-secret-{{ pattern_name }}.yaml"
+ - "~/values-secret.yaml"
+ - "{{ pattern_dir }}/values-secret.yaml.template"
+ when: custom_env_values_secret | default('') | length == 0
+
+- name: Is found values secret file encrypted
+ no_log: true
+ ansible.builtin.shell: |
+ set -o pipefail
+ head -1 "{{ found_file }}" | grep -q \$ANSIBLE_VAULT
+ changed_when: false
+ register: encrypted
+ failed_when: (encrypted.rc not in [0, 1])
+
+# When HOME is set we replace it with '~' in this debug message
+# because when run from inside the container the HOME is /pattern-home
+# which is confusing for users
+- name: Is found values secret file encrypted
+ ansible.builtin.debug:
+ msg: "Using {{ (lookup('env', 'HOME') | length > 0) | ternary(found_file | regex_replace('^' + lookup('env', 'HOME'), '~'), found_file) }} to parse secrets"
+
+- name: Set encryption bool fact
+ no_log: true
+ ansible.builtin.set_fact:
+ is_encrypted: "{{ encrypted.rc == 0 | bool }}"
+
+- name: Get password for "{{ found_file }}"
+ ansible.builtin.pause:
+ prompt: "Input the password for {{ found_file }}"
+ echo: false
+ when: is_encrypted
+ register: vault_pass
+
+- name: Get decrypted content if {{ found_file }} was encrypted
+ no_log: true
+ ansible.builtin.shell:
+ ansible-vault view --vault-password-file <(cat <<<"{{ vault_pass.user_input }}") "{{ found_file }}"
+ register: values_secret_plaintext
+ when: is_encrypted
+ changed_when: false
+
+- name: Loads secrets file into the vault of a cluster
+ no_log: false
+ vault_load_secrets:
+ values_secrets: "{{ found_file }}"
+ check_missing_secrets: false
+ values_secret_template: "{{ secret_template }}"
+ when: not is_encrypted
+
+- name: Loads secrets file into the vault of a cluster
+ no_log: false
+ vault_load_secrets:
+ values_secrets_plaintext: "{{ values_secret_plaintext.stdout }}"
+ check_missing_secrets: false
+ values_secret_template: "{{ secret_template }}"
+ when: is_encrypted
diff --git a/common/ansible/roles/vault_utils/tasks/vault_init.yaml b/common/ansible/roles/vault_utils/tasks/vault_init.yaml
new file mode 100644
index 00000000..38e1e911
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/vault_init.yaml
@@ -0,0 +1,47 @@
+---
+- name: Vault status check
+ ansible.builtin.include_tasks: vault_status.yaml
+
+# If the vault is already initialized we skip all the tasks below
+- name: Is the vault initialized?
+ ansible.builtin.set_fact:
+ vault_initialized: "{{ vault_status['initialized'] | bool }}"
+
+# We need to retry here because the vault service might be starting
+# and can return a 500 internal server until its state is fully ready
+- name: Init vault operator
+ no_log: true
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: vault operator init -format=json
+ register: vault_init_json_out
+ until: vault_init_json_out is not failed
+ retries: 10
+ delay: 15
+ when: not vault_initialized
+
+- name: Set vault init output json fact
+ no_log: true
+ ansible.builtin.set_fact:
+ vault_init_json: "{{ vault_init_json_out.stdout | from_json }}"
+ when: not vault_initialized
+
+# We store the operator unseal keys and root token in a secret inside
+# the cluster when the vault was not already initialized *and* when
+# unseal_from_cluster is set to true
+- name: Save vault operator output (into a secret inside the cluster)
+ no_log: true
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Secret
+ type: Opaque
+ metadata:
+ name: "{{ unseal_secret }}"
+ namespace: "{{ unseal_namespace }}"
+ data:
+ vault_data_json: "{{ vault_init_json | to_nice_json | b64encode }}"
+ when:
+ - not vault_initialized
diff --git a/common/ansible/roles/vault_utils/tasks/vault_secrets_init.yaml b/common/ansible/roles/vault_utils/tasks/vault_secrets_init.yaml
new file mode 100644
index 00000000..35327d58
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/vault_secrets_init.yaml
@@ -0,0 +1,96 @@
+---
+- name: Is secrets backend already enabled
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: >
+ bash -e -c "vault secrets list | grep -e '^{{ vault_base_path }}'"
+ register: secrets_enabled
+ failed_when: false
+
+- name: Create secrets backend kv-v2
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: vault secrets enable -path="{{ vault_base_path }}" kv-v2
+ when: secrets_enabled.rc != 0
+
+- name: Is kubernetes backend already enabled
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: >
+ bash -e -c "vault auth list | grep -e '^{{ vault_hub }}'"
+ register: kubernetes_enabled
+ failed_when: false
+
+- name: Enable kubernetes backend on hub
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: "vault auth enable -path={{ vault_hub }} kubernetes"
+ when: kubernetes_enabled.rc != 0
+
+- name: Get token from service account secret {{ external_secrets_ns }}/{{ external_secrets_secret }}
+ no_log: true
+ kubernetes.core.k8s_info:
+ kind: Secret
+ namespace: "{{ external_secrets_ns }}"
+ name: "{{ external_secrets_secret }}"
+ api_version: v1
+ register: token_data
+ failed_when: token_data.resources | length == 0
+
+- name: Set sa_token fact
+ no_log: true
+ ansible.builtin.set_fact:
+ sa_token: "{{ token_data.resources[0].data.token | b64decode }}"
+
+- name: Configure hub kubernetes backend
+ no_log: true
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: bash -e -c "vault write auth/{{ vault_hub }}/config token_reviewer_jwt={{ sa_token }}
+ kubernetes_host={{ vault_hub_kubernetes_host }}
+ kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ issuer=https://kubernetes.default.svc"
+
+# This creates a {{ vault_global_policy }} policy that is applied to both hubs and spokes
+- name: Configure VP global policy template
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: >
+ bash -e -c "echo \"path \\\"secret/data/{{ vault_global_policy }}/*\\\" {
+ capabilities = {{ vault_global_capabilities }} }\" > /tmp/policy-{{ vault_global_policy }}.hcl"
+
+- name: Configure VP global policy
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: "vault policy write {{ vault_global_policy }}-secret /tmp/policy-{{ vault_global_policy }}.hcl"
+
+- name: Configure policy template for hub
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: >
+ bash -e -c "echo \"path \\\"secret/data/{{ vault_hub }}/*\\\" {
+ capabilities = {{ vault_hub_capabilities }} }\" > /tmp/policy-{{ vault_hub }}.hcl"
+
+- name: Configure policy for hub
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: "vault policy write {{ vault_hub }}-secret /tmp/policy-{{ vault_hub }}.hcl"
+
+- name: Configure kubernetes role for hub
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: >
+ vault write auth/"{{ vault_hub }}"/role/"{{ vault_hub }}"-role
+ bound_service_account_names="{{ external_secrets_sa }}"
+ bound_service_account_namespaces="{{ external_secrets_ns }}"
+ policies="default,{{ vault_global_policy }}-secret,{{ vault_hub }}-secret" ttl="{{ vault_hub_ttl }}"
diff --git a/common/ansible/roles/vault_utils/tasks/vault_spokes_init.yaml b/common/ansible/roles/vault_utils/tasks/vault_spokes_init.yaml
new file mode 100644
index 00000000..e930252a
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/vault_spokes_init.yaml
@@ -0,0 +1,190 @@
+---
+- name: Find managed clusters
+ kubernetes.core.k8s_info:
+ kind: ManagedCluster
+ api_version: "cluster.open-cluster-management.io/v1"
+ register: managed_clusters
+
+- name: Set resource fact
+ ansible.builtin.set_fact:
+ resources: "{{ managed_clusters['resources'] }}"
+
+- name: Do nothing when no managed clusters are found
+ ansible.builtin.meta: end_play
+ when: resources | length == 0 or managed_clusters.failed or not managed_clusters.api_found
+
+- name: Loop over returned ACM managedclusters
+ ansible.builtin.set_fact:
+ clusters: "{{ clusters | default({}) | combine({item.metadata.name: {'caBundle': item.spec.managedClusterClientConfigs[0].caBundle | b64decode}}) }}"
+ loop: "{{ resources }}"
+ when: item.spec.managedClusterClientConfigs[0].caBundle is defined
+ loop_control:
+ label: "{{ item.metadata.name }}"
+
+- name: Extract ClusterGroup
+ ansible.builtin.set_fact:
+ clusters: "{{ clusters | default({}) | combine({item.metadata.name: {'clusterGroup': item.metadata.labels.clusterGroup}}, recursive=True) }}"
+ when: "'clusterGroup' in item.metadata.labels"
+ loop: "{{ resources }}"
+ loop_control:
+ label: "{{ item.metadata.name }}"
+
+- name: Fetch all ACM secrets
+ kubernetes.core.k8s_info:
+ kind: Secret
+ label_selectors:
+ - "apps.open-cluster-management.io/secret-type=acm-cluster"
+ register: acm_secrets
+
+- name: Set cleaned_acm_secrets fact
+ ansible.builtin.set_fact:
+ cleaned_acm_secrets: "{{ acm_secrets.resources | parse_acm_secrets }}"
+
+- name: Merge the two dicts together
+ ansible.builtin.set_fact:
+ clusters_info: "{{ clusters | default({}) | combine(cleaned_acm_secrets, recursive=True) }}"
+
+- name: Write out CAs
+ ansible.builtin.copy:
+ content: "{{ item.value['caBundle'] }}"
+ dest: "/tmp/{{ item.key }}.ca"
+ mode: "0640"
+ loop: "{{ clusters_info | dict2items }}"
+ when: item.value['caBundle'] is defined
+ loop_control:
+ label: "{{ item.key }}"
+
+# FIXME(bandini): validate_certs is false due to an ACM bug when using
+# letsencrypt certificates with API endpoints: https://issues.redhat.com/browse/ACM-4398
+# We always verify the CA chain except when letsencrypt.api_endpoint is set to true
+- name: If we are using letsencrypt on the API endpoints we cannot use the validate_certs later
+ ansible.builtin.set_fact:
+ validate_certs_api_endpoint: "{{ not letsencrypt.api_endpoint | default(True) | bool }}"
+
+- name: Fetch external-secrets service account secret from remote cluster
+ kubernetes.core.k8s_info:
+ api_key: "{{ item.value['bearerToken'] }}"
+ ca_cert: /tmp/{{ item.key }}.ca
+ host: "{{ item.value['server_api'] }}"
+ kind: Secret
+ namespace: "{{ external_secrets_ns }}"
+ name: "{{ external_secrets_secret }}"
+ api_version: v1
+ validate_certs: "{{ validate_certs_api_endpoint }}"
+ register: remote_external_secrets_sa
+ when:
+ - clusters_info[item.key]['bearerToken'] is defined
+ - clusters_info[item.key]['server_api'] is defined
+ - clusters_info[item.key]['caBundle'] is defined
+ loop: "{{ clusters_info | dict2items }}"
+ loop_control:
+ label: "{{ item.key }}"
+
+# 'token' will be empty if the remote cluster has no golang-external-secrets
+# app configured and running
+- name: Loop over returned ESO tokens
+ ansible.builtin.set_fact:
+ clusters_info: "{{ clusters_info | default({}) | combine({item['item']['key']: {'esoToken': item['resources'][0]['data']['token'] | b64decode}}, recursive=True) }}"
+ loop: "{{ remote_external_secrets_sa.results }}"
+ when: item['resources'][0]['data']['token'] is defined
+ loop_control:
+ label: "{{ item['item']['key'] }}"
+
+# At this point clusters_info contains a per cluster hash table with *all* the right attributes. For example:
+# "mcg-one": {
+# "bearerToken": "ey...",
+# "caBundle": "-----BEGIN CERTIFICATE-----\nMIIDMjCCA",
+# "clusterGroup": "group-one",
+# "cluster_fqdn": "mcg-one.blueprints.rhecoeng.com",
+# "vault_path": "hub" (when the hub) and the cluster_fqdn when not hub,
+# "esoToken": (optional) only if there was an external golang-external-secrets namespace+service account
+# "name": "mcg-one",
+# "server_api": "https://api.mcg-one.blueprints.rhecoeng.com:6443",
+# "tlsClientConfig": {
+# "insecure": true
+# }
+# }
+- name: Dump CABundles into the vault
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: bash -e -c "echo '{{ item.value['caBundle'] }}' > /tmp/{{ item.value['vault_path'] }}.ca"
+ loop: "{{ clusters_info | dict2items }}"
+ when:
+ - item.value['esoToken'] is defined
+ - item.key != "local-cluster"
+ loop_control:
+ label: "{{ item.key }}"
+
+- name: Is kubernetes backend already enabled
+ no_log: true
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: bash -e -c "if vault auth list | grep -e ^'{{ item.value['vault_path'] }}'; then
+ echo done; else
+ vault auth enable -path='{{ item.value['vault_path'] }}' kubernetes; fi"
+ loop: "{{ clusters_info | dict2items }}"
+ when:
+ - item.value['esoToken'] is defined
+ - item.key != "local-cluster"
+ loop_control:
+ label: "{{ item.key }}"
+
+- name: Configure kubernetes backend
+ no_log: true
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: bash -e -c "vault write auth/{{ item.value['vault_path'] }}/config
+ token_reviewer_jwt=\"{{ item.value['esoToken'] }}\"
+ kubernetes_host=\"{{ item.value['server_api'] }}\"
+ kubernetes_ca_cert=@/tmp/{{ item.value['vault_path'] }}.ca"
+ loop: "{{ clusters_info | dict2items }}"
+ when:
+ - item.value['esoToken'] is defined
+ - item.key != "local-cluster"
+ loop_control:
+ label: "{{ item.key }}"
+
+- name: Configure policy template
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: >
+ bash -e -c "echo \"path \\\"secret/data/{{ item.value['vault_path'] }}/*\\\" {
+ capabilities = {{ vault_spoke_capabilities }} }\" > /tmp/policy-{{ item.value['vault_path'] }}.hcl"
+ loop: "{{ clusters_info | dict2items }}"
+ when:
+ - item.value['esoToken'] is defined
+ - item.key != "local-cluster"
+ loop_control:
+ label: "{{ item.key }}"
+
+- name: Configure policy for spokes
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: "vault policy write {{ item.value['vault_path'] }}-secret /tmp/policy-{{ item.value['vault_path'] }}.hcl"
+ loop: "{{ clusters_info | dict2items }}"
+ when:
+ - item.value['esoToken'] is defined
+ - item.key != "local-cluster"
+ loop_control:
+ label: "{{ item.key }}"
+
+- name: Configure kubernetes role for spokes
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: >
+ vault write auth/"{{ item.value['vault_path'] }}"/role/"{{ item.value['vault_path'] }}"-role
+ bound_service_account_names="{{ external_secrets_sa }}"
+ bound_service_account_namespaces="{{ external_secrets_ns }}"
+ policies="default,{{ vault_global_policy }}-secret,{{ item.value['vault_path'] }}-secret" ttl="{{ vault_spoke_ttl }}"
+ loop: "{{ clusters_info | dict2items }}"
+ when:
+ - item.value['esoToken'] is defined
+ - item.key != "local-cluster"
+ loop_control:
+ label: "{{ item.key }}"
diff --git a/common/ansible/roles/vault_utils/tasks/vault_status.yaml b/common/ansible/roles/vault_utils/tasks/vault_status.yaml
new file mode 100644
index 00000000..9dc3e426
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/vault_status.yaml
@@ -0,0 +1,61 @@
+---
+# Registers a variable called vault_status containing the vault's status json dict
+- name: Check for vault namespace
+ kubernetes.core.k8s_info:
+ kind: Namespace
+ name: "{{ vault_ns }}"
+ register: vault_ns_rc
+ until: vault_ns_rc.resources | length > 0
+ retries: 20
+ delay: 45
+
+- name: Check if the vault pod is present
+ kubernetes.core.k8s_info:
+ kind: Pod
+ namespace: "{{ vault_ns }}"
+ name: "{{ vault_pod }}"
+ register: vault_pod_rc
+ until: vault_pod_rc.resources | length > 0
+ retries: 20
+ delay: 45
+
+# This needs retrying because during startup we can just get
+# Failed to execute on pod vault-0 due to : (0)\nReason: Handshake status 500 Internal Server Error
+# In the above case there is no 'rc' in vault_status. So first we wait for 'rc' to show up and ignore
+# any errors, and then we bail out if rc is 2 as it means the vault is already initialized
+- name: Check for the vault status
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: vault status -format=json
+ register: vault_status_json
+ until: "'rc' in vault_status_json"
+ retries: 20
+ delay: 45
+ failed_when: "'stdout_lines' not in vault_status_json"
+
+- name: Set vault status output json fact
+ ansible.builtin.set_fact:
+ vault_status: "{{ vault_status_json.stdout | from_json }}"
+ when: vault_status_json.stdout_lines | length > 0
+
+- name: List Vault pods
+ kubernetes.core.k8s_info:
+ namespace: "{{ vault_ns }}"
+ kind: Pod
+ label_selectors:
+ - "component = server"
+ register: vault_pods_list
+
+- name: "Get pods"
+ ansible.builtin.set_fact:
+ vault_pods: "{{ vault_pods + [item.metadata.name] }}"
+ loop: "{{ vault_pods_list.resources }}"
+ loop_control:
+ label: "{{ item.metadata.name }}"
+ vars:
+ vault_pods: []
+
+- name: "Followers"
+ ansible.builtin.set_fact:
+ followers: "{{ vault_pods | difference(vault_pod) }}"
diff --git a/common/ansible/roles/vault_utils/tasks/vault_unseal.yaml b/common/ansible/roles/vault_utils/tasks/vault_unseal.yaml
new file mode 100644
index 00000000..43232ac7
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tasks/vault_unseal.yaml
@@ -0,0 +1,88 @@
+---
+- name: Vault status check
+ ansible.builtin.include_tasks: vault_status.yaml
+
+# If the vault is already unsealed we skip all the tasks below
+- name: Is the vault sealed?
+ ansible.builtin.set_fact:
+ vault_sealed: "{{ vault_status['sealed'] | bool }}"
+
+# We reparse the json vault init secret in case unseal was called without operator init before
+- name: Parse vaultkeys
+ kubernetes.core.k8s_info:
+ kind: Secret
+ namespace: "{{ unseal_namespace }}"
+ name: "{{ unseal_secret }}"
+ api_version: v1
+ register: vault_init_data
+ when: vault_sealed
+
+- name: Does the vaultkeys secret exist?
+ ansible.builtin.set_fact:
+ vaultkeys_exists: "{{ vault_init_data.resources | length > 0 }}"
+ when: vault_sealed
+
+- name: Vaultkeys does not exist and the vault is sealed, so exit
+ ansible.builtin.meta: end_play
+ when:
+ - vault_sealed
+ - not vaultkeys_exists
+
+- name: Set vault init json
+ ansible.builtin.set_fact:
+ vault_init_json: "{{ vault_init_data.resources[0].data.vault_data_json | b64decode | from_json }}"
+ when: vault_sealed
+
+- name: Set root token and unseal_keys
+ ansible.builtin.set_fact:
+ root_token: "{{ vault_init_json['root_token'] }}"
+ unseal_keys: "{{ vault_init_json['unseal_keys_hex'] }}"
+ when: vault_sealed
+
+- name: Unseal leader
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: vault operator unseal "{{ item }}"
+ loop: "{{ unseal_keys }}"
+ loop_control:
+ extended: true
+ label: "Unsealing with key {{ ansible_loop.index }}"
+ when: vault_sealed
+
+- name: Join Raft cluster
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ item }}"
+ command: vault operator raft join http://{{ vault_pod }}.{{ vault_ns }}-internal:8200
+ register: join_raft_cluster_out
+ until: join_raft_cluster_out is not failed
+ retries: 10
+ delay: 15
+ loop: "{{ followers }}"
+ loop_control:
+ extended: true
+ label: "Joining Raft Cluster on http://{{ vault_pod }}.{{ vault_ns }}-internal:8200"
+ when:
+ - vault_sealed
+ - followers | length > 0
+
+- name: Unseal followers
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ item.0 }}"
+ command: vault operator unseal "{{ item.1 }}"
+ loop: "{{ followers | product(unseal_keys) | list }}"
+ loop_control:
+ extended: true
+ label: "Unsealing {{ item.0 }} with key {{ ansible_loop.index }}"
+ when:
+ - vault_sealed
+ - followers | length > 0
+
+- name: Login into vault
+ kubernetes.core.k8s_exec:
+ namespace: "{{ vault_ns }}"
+ pod: "{{ vault_pod }}"
+ command: vault login "{{ root_token }}"
+ when: vault_sealed
diff --git a/common/ansible/roles/vault_utils/tests/inventory b/common/ansible/roles/vault_utils/tests/inventory
new file mode 100644
index 00000000..878877b0
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/common/ansible/roles/vault_utils/tests/test.yml b/common/ansible/roles/vault_utils/tests/test.yml
new file mode 100644
index 00000000..b4da5c68
--- /dev/null
+++ b/common/ansible/roles/vault_utils/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Test Play
+ hosts: localhost
+ remote_user: root
+ roles:
+ - vault_utils
diff --git a/common/ansible/roles/vault_utils/values-secrets.v1.schema.json b/common/ansible/roles/vault_utils/values-secrets.v1.schema.json
new file mode 100644
index 00000000..3cb8c530
--- /dev/null
+++ b/common/ansible/roles/vault_utils/values-secrets.v1.schema.json
@@ -0,0 +1,38 @@
+{
+ "$schema": "http://json-schema.org/draft-06/schema#",
+ "$ref": "#/definitions/valuesSecretsV1",
+ "meta:license": [
+ "Copyright 2022 Red Hat, Inc. All rights reserved.",
+ "This file is licensed to you under the Apache License, Version 2.0 (the 'License');",
+ "you may not use this file except in compliance with the License. You may obtain a copy",
+ "of the License at http://www.apache.org/licenses/LICENSE-2.0"
+ ],
+ "title": "Hybrid Cloud Patterns - values-secret.yaml files schema V1",
+ "description": "This schema defines the values-secret.yaml file as used by [Validated Patterns](https://hybrid-cloud-patterns.io)",
+ "type": "object",
+ "examples": [],
+ "definitions": {
+ "valuesSecretsV1": {
+ "title": "Values Secrets V1 Format",
+ "type": "object",
+ "additionalProperties": true,
+ "properties": {
+ "version": {
+ "type": [ "string", "null" ],
+ "description": "Version of the secret specification",
+ "default": "1.0"
+ }
+ },
+ "patternProperties": {
+ "secrets[a-z0-9.]*$": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "files[a-z0-9.]*$": {
+ "type": "object",
+ "additionalProperties": true
+ }
+ }
+ }
+ }
+}
diff --git a/common/ansible/roles/vault_utils/values-secrets.v2.schema.json b/common/ansible/roles/vault_utils/values-secrets.v2.schema.json
new file mode 100644
index 00000000..c8b5c020
--- /dev/null
+++ b/common/ansible/roles/vault_utils/values-secrets.v2.schema.json
@@ -0,0 +1,335 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$ref": "#/definitions/valuesSecretsV2",
+ "meta:license": [
+ "Copyright 2022 Red Hat, Inc. All rights reserved.",
+ "This file is licensed to you under the Apache License, Version 2.0 (the 'License');",
+ "you may not use this file except in compliance with the License. You may obtain a copy",
+ "of the License at http://www.apache.org/licenses/LICENSE-2.0"
+ ],
+ "title": "Hybrid Cloud Patterns - values-secret.yaml files schema V2",
+ "description": "This schema defines the values-secret.yaml file as used by [Validated Patterns](https://hybrid-cloud-patterns.io)",
+ "type": "object",
+ "examples": [
+ {
+ "version": "2.0",
+ "backingStore": "vault",
+ "vaultPolicies": {
+ "basicPolicy": "length=10\nrule \"charset\" { charset = \"abcdefghijklmnopqrstuvwxyz\" min-chars = 1 }\nrule \"charset\" { charset = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" min-chars = 1 }\nrule \"charset\" { charset = \"0123456789\" min-chars = 1 }\n",
+ "advancedPolicy": "length=20\nrule \"charset\" { charset = \"abcdefghijklmnopqrstuvwxyz\" min-chars = 1 }\nrule \"charset\" { charset = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" min-chars = 1 }\nrule \"charset\" { charset = \"0123456789\" min-chars = 1 }\nrule \"charset\" { charset = \"!@#$%^&*\" min-chars = 1 }\n"
+ },
+ "secrets": [
+ {
+ "name": "config-demo",
+ "vaultMount": "secret",
+ "vaultPrefixes": [
+ "region-one",
+ "snowflake.blueprints.rhecoeng.com"
+ ],
+ "fields": [
+ {
+ "name": "secret",
+ "onMissingValue": "generate",
+ "override": true,
+ "vaultPolicy": "basicPolicy"
+ },
+ {
+ "name": "secretprompt",
+ "value": null,
+ "onMissingValue": "prompt",
+ "prompt": "Please specify the password for application ABC"
+ },
+ {
+ "name": "secretprompt2",
+ "value": "defaultvalue",
+ "onMissingValue": "prompt",
+ "prompt": "Please specify the API key for XYZ"
+ },
+ {
+ "name": "secretfile",
+ "path": "/tmp/ca.crt",
+ "onMissingValue": "prompt",
+ "prompt": "Insert path to Certificate Authority"
+ },
+ {
+ "name": "ca_crt",
+ "path": "/tmp/ca.crt",
+ "onMissingValue": "error"
+ },
+ {
+ "name": "ca_crt_b64",
+ "path": "/tmp/ca.crt",
+ "base64": true,
+ "onMissingValue": "prompt"
+ }
+ ]
+ },
+ {
+ "name": "config-demo2",
+ "vaultPrefixes": [
+ "region-one",
+ "snowflake.blueprints.rhecoeng.com"
+ ],
+ "fields": [
+ {
+ "name": "ca_crt2",
+ "path": null,
+ "onMissingValue": "prompt"
+ },
+ {
+ "name": "ca_crt",
+ "path": "/tmp/ca.crt",
+ "onMissingValue": "error"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "definitions": {
+ "valuesSecretsV2": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "version": {
+ "type": [ "string", "null" ],
+ "description": "Version of the secret specification",
+ "default": "1.0"
+ },
+ "backingStore": {
+ "type": "string",
+ "description": "Secrets backing store type",
+ "default": "vault"
+ },
+ "vaultPolicies": {
+ "$ref": "#/definitions/VaultPolicies",
+ "description": "A dictionary of {name}:{policy} of custom vault password policies"
+ },
+ "secretStoreNamespace": {
+ "type": "string",
+ "description": "Namespace to store secrets in for kubernetes loader",
+ "default": "validated-patterns-secrets"
+ },
+ "defaultLabels": {
+ "type": "object",
+ "description": "Default labels to add to secret objects for kubernetes loader"
+ },
+ "defaultAnnotations": {
+ "type": "object",
+        "description": "Default annotations to add to secret objects for kubernetes loader"
+ },
+ "secrets": {
+ "$ref": "#/definitions/Secrets",
+ "description": "The list of actual secrets to be uploaded in the vault"
+ }
+ },
+ "required": [
+ "secrets"
+ ],
+ "title": "Values Secrets V2 Format"
+ },
+ "VaultPolicies": {
+ "type": "object",
+ "description": "A dictionary of {name}:{policy} of custom vault password policies",
+      "additionalProperties": {
+        "$ref": "#/definitions/VaultPolicy"
+      },
+ "examples": [
+ {
+ "vaultPolicies": {
+ "basicPolicy": "length=10\nrule \"charset\" { charset = \"abcdefghijklmnopqrstuvwxyz\" min-chars = 1 }\nrule \"charset\" { charset = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" min-chars = 1 }\nrule \"charset\" { charset = \"0123456789\" min-chars = 1 }\n",
+ "advancedPolicy": "length=20\nrule \"charset\" { charset = \"abcdefghijklmnopqrstuvwxyz\" min-chars = 1 }\nrule \"charset\" { charset = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" min-chars = 1 }\nrule \"charset\" { charset = \"0123456789\" min-chars = 1 }\nrule \"charset\" { charset = \"!@#$%^&*\" min-chars = 1 }\n"
+ }
+ }
+ ]
+ },
+ "VaultPolicy": {
+ "type": "string",
+ "description": "A password policy to be created in the vault. See https://developer.hashicorp.com/vault/docs/concepts/password-policies"
+ },
+ "Secrets": {
+ "type": "array",
+ "description": "The list of secrets to be injected into the vault",
+ "items": {
+ "$ref": "#/definitions/Secret"
+ }
+ },
+ "Secret": {
+ "type": "object",
+ "description": "The single secret to be injected into the vault",
+ "additionalProperties": false,
+ "required": [ "name", "fields" ],
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the top level key that will be created at the vaultMount point and that will contain one secret per field inside its attributes"
+ },
+ "vaultMount": {
+ "type": "string",
+ "description": "This is the vault -mount=<...> mount point used in vault commands",
+ "default": "secret"
+ },
+      "vaultPrefixes": {
+        "type": "array",
+        "description": "This is the list of prefixes the secret will be uploaded to. It defaults to ['hub'] when not specified",
+        "minItems": 1,
+        "uniqueItems": true,
+        "items": {
+          "type": "string"
+        },
+        "default": [ "hub" ]
+      },
+      "targetNamespaces": {
+        "type": "array",
+        "description": "The namespace(s) that the secret will be injected into, ignored by configs using ESO",
+        "minItems": 1,
+        "uniqueItems": true,
+        "items": {
+          "type": "string"
+        }
+      },
+ "annotations": {
+ "type": "object",
+ "description": "Annotations to add to the kubernetes secret object, which override defaults"
+ },
+ "labels": {
+ "type": "object",
+ "description": "Labels to add to the kubernetes secret object, which override defaults"
+ },
+      "fields": {
+        "type": "array",
+        "description": "This is the list of actual secret material that will be placed in a vault key's attributes",
+        "minItems": 1,
+        "uniqueItems": true,
+        "items": {
+          "type": "object",
+          "$ref": "#/definitions/Field"
+        }
+      }
+ }
+ },
+ "Field": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the attribute inside vault"
+ },
+ "onMissingValue": {
+ "type": "string",
+ "default": "error",
+        "description": "'error' will generate an error if the secret (via the value or path attributes) is not defined. 'generate' will create a secret using a defined vaultPolicy. 'prompt' will ask the user for input and requires setting a value or a path depending on whether the user should input a secret or a path to a secret file. Non-null entries represent the default value when prompted.",
+ "enum": [
+ "error",
+ "generate",
+ "prompt"
+ ]
+ },
+ "prompt": {
+ "type": "string",
+ "description": "Represents the prompt used when onMissingValue is set to prompt"
+ },
+ "value": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Is the value of a secret. Represents the default value when onMissingValue is set to prompt"
+ },
+ "path": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Is the path to a secret file. Represents the default path when onMissingValue is set to prompt"
+ },
+ "ini_file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Is the path to an ini_file containing secret material"
+ },
+ "ini_section": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Is the section in an ini file where a user-defined key will be looked up",
+ "default": "default"
+ },
+ "ini_key": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Is the key inside a section in an inifile whose value will be used"
+ },
+ "vaultPolicy": {
+ "type": "string",
+ "description": "When onMissingValue is set to 'generate', uses this policy to create the secret inside the vault directly"
+ },
+ "base64": {
+ "type": "boolean",
+ "description": "Before uploading the secret the content is base-64 encoded. It is recommended to set this to true when dealing with files",
+        "default": false
+ },
+ "override": {
+ "type": "boolean",
+ "description": "When onMissingValue is set to 'generate' and the secret already exists in the vault update it",
+        "default": false
+ }
+ },
+ "dependentRequired": {
+ "ini_file": ["ini_key"]
+ },
+ "allOf": [
+ {
+ "if": {
+ "properties": { "onMissingValue": { "enum": ["prompt"] } }
+ },
+ "then": {
+ "oneOf": [
+ {
+ "required": [ "path" ]
+ },
+ {
+ "required": [ "value" ]
+ }
+ ]
+ }
+ },
+ {
+ "if": {
+ "properties": { "onMissingValue": { "enum": ["generate"] } }
+ },
+ "then": {
+ "required": [ "vaultPolicy" ]
+ }
+ },
+ {
+ "if": {
+ "properties": { "onMissingValue": { "enum": ["error"] } }
+ },
+ "then": {
+ "oneOf": [
+ {
+ "required": [ "path" ]
+ },
+ {
+ "required": [ "ini_file" ]
+ },
+ {
+ "required": [ "value" ]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+}
diff --git a/common/ansible/roles/vault_utils/vars/main.yml b/common/ansible/roles/vault_utils/vars/main.yml
new file mode 100644
index 00000000..f6e02b93
--- /dev/null
+++ b/common/ansible/roles/vault_utils/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for vault_utils
diff --git a/common/ansible/tests/unit/test_ini_file.py b/common/ansible/tests/unit/test_ini_file.py
new file mode 100644
index 00000000..e92280cb
--- /dev/null
+++ b/common/ansible/tests/unit/test_ini_file.py
@@ -0,0 +1,56 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple module to test ini parsing function
+"""
+
+import os
+import sys
+import unittest
+
+# TODO(bandini): I could not come up with something better to force the imports to be existing
+# when we 'import vault_load_secrets'
+sys.path.insert(1, "./ansible/plugins/module_utils")
+sys.path.insert(1, "./ansible/plugins/modules")
+import load_secrets_common # noqa: E402
+
+
+class TestMyModule(unittest.TestCase):
+ def setUp(self):
+ self.testdir_v2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "v2")
+
+ def test_ensure_ini_file_parsed_correctly(self):
+ f = os.path.join(self.testdir_v2, "aws-example.ini")
+ key_id = load_secrets_common.get_ini_value(f, "default", "aws_access_key_id")
+ access_key = load_secrets_common.get_ini_value(
+ f, "default", "aws_secret_access_key"
+ )
+ self.assertEqual(key_id, "A123456789012345678A")
+ self.assertEqual(access_key, "A12345678901234567890123456789012345678A")
+
+ def test_ensure_ini_file_missing_value_is_none(self):
+ f = os.path.join(self.testdir_v2, "aws-example.ini")
+ missing_id = load_secrets_common.get_ini_value(f, "default", "nonexisting")
+ self.assertEqual(missing_id, None)
+
+ def test_ensure_ini_file_missing_section_is_none(self):
+ f = os.path.join(self.testdir_v2, "aws-example.ini")
+ missing_id = load_secrets_common.get_ini_value(f, "nonexisting", "nonexisting")
+ self.assertEqual(missing_id, None)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/common/ansible/tests/unit/test_parse_secrets.py b/common/ansible/tests/unit/test_parse_secrets.py
new file mode 100644
index 00000000..0cfef1b6
--- /dev/null
+++ b/common/ansible/tests/unit/test_parse_secrets.py
@@ -0,0 +1,981 @@
+# Copyright 2022, 2023 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple module to test parse_secret_info
+"""
+
+import base64
+import configparser
+import json
+import os
+import sys
+import unittest
+from unittest import mock
+from unittest.mock import patch
+
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+from test_util_datastructures import (
+ DEFAULT_KUBERNETES_METADATA,
+ DEFAULT_KUBERNETES_SECRET_OBJECT,
+ DEFAULT_PARSED_SECRET_VALUE,
+ DEFAULT_VAULT_POLICIES,
+)
+
+# from unittest.mock import call, patch
+
+# TODO(bandini): I could not come up with something better to force the imports to be existing
+# when we "import parse_secrets_info"
+sys.path.insert(1, "./ansible/plugins/module_utils")
+sys.path.insert(1, "./ansible/plugins/modules")
+
+import load_secrets_common # noqa: E402
+
+sys.modules["ansible.module_utils.load_secrets_common"] = load_secrets_common
+
+import parse_secrets_v2 # noqa: E402
+
+sys.modules["ansible.module_utils.parse_secrets_v2"] = parse_secrets_v2
+
+import parse_secrets_info # noqa: E402
+
+sys.modules["ansible.modules.parse_secrets_info"] = parse_secrets_info
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({"ANSIBLE_MODULE_ARGS": args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class BytesEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, bytes):
+ return base64.b64encode(o).decode("ascii")
+ else:
+ return super().default(o)
+
+
+def json_str(a):
+ return json.dumps(a, sort_keys=True, cls=BytesEncoder)
+
+
+def ds_eq(a, b):
+ """
+ This function takes two arbitrary data structures, sorts their keys, stringifies them into JSON
+ and compares them. The idea here is to test data structure difference without having to write
+ an involved recursive data structure parser. If the function returns true, the two data
+ structures are equal.
+ """
+ print("a=" + json_str(a))
+ print("b=" + json_str(b))
+ return json_str(a) == json_str(b)
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+ pass
+
+
+def exit_json(*args, **kwargs):
+ """function to patch over exit_json; package return data into an exception"""
+ if "changed" not in kwargs:
+ kwargs["changed"] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs["failed"] = True
+ kwargs["args"] = args
+ raise AnsibleFailJson(kwargs)
+
+
+@mock.patch("getpass.getpass")
+class TestMyModule(unittest.TestCase):
+ def create_inifile(self):
+ self.inifile = open("/tmp/awscredentials", "w")
+ config = configparser.ConfigParser()
+ config["default"] = {
+ "aws_access_key_id": "123123",
+ "aws_secret_access_key": "abcdefghi",
+ }
+ config["foobar"] = {
+ "aws_access_key_id": "345345",
+ "aws_secret_access_key": "rstuvwxyz",
+ }
+ with self.inifile as configfile:
+ config.write(configfile)
+
+ def create_testbinfile(self):
+ with open(self.binfilename, "wb") as f:
+ f.write(bytes([8, 6, 7, 5, 3, 0, 9]))
+ f.close()
+
+ def setUp(self):
+ self.binfilename = "/tmp/testbinfile.bin"
+ self.mock_module_helper = patch.multiple(
+ basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json
+ )
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.testdir_v2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "v2")
+ self.testfile = open("/tmp/ca.crt", "w")
+ self.create_inifile()
+ self.create_testbinfile()
+ # For ~/expanduser tests
+ self.orig_home = os.environ["HOME"]
+ os.environ["HOME"] = self.testdir_v2
+
+ def tearDown(self):
+ os.environ["HOME"] = self.orig_home
+ self.testfile.close()
+ try:
+ os.remove("/tmp/ca.crt")
+ os.remove(self.binfilename)
+ # os.remove("/tmp/awscredentials")
+ except OSError:
+ pass
+
+ def get_file_as_stdout(self, filename, openmode="r"):
+ with open(filename, mode=openmode, encoding="utf-8") as f:
+ return f.read()
+
+ def test_module_fail_when_required_args_missing(self, getpass):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ parse_secrets_info.main()
+
+ def test_module_parse_base(self, getpass):
+ getpass.return_value = "/tmp/ca.crt"
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-base.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ }
+ )
+ parse_secrets_info.main()
+
+ ret = result.exception.args[0]
+ self.assertTrue(
+ (ret["failed"] is False)
+ and (ret["changed"] is False)
+ and (len(ret["parsed_secrets"])) == 1
+ and (len(ret["kubernetes_secret_objects"]) == 0)
+ )
+
+ def test_module_parse_base_parsed_secrets(self, getpass):
+ getpass.return_value = "/tmp/ca.crt"
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-base.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ }
+ )
+ parse_secrets_info.main()
+
+ vp = DEFAULT_VAULT_POLICIES | {
+ "basicPolicy": 'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n', # noqa: E501
+ "advancedPolicy": 'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n', # noqa: E501
+ }
+
+ # Beware reading this structure aloud to your cat...
+ pspsps = {
+ "config-demo": DEFAULT_PARSED_SECRET_VALUE
+ | {
+ "name": "config-demo",
+ "fields": {
+ "secret": None,
+ "secret2": "/tmp/ca.crt",
+ "ca_crt": "",
+ "ca_crt2": "",
+ },
+ "base64": ["ca_crt2"],
+ "generate": ["secret"],
+ "override": ["secret"],
+ "vault_policies": {
+ "secret": "basicPolicy",
+ },
+ "vault_prefixes": [
+ "region-one",
+ "snowflake.blueprints.rhecoeng.com",
+ ],
+ "paths": {
+ "ca_crt": "/tmp/ca.crt",
+ "ca_crt2": "/tmp/ca.crt",
+ },
+ },
+ }
+
+ ret = result.exception.args[0]
+ self.assertTrue(
+ (ret["failed"] is False)
+ and (ret["changed"] is False)
+ and (ds_eq(vp, ret["vault_policies"]))
+ and (ds_eq(pspsps, ret["parsed_secrets"]))
+ )
+
+ def test_module_parsed_secret_ini_files(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-ini-file.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ }
+ )
+ parse_secrets_info.main()
+
+ ps = {
+ "aws": DEFAULT_PARSED_SECRET_VALUE
+ | {
+ "name": "aws",
+ "fields": {
+ "aws_access_key_id": "123123",
+ "aws_secret_access_key": "abcdefghi",
+ },
+ "ini_file": {
+ "aws_access_key_id": {
+ "ini_file": "/tmp/awscredentials",
+ "ini_section": "default",
+ "ini_key": "aws_access_key_id",
+ },
+ "aws_secret_access_key": {
+ "ini_file": "/tmp/awscredentials",
+ "ini_section": "default",
+ "ini_key": "aws_secret_access_key",
+ },
+ },
+ },
+ "awsfoobar": DEFAULT_PARSED_SECRET_VALUE
+ | {
+ "name": "awsfoobar",
+ "fields": {
+ "aws_access_key_id": "345345",
+ "aws_secret_access_key": "rstuvwxyz",
+ },
+ "ini_file": {
+ "aws_access_key_id": {
+ "ini_file": "/tmp/awscredentials",
+ "ini_section": "foobar",
+ "ini_key": "aws_access_key_id",
+ },
+ "aws_secret_access_key": {
+ "ini_file": "/tmp/awscredentials",
+ "ini_section": "foobar",
+ "ini_key": "aws_secret_access_key",
+ },
+ },
+ },
+ }
+
+ ret = result.exception.args[0]
+ self.assertTrue(
+ (ret["failed"] is False)
+ and (ret["changed"] is False)
+ and (len(ret["parsed_secrets"]) == 2)
+ and (ds_eq(ps, ret["parsed_secrets"]))
+ )
+
+ def test_module_parsed_secret_ini_files_base64(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-ini-file-b64.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ }
+ )
+ parse_secrets_info.main()
+
+ ps = {
+ "aws": DEFAULT_PARSED_SECRET_VALUE
+ | {
+ "name": "aws",
+ "fields": {
+ "aws_access_key_id": "A123456789012345678A",
+ "aws_secret_access_key": "A12345678901234567890123456789012345678A",
+ },
+ "ini_file": {
+ "aws_access_key_id": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_access_key_id",
+ },
+ "aws_secret_access_key": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_secret_access_key",
+ },
+ },
+ },
+ "awsb64": DEFAULT_PARSED_SECRET_VALUE
+ | {
+ "name": "awsb64",
+ "fields": {
+ "aws_access_key_id": "QTEyMzQ1Njc4OTAxMjM0NTY3OEE=",
+ "aws_secret_access_key": "QTEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4QQ==",
+ },
+ "base64": [
+ "aws_access_key_id",
+ "aws_secret_access_key",
+ ],
+ "ini_file": {
+ "aws_access_key_id": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_access_key_id",
+ },
+ "aws_secret_access_key": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_secret_access_key",
+ },
+ },
+ },
+ }
+
+ ret = result.exception.args[0]
+ self.assertTrue(
+ (ret["failed"] is False)
+ and (ret["changed"] is False)
+ and (len(ret["parsed_secrets"]) == 2)
+ and (len(ret["kubernetes_secret_objects"]) == 0)
+ and (ds_eq(ps, ret["parsed_secrets"]))
+ )
+
+ def test_module_parsed_secret_ini_files_base64_kubernetes(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-ini-file-b64.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+
+ ps = {
+ "aws": DEFAULT_PARSED_SECRET_VALUE
+ | {
+ "name": "aws",
+ "fields": {
+ "aws_access_key_id": "A123456789012345678A",
+ "aws_secret_access_key": "A12345678901234567890123456789012345678A",
+ },
+ "ini_file": {
+ "aws_access_key_id": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_access_key_id",
+ },
+ "aws_secret_access_key": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_secret_access_key",
+ },
+ },
+ },
+ "awsb64": DEFAULT_PARSED_SECRET_VALUE
+ | {
+ "name": "awsb64",
+ "fields": {
+ "aws_access_key_id": "QTEyMzQ1Njc4OTAxMjM0NTY3OEE=",
+ "aws_secret_access_key": "QTEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDEyMzQ1Njc4QQ==",
+ },
+ "base64": [
+ "aws_access_key_id",
+ "aws_secret_access_key",
+ ],
+ "ini_file": {
+ "aws_access_key_id": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_access_key_id",
+ },
+ "aws_secret_access_key": {
+ "ini_file": f"{os.environ['HOME']}/aws-example.ini",
+ "ini_section": "default",
+ "ini_key": "aws_secret_access_key",
+ },
+ },
+ },
+ }
+
+ ret = result.exception.args[0]
+ self.assertTrue(
+ (ret["failed"] is False)
+ and (ret["changed"] is False)
+ and (len(ret["parsed_secrets"]) == 2)
+ and (len(ret["kubernetes_secret_objects"]) == 2)
+ and (ds_eq(ps, ret["parsed_secrets"]))
+ )
+
+ def test_module_default_labels(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-default-labels.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+
+ ret = result.exception.args[0]
+ self.assertTrue(
+ ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ "labels": {"testlabel": "4"},
+ "namespace": "validated-patterns-secrets",
+ },
+ "stringData": {"username": "user"},
+ },
+ )
+ )
+
+ def test_module_override_labels(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-override-labels.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ "labels": {"overridelabel": "42"},
+ },
+ "stringData": {"username": "user"},
+ },
+ )
+ )
+
+ def test_module_override_namespace(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-override-namespace.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 1
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ "namespace": "overridden-namespace",
+ },
+ "stringData": {"username": "user"},
+ },
+ )
+ )
+
+ def test_module_none_extra_namespaces(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-more-namespaces.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "none",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 2
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ "namespace": "default",
+ },
+ "stringData": {"username": "user"},
+ },
+ )
+ and ds_eq(
+ ret["kubernetes_secret_objects"][1],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ "namespace": "extra",
+ },
+ "stringData": {"username": "user"},
+ },
+ )
+ )
+
+ def test_module_override_type_kubernetes(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-override-type.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 1
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "type": "user-specified",
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ },
+ "stringData": {"username": "user"},
+ },
+ )
+ )
+
+ def test_module_override_type_none(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-override-type-none.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "none",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 1
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "type": "user-specified",
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {"name": "test-secret", "namespace": "default"},
+ "stringData": {"username": "user"},
+ },
+ )
+ )
+
+ def test_module_secret_file_contents(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-file-contents.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 1
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ },
+ "stringData": {"username": "This space intentionally left blank\n"},
+ },
+ )
+ )
+
+ def test_module_secret_file_contents_b64(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-file-contents-b64.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 1
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ },
+ "stringData": {
+ "username": "VGhpcyBzcGFjZSBpbnRlbnRpb25hbGx5IGxlZnQgYmxhbmsK"
+ },
+ },
+ )
+ )
+
+ def test_module_secret_file_contents_double_b64(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(
+ self.testdir_v2, "values-secret-v2-file-contents-double-b64.yaml"
+ )
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 1
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "test-secret",
+ },
+ "stringData": {
+ "username": "VkdocGN5QnpjR0ZqWlNCcGJuUmxiblJwYjI1aGJHeDVJR3hsWm5RZ1lteGhibXNL"
+ },
+ },
+ )
+ )
+
+ def test_module_secret_file_contents_binary_b64(self, getpass):
+ testfile_output = self.get_file_as_stdout(
+ os.path.join(self.testdir_v2, "values-secret-v2-secret-binary-b64.yaml")
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "values_secrets_plaintext": testfile_output,
+ "secrets_backing_store": "kubernetes",
+ }
+ )
+ parse_secrets_info.main()
+ ret = result.exception.args[0]
+
+ # The binary bytes are [ 8, 6, 7, 5, 3, 0, 9 ] (IYKYK)
+ self.assertTrue(
+ len(ret["kubernetes_secret_objects"]) == 1
+ and ds_eq(
+ ret["kubernetes_secret_objects"][0],
+ DEFAULT_KUBERNETES_SECRET_OBJECT
+ | {
+ "metadata": DEFAULT_KUBERNETES_METADATA
+ | {
+ "name": "secret",
+ },
+ "stringData": {"secret": "CAYHBQMACQ=="},
+ },
+ )
+ )
+
+    def test_ensure_success_retrieving_block_yaml_policy(self, getpass):
+        # A user-defined vault password policy (block YAML scalar) is returned
+        # alongside the built-in validatedPatternDefaultPolicy.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-defaultvp-policy.yaml")
+        )
+        with self.assertRaises(AnsibleExitJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "vault",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertTrue(
+            ds_eq(
+                ret["vault_policies"],
+                {
+                    "basicPolicy": 'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n', # noqa: E501
+                    "validatedPatternDefaultPolicy": 'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n', # noqa: E501
+                },
+            )
+        )
+
+    def test_ensure_success_retrieving_block_yaml_value(self, getpass):
+        # Multi-line (block scalar) YAML values are preserved verbatim in
+        # parsed_secrets fields.
+        # NOTE(review): the expected 'sshprivkey' value is an ssh-rsa public key
+        # line while 'sshpubkey' holds a PRIVATE KEY PEM block — the names look
+        # swapped, but they mirror values-secret-v2-block-yamlstring.yaml;
+        # confirm against the fixture before changing either side.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-block-yamlstring.yaml")
+        )
+        with self.assertRaises(AnsibleExitJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "vault",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertTrue(
+            ds_eq(
+                ret["parsed_secrets"],
+                {
+                    "config-demo": DEFAULT_PARSED_SECRET_VALUE
+                    | {
+                        "fields": {
+                            "sshprivkey": "ssh-rsa oNb/kAvwdQl+FKdwzzKo5rnGIB68UOxWoaKPnKdgF/ts67CDBslWGnpUZCpp8TdaxfHmpoyA6nutMwQw8OAMEUybxvilDn+ZVJ/5qgfRBdi8wLKRLTIj0v+ZW7erN9yuZG53xUQAaQjivM3cRyNLIZ9torShYaYwD1UTTDkV97RMfNDlWI5f5FGRvfy429ZfCwbUWUbijrcv/mWc/uO3x/+MBXwa4f8ubzEYlrt4yH/Vbpzs67kE9UJ9z1zurFUFJydy1ZDAdKSiBS91ImI3ccKnbz0lji2bgSYR0Wp1IQhzSpjyJU2rIu9HAEUh85Rwf2jakfLpMcg/hSBer3sG kilroy@example.com", # noqa: E501
+                            "sshpubkey": "-----BEGIN OPENSSH PRIVATE KEY-----\nTtzxGgWrNerAr1hzUqPW2xphF/Aur1rQXSLv4J7frEJxNED6u/eScsNgwJMGXwRx7QYVohh0ARHVhJdUzJK7pEIphi4BGw==\nwlo+oQsi828b47SKZB8/K9dbeLlLiXh9/hu47MGpeGHZsKbjAdauncuw+YUDDN2EADJjasNMZHjxYhXKtqDjXTIw1X1n0Q==\n-----END OPENSSH PRIVATE KEY-----", # noqa: E501
+                        },
+                        "name": "config-demo",
+                    }
+                },
+            )
+        )
+
+    def test_ensure_kubernetes_object_block_yaml_value(self, getpass):
+        # Same block-scalar fixture as the vault test above, but rendered as a
+        # kubernetes Secret object (stringData).
+        # NOTE(review): as in the vault variant, the sshprivkey/sshpubkey
+        # contents appear swapped; they match the fixture, so verify there first.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-block-yamlstring.yaml")
+        )
+        with self.assertRaises(AnsibleExitJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "kubernetes",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertTrue(
+            ds_eq(
+                ret["kubernetes_secret_objects"][0],
+                DEFAULT_KUBERNETES_SECRET_OBJECT
+                | {
+                    "metadata": DEFAULT_KUBERNETES_METADATA
+                    | {
+                        "name": "config-demo",
+                    },
+                    "stringData": {
+                        "sshprivkey": "ssh-rsa oNb/kAvwdQl+FKdwzzKo5rnGIB68UOxWoaKPnKdgF/ts67CDBslWGnpUZCpp8TdaxfHmpoyA6nutMwQw8OAMEUybxvilDn+ZVJ/5qgfRBdi8wLKRLTIj0v+ZW7erN9yuZG53xUQAaQjivM3cRyNLIZ9torShYaYwD1UTTDkV97RMfNDlWI5f5FGRvfy429ZfCwbUWUbijrcv/mWc/uO3x/+MBXwa4f8ubzEYlrt4yH/Vbpzs67kE9UJ9z1zurFUFJydy1ZDAdKSiBS91ImI3ccKnbz0lji2bgSYR0Wp1IQhzSpjyJU2rIu9HAEUh85Rwf2jakfLpMcg/hSBer3sG kilroy@example.com", # noqa: E501
+                        "sshpubkey": "-----BEGIN OPENSSH PRIVATE KEY-----\nTtzxGgWrNerAr1hzUqPW2xphF/Aur1rQXSLv4J7frEJxNED6u/eScsNgwJMGXwRx7QYVohh0ARHVhJdUzJK7pEIphi4BGw==\nwlo+oQsi828b47SKZB8/K9dbeLlLiXh9/hu47MGpeGHZsKbjAdauncuw+YUDDN2EADJjasNMZHjxYhXKtqDjXTIw1X1n0Q==\n-----END OPENSSH PRIVATE KEY-----", # noqa: E501
+                    },
+                },
+            )
+        )
+
+    def test_ensure_kubernetes_backend_allowed(self, getpass):
+        # A secrets file declaring the 'kubernetes' backend parses cleanly when
+        # the pattern also selects 'kubernetes'.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-base-k8s-backend.yaml")
+        )
+        with self.assertRaises(AnsibleExitJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "kubernetes",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertFalse(ret["failed"])
+
+    def test_ensure_none_backend_allowed(self, getpass):
+        # The 'none' backing store is accepted when file and module args agree.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-base-none-backend.yaml")
+        )
+        with self.assertRaises(AnsibleExitJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "none",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertFalse(ret["failed"])
+
+    def test_ensure_error_conflicting_backends(self, getpass):
+        # File says 'kubernetes' but the module is invoked with 'vault':
+        # must fail with an explicit mismatch message.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-base-k8s-backend.yaml")
+        )
+        with self.assertRaises(AnsibleFailJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "vault",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertEqual(ret["failed"], True)
+        # fail_json stores positional args under "args"; index 1 is the message.
+        assert (
+            ret["args"][1]
+            == "Secrets file specifies 'kubernetes' backend but pattern config specifies 'vault'."
+        )
+
+    def test_ensure_error_unknown_backends(self, getpass):
+        # An unrecognized backing store name must be rejected outright.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-base-unknown-backend.yaml")
+        )
+        with self.assertRaises(AnsibleFailJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "unknown",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertEqual(ret["failed"], True)
+        assert (
+            ret["args"][1]
+            == "Currently only the 'vault', 'kubernetes' and 'none' backingStores are supported: unknown"
+        )
+
+    def test_ensure_error_secrets_same_name(self, getpass):
+        # Two secrets sharing a name must be rejected (duplicates are ambiguous).
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-same-secret-names.yaml")
+        )
+        with self.assertRaises(AnsibleFailJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertEqual(ret["failed"], True)
+        assert (
+            ret["args"][1] == "You cannot have duplicate secret names: ['config-demo']"
+        )
+
+    def test_ensure_error_fields_same_name(self, getpass):
+        # Duplicate field names within one secret must also be rejected.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-same-field-names.yaml")
+        )
+        with self.assertRaises(AnsibleFailJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertEqual(ret["failed"], True)
+        assert ret["args"][1] == "You cannot have duplicate field names: ['secret']"
+
+    def test_ensure_generate_errors_on_kubernetes(self, getpass):
+        # onMissingValue=generate is only implemented for the vault backend,
+        # so it must fail under 'kubernetes'.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-generic-onlygenerate.yaml")
+        )
+        with self.assertRaises(AnsibleFailJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "kubernetes",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertEqual(ret["failed"], True)
+        assert (
+            ret["args"][1]
+            == "You cannot have onMissingValue set to 'generate' unless using vault backingstore for secret config-demo field secret" # noqa: E501
+        )
+
+    def test_ensure_generate_errors_on_none_generate(self, getpass):
+        # Same fixture as above: 'generate' must also fail under the 'none' backend.
+        testfile_output = self.get_file_as_stdout(
+            os.path.join(self.testdir_v2, "values-secret-v2-generic-onlygenerate.yaml")
+        )
+        with self.assertRaises(AnsibleFailJson) as ansible_err:
+            set_module_args(
+                {
+                    "values_secrets_plaintext": testfile_output,
+                    "secrets_backing_store": "none",
+                }
+            )
+            parse_secrets_info.main()
+
+        ret = ansible_err.exception.args[0]
+        self.assertEqual(ret["failed"], True)
+        assert (
+            ret["args"][1]
+            == "You cannot have onMissingValue set to 'generate' unless using vault backingstore for secret config-demo field secret" # noqa: E501
+        )
+
+
+# Allow running this test module directly, outside a pytest/make invocation.
+if __name__ == "__main__":
+    unittest.main()
diff --git a/common/ansible/tests/unit/test_util_datastructures.py b/common/ansible/tests/unit/test_util_datastructures.py
new file mode 100644
index 00000000..11d7cdae
--- /dev/null
+++ b/common/ansible/tests/unit/test_util_datastructures.py
@@ -0,0 +1,205 @@
+# Baseline parsed-secret structure as emitted by parse_secrets_info; tests
+# overlay only the fields they care about via the dict merge ("|") operator.
+DEFAULT_PARSED_SECRET_VALUE = {
+    "name": "overwrite-me",
+    "fields": {},
+    "base64": [],
+    "ini_file": {},
+    "generate": [],
+    "override": [],
+    "vault_mount": "secret",
+    "vault_policies": {},
+    "vault_prefixes": ["hub"],
+    "type": "Opaque",
+    "target_namespaces": [],
+    "labels": {},
+    "annotations": {},
+    "paths": {},
+}
+
+# Baseline metadata for a generated kubernetes Secret; "name" is always
+# overridden by the individual test.
+DEFAULT_KUBERNETES_METADATA = {
+    "name": "overwrite-me",
+    "labels": {},
+    "annotations": {},
+    "namespace": "validated-patterns-secrets",
+}
+# Skeleton v1/Secret object; tests merge their own metadata/stringData on top.
+DEFAULT_KUBERNETES_SECRET_OBJECT = {
+    "kind": "Secret",
+    "type": "Opaque",
+    "apiVersion": "v1",
+    "metadata": DEFAULT_KUBERNETES_METADATA,
+    "stringData": {},
+}
+
+# The built-in password policy that parse_secrets_info always attaches.
+DEFAULT_VAULT_POLICIES = {
+    "validatedPatternDefaultPolicy": (
+        "length=20\n"
+        'rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\n' # noqa: E501
+        'rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\n' # noqa: E501
+        'rule "charset" { charset = "0123456789" min-chars = 1 }\n'
+        'rule "charset" { charset = "!@#%^&*" min-chars = 1 }\n'
+    ),
+}
+
+# Expected parse_secrets_info output for a secret that is generated from a
+# custom vault policy and then base64-encoded ("secret" appears in both the
+# base64 and generate lists).
+GENERATE_POLICY_B64_TEST = {
+    "vault_policies": {
+        "basicPolicy": 'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n', # noqa: E501
+        "validatedPatternDefaultPolicy": 'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n', # noqa: E501
+    },
+    "parsed_secrets": {
+        "config-demo": {
+            "annotations": {},
+            "base64": ["secret"],
+            "fields": {"secret": None},
+            "generate": ["secret"],
+            "ini_file": {},
+            "labels": {},
+            "name": "config-demo",
+            "namespace": "validated-patterns-secrets",
+            "override": ["secret"],
+            "paths": {},
+            "type": "Opaque",
+            "vault_mount": "secret",
+            "vault_policies": {"secret": "basicPolicy"},
+            "vault_prefixes": ["region-one", "snowflake.blueprints.rhecoeng.com"],
+        }
+    },
+}
+
+# Simplest case: a single plaintext field value destined for the "hub" prefix.
+PARSED_SECRET_VALUE_TEST = {
+    "parsed_secrets": {
+        "config-demo": {
+            "annotations": {},
+            "base64": [],
+            "fields": {"secret": "value123"},
+            "generate": [],
+            "ini_file": {},
+            "labels": {},
+            "name": "config-demo",
+            "namespace": "validated-patterns-secrets",
+            "override": [],
+            "paths": {},
+            "type": "Opaque",
+            "vault_mount": "secret",
+            "vault_policies": {},
+            "vault_prefixes": ["hub"],
+        }
+    },
+    "vault_policies": {
+        "validatedPatternDefaultPolicy": 'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n' # noqa: E501
+    },
+}
+
+# Same as PARSED_SECRET_VALUE_TEST but the field value is pre-base64-encoded
+# ("dmFsdWUxMjMK" == b64("value123\n")) and listed in "base64".
+PARSED_SECRET_B64_VALUE_TEST = {
+    "parsed_secrets": {
+        "config-demo": {
+            "annotations": {},
+            "base64": ["secret"],
+            "fields": {"secret": "dmFsdWUxMjMK"},
+            "generate": [],
+            "ini_file": {},
+            "labels": {},
+            "name": "config-demo",
+            "namespace": "validated-patterns-secrets",
+            "override": [],
+            "paths": {},
+            "type": "Opaque",
+            "vault_mount": "secret",
+            "vault_policies": {},
+            "vault_prefixes": ["hub"],
+        }
+    },
+    "vault_policies": {
+        "validatedPatternDefaultPolicy": 'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n' # noqa: E501
+    },
+}
+
+PARSED_SECRET_FILE_INJECTION_TEST = {
+ "parsed_secrets": {
+ "config-demo": {
+ "annotations": {},
+ "base64": [],
+ "fields": {"secret": "value123"},
+ "generate": [],
+ "ini_file": {},
+ "labels": {},
+ "name": "config-demo",
+ "namespace": "validated-patterns-secrets",
+ "override": [],
+ "paths": {},
+ "type": "Opaque",
+ "vault_mount": "secret",
+ "vault_policies": {},
+ "vault_prefixes": [
+ "secret/region-one",
+ "secret/snowflake.blueprints.rhecoeng.com",
+ ],
+ },
+ "config-demo-file": {
+ "annotations": {},
+ "base64": [],
+ "fields": {"test": ""},
+ "generate": [],
+ "ini_file": {},
+ "labels": {},
+ "name": "config-demo-file",
+ "namespace": "validated-patterns-secrets",
+ "override": [],
+ "paths": {"test": "/tmp/footest"},
+ "type": "Opaque",
+ "vault_mount": "secret",
+ "vault_policies": {},
+ "vault_prefixes": [
+ "secret/region-two",
+ "secret/snowflake.blueprints.rhecoeng.com",
+ ],
+ },
+ },
+ "vault_policies": {
+ "validatedPatternDefaultPolicy": 'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n' # noqa: 501
+ },
+}
+
+PARSED_SECRET_FILE_B64_INJECTION_TEST = {
+ "parsed_secrets": {
+ "config-demo": {
+ "annotations": {},
+ "base64": [],
+ "fields": {"secret": "value123"},
+ "generate": [],
+ "ini_file": {},
+ "labels": {},
+ "name": "config-demo",
+ "namespace": "validated-patterns-secrets",
+ "override": [],
+ "paths": {},
+ "type": "Opaque",
+ "vault_mount": "secret",
+ "vault_policies": {},
+ "vault_prefixes": [
+ "secret/region-one",
+ "secret/snowflake.blueprints.rhecoeng.com",
+ ],
+ },
+ "config-demo-file": {
+ "annotations": {},
+ "base64": ["test"],
+ "fields": {"test": ""},
+ "generate": [],
+ "ini_file": {},
+ "labels": {},
+ "name": "config-demo-file",
+ "namespace": "validated-patterns-secrets",
+ "override": [],
+ "paths": {"test": "/tmp/footest"},
+ "type": "Opaque",
+ "vault_mount": "secret",
+ "vault_policies": {},
+ "vault_prefixes": [
+ "secret/region-two",
+ "secret/snowflake.blueprints.rhecoeng.com",
+ ],
+ },
+ },
+ "vault_policies": {
+ "validatedPatternDefaultPolicy": 'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n' # noqa: 501
+ },
+}
diff --git a/common/ansible/tests/unit/test_vault_load_parsed_secrets.py b/common/ansible/tests/unit/test_vault_load_parsed_secrets.py
new file mode 100644
index 00000000..ca37de94
--- /dev/null
+++ b/common/ansible/tests/unit/test_vault_load_parsed_secrets.py
@@ -0,0 +1,320 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple module to test vault_load_parsed_secrets
+"""
+
+import json
+import os
+import sys
+import unittest
+from unittest.mock import call, patch
+
+import test_util_datastructures
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+
+# TODO(bandini): I could not come up with something better to force the imports to be existing
+# when we 'import vault_load_secrets'
+sys.path.insert(1, "./ansible/plugins/module_utils")
+sys.path.insert(1, "./ansible/plugins/modules")
+
+import vault_load_parsed_secrets # noqa: E402
+
+sys.modules["ansible.modules.vault_load_parsed_secrets"] = vault_load_parsed_secrets
+
+
+def set_module_args(args):
+    """prepare arguments so that they will be picked up during module creation"""
+    # Stuff the serialized args into AnsibleModule's private input buffer so
+    # the next AnsibleModule() instantiation reads them instead of stdin.
+    args = json.dumps({"ANSIBLE_MODULE_ARGS": args})
+    basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+# Raised in place of sys.exit() so tests can capture the module result dict.
+class AnsibleExitJson(Exception):
+    """Exception class to be raised by module.exit_json and caught by the test case"""
+
+    pass
+
+
+# Failure counterpart of AnsibleExitJson; carries the fail_json payload.
+class AnsibleFailJson(Exception):
+    """Exception class to be raised by module.fail_json and caught by the test case"""
+
+    pass
+
+
+def exit_json(*args, **kwargs):
+    """function to patch over exit_json; package return data into an exception"""
+    # Mirror AnsibleModule.exit_json's default of changed=False.
+    if "changed" not in kwargs:
+        kwargs["changed"] = False
+    raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+    """function to patch over fail_json; package return data into an exception"""
+    kwargs["failed"] = True
+    # Keep the positional args so tests can assert on error messages.
+    kwargs["args"] = args
+    raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+    """Unit tests for vault_load_parsed_secrets; every oc/vault shell command
+    is intercepted by mocking VaultSecretLoader._run_command."""
+
+    def setUp(self):
+        # Replace exit_json/fail_json with exception-raising stubs for the
+        # lifetime of each test.
+        self.mock_module_helper = patch.multiple(
+            basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json
+        )
+        self.mock_module_helper.start()
+        self.addCleanup(self.mock_module_helper.stop)
+        self.testdir_v2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "v2")
+
+    def tearDown(self):
+        # Nothing to clean up; mocks are undone via addCleanup in setUp.
+        return
+
+    def test_module_fail_when_required_args_missing(self):
+        with self.assertRaises(AnsibleFailJson):
+            set_module_args({})
+            vault_load_parsed_secrets.main()
+
+    # For these tests, we need the data structures that parse_secrets_info outputs.
+    # Several have been saved in the test_util_datastructures module for this purpose
+    def test_ensure_value_injection_works(self):
+        # One policy write + one kv put => exactly two _run_command calls.
+        set_module_args(
+            {
+                "parsed_secrets": test_util_datastructures.PARSED_SECRET_VALUE_TEST[
+                    "parsed_secrets"
+                ],
+                "vault_policies": test_util_datastructures.PARSED_SECRET_VALUE_TEST[
+                    "vault_policies"
+                ],
+            }
+        )
+        with patch.object(
+            vault_load_parsed_secrets.VaultSecretLoader, "_run_command"
+        ) as mock_run_command:
+            stdout = ""
+            stderr = ""
+            ret = 0
+            mock_run_command.return_value = ret, stdout, stderr  # successful execution
+
+            with self.assertRaises(AnsibleExitJson) as result:
+                vault_load_parsed_secrets.main()
+            self.assertTrue(
+                result.exception.args[0]["changed"]
+            )  # ensure result is changed
+            assert mock_run_command.call_count == 2
+
+        calls = [
+            call(
+                'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret hub/config-demo secret='value123'\"",
+                attempts=3,
+            ),
+        ]
+        print(mock_run_command.mock_calls)
+        mock_run_command.assert_has_calls(calls)
+
+    def test_ensure_b64_value_injection_works(self):
+        # Same flow as above, but the injected value stays base64-encoded.
+        set_module_args(
+            {
+                "parsed_secrets": test_util_datastructures.PARSED_SECRET_B64_VALUE_TEST[
+                    "parsed_secrets"
+                ],
+                "vault_policies": test_util_datastructures.PARSED_SECRET_B64_VALUE_TEST[
+                    "vault_policies"
+                ],
+            }
+        )
+        with patch.object(
+            vault_load_parsed_secrets.VaultSecretLoader, "_run_command"
+        ) as mock_run_command:
+            stdout = ""
+            stderr = ""
+            ret = 0
+            mock_run_command.return_value = ret, stdout, stderr  # successful execution
+
+            with self.assertRaises(AnsibleExitJson) as result:
+                vault_load_parsed_secrets.main()
+            self.assertTrue(
+                result.exception.args[0]["changed"]
+            )  # ensure result is changed
+            assert mock_run_command.call_count == 2
+
+        calls = [
+            call(
+                'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret hub/config-demo secret='dmFsdWUxMjMK'\"", # noqa: E501
+                attempts=3,
+            ),
+        ]
+        print(mock_run_command.mock_calls)
+        mock_run_command.assert_has_calls(calls)
+
+    def test_ensure_file_injection_works(self):
+        # Policy + 2 value prefixes + 2 file prefixes => five commands total.
+        set_module_args(
+            {
+                "parsed_secrets": test_util_datastructures.PARSED_SECRET_FILE_INJECTION_TEST[
+                    "parsed_secrets"
+                ],
+                "vault_policies": test_util_datastructures.PARSED_SECRET_FILE_INJECTION_TEST[
+                    "vault_policies"
+                ],
+            }
+        )
+        with patch.object(
+            vault_load_parsed_secrets.VaultSecretLoader, "_run_command"
+        ) as mock_run_command:
+            stdout = ""
+            stderr = ""
+            ret = 0
+            mock_run_command.return_value = ret, stdout, stderr  # successful execution
+
+            with self.assertRaises(AnsibleExitJson) as result:
+                vault_load_parsed_secrets.main()
+            self.assertTrue(
+                result.exception.args[0]["changed"]
+            )  # ensure result is changed
+            assert mock_run_command.call_count == 5
+
+        calls = [
+            call(
+                'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret secret/region-one/config-demo secret='value123'\"", # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret secret/snowflake.blueprints.rhecoeng.com/config-demo secret='value123'\"", # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "cat '/tmp/footest' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv put -mount=secret secret/region-two/config-demo-file test=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "cat '/tmp/footest' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv put -mount=secret secret/snowflake.blueprints.rhecoeng.com/config-demo-file test=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+                attempts=3,
+            ),
+        ]
+        print(mock_run_command.mock_calls)
+        mock_run_command.assert_has_calls(calls)
+
+    def test_ensure_file_b64_injection_works(self):
+        # As above, but file contents are piped through 'base64 --wrap=0'.
+        set_module_args(
+            {
+                "parsed_secrets": test_util_datastructures.PARSED_SECRET_FILE_B64_INJECTION_TEST[
+                    "parsed_secrets"
+                ],
+                "vault_policies": test_util_datastructures.PARSED_SECRET_FILE_B64_INJECTION_TEST[
+                    "vault_policies"
+                ],
+            }
+        )
+        with patch.object(
+            vault_load_parsed_secrets.VaultSecretLoader, "_run_command"
+        ) as mock_run_command:
+            stdout = ""
+            stderr = ""
+            ret = 0
+            mock_run_command.return_value = ret, stdout, stderr  # successful execution
+
+            with self.assertRaises(AnsibleExitJson) as result:
+                vault_load_parsed_secrets.main()
+            self.assertTrue(
+                result.exception.args[0]["changed"]
+            )  # ensure result is changed
+            assert mock_run_command.call_count == 5
+
+        calls = [
+            call(
+                'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret secret/region-one/config-demo secret='value123'\"", # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret secret/snowflake.blueprints.rhecoeng.com/config-demo secret='value123'\"", # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "cat '/tmp/footest' | oc exec -n vault vault-0 -i -- sh -c 'cat - | base64 --wrap=0> /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv put -mount=secret secret/region-two/config-demo-file test=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+                attempts=3,
+            ),
+            call(
+                "cat '/tmp/footest' | oc exec -n vault vault-0 -i -- sh -c 'cat - | base64 --wrap=0> /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv put -mount=secret secret/snowflake.blueprints.rhecoeng.com/config-demo-file test=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+                attempts=3,
+            ),
+        ]
+        print(mock_run_command.mock_calls)
+        mock_run_command.assert_has_calls(calls)
+
+    def test_ensure_b64_generate_passwords_works(self):
+        # Two policy writes + a generated+b64'd secret for two prefixes => four.
+        set_module_args(
+            {
+                "parsed_secrets": test_util_datastructures.GENERATE_POLICY_B64_TEST[
+                    "parsed_secrets"
+                ],
+                "vault_policies": test_util_datastructures.GENERATE_POLICY_B64_TEST[
+                    "vault_policies"
+                ],
+            }
+        )
+        with patch.object(
+            vault_load_parsed_secrets.VaultSecretLoader, "_run_command"
+        ) as mock_run_command:
+            stdout = ""
+            stderr = ""
+            ret = 0
+            mock_run_command.return_value = ret, stdout, stderr  # successful execution
+
+            with self.assertRaises(AnsibleExitJson) as result:
+                vault_load_parsed_secrets.main()
+            self.assertTrue(
+                result.exception.args[0]["changed"]
+            )  # ensure result is changed
+            assert mock_run_command.call_count == 4
+
+        calls = [
+            call(
+                'echo \'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/basicPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/basicPolicy policy=@/tmp/basicPolicy.hcl\'', # noqa: E501
+                attempts=3,
+            ),
+            call(
+                'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+                attempts=3,
+            ),
+            call(
+                'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | base64 --wrap=0 | vault kv put -mount=secret region-one/config-demo secret=-"', # noqa: E501
+                attempts=3,
+            ),
+            call(
+                'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | base64 --wrap=0 | vault kv put -mount=secret snowflake.blueprints.rhecoeng.com/config-demo secret=-"', # noqa: E501
+                attempts=3,
+            ),
+        ]
+        print(mock_run_command.mock_calls)
+        mock_run_command.assert_has_calls(calls)
+
+
+# Allow running this test module directly, outside a pytest/make invocation.
+if __name__ == "__main__":
+    unittest.main()
diff --git a/common/ansible/tests/unit/test_vault_load_secrets.py b/common/ansible/tests/unit/test_vault_load_secrets.py
new file mode 100644
index 00000000..12deeb3f
--- /dev/null
+++ b/common/ansible/tests/unit/test_vault_load_secrets.py
@@ -0,0 +1,388 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple module to test vault_load_secrets
+"""
+
+import json
+import os
+import sys
+import unittest
+from unittest.mock import call, patch
+
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+
+# TODO(bandini): hack — extend sys.path so the plugin's module_utils imports
+# resolve when we 'import vault_load_secrets' outside Ansible's module loader
+sys.path.insert(1, "./ansible/plugins/module_utils")
+sys.path.insert(1, "./ansible/plugins/modules")
+import load_secrets_common # noqa: E402
+
+sys.modules["ansible.module_utils.load_secrets_common"] = load_secrets_common
+import load_secrets_v1 # noqa: E402
+import load_secrets_v2 # noqa: E402
+
+sys.modules["ansible.module_utils.load_secrets_v1"] = load_secrets_v1
+sys.modules["ansible.module_utils.load_secrets_v2"] = load_secrets_v2
+import vault_load_secrets # noqa: E402
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({"ANSIBLE_MODULE_ARGS": args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+ pass
+
+
+def exit_json(*args, **kwargs):
+ """function to patch over exit_json; package return data into an exception"""
+ if "changed" not in kwargs:
+ kwargs["changed"] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs["failed"] = True
+ kwargs["args"] = args
+ raise AnsibleFailJson(kwargs)
+
+
+class TestMyModule(unittest.TestCase):
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(
+ basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json
+ )
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.testdir_v1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "v1")
+ self.testfile = open("/tmp/ca.crt", "w")
+
+ def tearDown(self):
+ self.testfile.close()
+ try:
+ os.remove("/tmp/ca.crt")
+ except OSError:
+ pass
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ vault_load_secrets.main()
+
+ def test_module_fail_when_values_secret_not_existing(self):
+ with self.assertRaises(AnsibleExitJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": "/tmp/nonexisting",
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ self.assertEqual(ret["error"], "Missing /tmp/nonexisting file")
+ self.assertEqual(
+ ret["msg"], "Values secrets file does not exist: /tmp/nonexisting"
+ )
+
+ def test_ensure_empty_files_but_not_secrets_is_ok(self):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v1,
+ "values-secret-empty-files.yaml",
+ )
+ }
+ )
+
+ with patch.object(
+ load_secrets_v1.LoadSecretsV1, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 2
+
+ calls = [
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/config-demo' secret='VALUE'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/aws' access_key_id='VALUE' secret_access_key='VALUE'\"", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_broken_files_fail(self):
+ for i in (
+ "values-secret-broken1.yaml",
+ "values-secret-broken2.yaml",
+ "values-secret-broken3.yaml",
+ ):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args({"values_secrets": os.path.join(self.testdir_v1, i)})
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+
+ def test_ensure_empty_secrets_but_not_files_is_ok(self):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v1,
+ "values-secret-empty-secrets.yaml",
+ ),
+ }
+ )
+
+ with patch.object(
+ load_secrets_v1.LoadSecretsV1, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 1
+
+ calls = [
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'base64 --wrap=0 /tmp/vcontent | vault kv put secret/hub/publickey b64content=- content=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_command_called(self):
+ set_module_args(
+ {"values_secrets": os.path.join(self.testdir_v1, "values-secret-good.yaml")}
+ )
+
+ with patch.object(
+ load_secrets_v1.LoadSecretsV1, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 9
+
+ calls = [
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/config-demo' secret='demo123'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/googleapi' key='test123'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/cluster_alejandro' name='alejandro' bearerToken='sha256~bumxi-012345678901233455675678678098-abcdef'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/test' s3.accessKey='1234' s3.secretKey='4321' s3Secret='czMuYWNjZXNzS2V5OiAxMjM0CnMzLnNlY3JldEtleTogNDMyMQ=='\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/test2' s3.accessKey='accessKey' s3.secretKey='secretKey' s3Secret='fooo'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/test3' s3.accessKey='aaaaa' s3.secretKey='bbbbbbbb' s3Secret='czMuYWNjZXNzS2V5OiBhYWFhYQpzMy5zZWNyZXRLZXk6IGJiYmJiYmJi'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/region-one/config-demo' secret='region123'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'base64 --wrap=0 /tmp/vcontent | vault kv put secret/hub/cluster_alejandro_ca b64content=- content=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'base64 --wrap=0 /tmp/vcontent | vault kv put secret/region-one/ca b64content=- content=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_good_template_checking(self):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v1, "mcg-values-secret.yaml"
+ ),
+ "check_missing_secrets": True,
+ "values_secret_template": os.path.join(
+ self.testdir_v1, "template-mcg-working.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v1.LoadSecretsV1, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 1
+
+ calls = [
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/config-demo' secret='VALUE' additionalsecret='test'\"", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_bad_template_checking(self):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v1, "mcg-values-secret.yaml"
+ ),
+ "check_missing_secrets": True,
+ "values_secret_template": os.path.join(
+ self.testdir_v1, "template-mcg-missing.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v1.LoadSecretsV1, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(result.exception.args[0]["failed"])
+ # In case of failure args[1] contains the msg of the failure
+ assert (
+ result.exception.args[0]["args"][1]
+ == "Values secret yaml is missing needed secrets from the templates: {'secrets.config-demo.foo'}"
+ )
+ assert mock_run_command.call_count == 0
+
+ def test_ensure_fqdn_secrets(self):
+ set_module_args(
+ {"values_secrets": os.path.join(self.testdir_v1, "values-secret-fqdn.yaml")}
+ )
+
+ with patch.object(
+ load_secrets_v1.LoadSecretsV1, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 3
+
+ calls = [
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/hub/test' secret1='foo'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put 'secret/region-one.blueprints.rhecoeng.com/config-demo' secret='region123'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'base64 --wrap=0 /tmp/vcontent | vault kv put secret/region-one/ca b64content=- content=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_check_missing_secrets_errors_out(self):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v1, "mcg-values-secret.yaml"
+ ),
+ "check_missing_secrets": True,
+ "values_secret_template": "",
+ }
+ )
+ with patch.object(
+ load_secrets_v1.LoadSecretsV1, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(result.exception.args[0]["failed"])
+ # In case of failure args[1] contains the msg of the failure
+ assert (
+ result.exception.args[0]["args"][1]
+ == "No values_secret_template defined and check_missing_secrets set to True"
+ )
+ assert mock_run_command.call_count == 0
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/common/ansible/tests/unit/test_vault_load_secrets_v2.py b/common/ansible/tests/unit/test_vault_load_secrets_v2.py
new file mode 100644
index 00000000..d0e5881c
--- /dev/null
+++ b/common/ansible/tests/unit/test_vault_load_secrets_v2.py
@@ -0,0 +1,760 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple module to test vault_load_secrets (v2 values-secret format)
+"""
+
+import configparser
+import json
+import os
+import sys
+import unittest
+from unittest import mock
+from unittest.mock import call, patch
+
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+
+# TODO(bandini): hack — extend sys.path so the plugin's module_utils imports
+# resolve when we 'import vault_load_secrets' outside Ansible's module loader
+sys.path.insert(1, "./ansible/plugins/module_utils")
+sys.path.insert(1, "./ansible/plugins/modules")
+import load_secrets_common # noqa: E402
+
+sys.modules["ansible.module_utils.load_secrets_common"] = load_secrets_common
+import load_secrets_v1 # noqa: E402
+import load_secrets_v2 # noqa: E402
+
+sys.modules["ansible.module_utils.load_secrets_v1"] = load_secrets_v1
+sys.modules["ansible.module_utils.load_secrets_v2"] = load_secrets_v2
+import vault_load_secrets # noqa: E402
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({"ANSIBLE_MODULE_ARGS": args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+
+ pass
+
+
+def exit_json(*args, **kwargs):
+ """function to patch over exit_json; package return data into an exception"""
+ if "changed" not in kwargs:
+ kwargs["changed"] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs["failed"] = True
+ kwargs["args"] = args
+ raise AnsibleFailJson(kwargs)
+
+
+@mock.patch("getpass.getpass")
+class TestMyModule(unittest.TestCase):
+ def create_inifile(self):
+ self.inifile = open("/tmp/awscredentials", "w")
+ config = configparser.ConfigParser()
+ config["default"] = {
+ "aws_access_key_id": "123123",
+ "aws_secret_access_key": "abcdefghi",
+ }
+ config["foobar"] = {
+ "aws_access_key_id": "345345",
+ "aws_secret_access_key": "rstuvwxyz",
+ }
+ with self.inifile as configfile:
+ config.write(configfile)
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(
+ basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json
+ )
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.testdir_v2 = os.path.join(os.path.dirname(os.path.abspath(__file__)), "v2")
+ self.testfile = open("/tmp/ca.crt", "w")
+ self.create_inifile()
+
+ def tearDown(self):
+ self.testfile.close()
+ try:
+ os.remove("/tmp/ca.crt")
+ # os.remove("/tmp/awscredentials")
+ except OSError:
+ pass
+
+ def test_module_fail_when_required_args_missing(self, getpass):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ vault_load_secrets.main()
+
+ def test_module_fail_when_values_secret_not_existing(self, getpass):
+ with self.assertRaises(AnsibleExitJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": "/tmp/nonexisting",
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ self.assertEqual(ret["error"], "Missing /tmp/nonexisting file")
+ self.assertEqual(
+ ret["msg"], "Values secrets file does not exist: /tmp/nonexisting"
+ )
+
+ def test_ensure_no_vault_policies_is_ok(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-nopolicies.yaml"
+ ),
+ }
+ )
+ getpass.return_value = "foo"
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 5
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret secret/region-one/config-demo secret='value123'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret secret/snowflake.blueprints.rhecoeng.com/config-demo secret='value123'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - | base64 --wrap=0 > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv put -mount=secret secret/region-two/config-demo-file ca_crt=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - | base64 --wrap=0 > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv put -mount=secret secret/snowflake.blueprints.rhecoeng.com/config-demo-file ca_crt=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_policies_are_injected(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-base.yaml"
+ ),
+ }
+ )
+ # this will be used for both a secret and a file path
+ getpass.return_value = "/tmp/ca.crt"
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 11
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/basicPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/basicPolicy policy=@/tmp/basicPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/advancedPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/advancedPolicy policy=@/tmp/advancedPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | vault kv put -mount=secret region-one/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | vault kv put -mount=secret snowflake.blueprints.rhecoeng.com/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv patch -mount=secret region-one/config-demo secret2='/tmp/ca.crt'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv patch -mount=secret snowflake.blueprints.rhecoeng.com/config-demo secret2='/tmp/ca.crt'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv patch -mount=secret region-one/config-demo ca_crt=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv patch -mount=secret snowflake.blueprints.rhecoeng.com/config-demo ca_crt=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - | base64 --wrap=0 > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv patch -mount=secret region-one/config-demo ca_crt2=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "cat '/tmp/ca.crt' | oc exec -n vault vault-0 -i -- sh -c 'cat - | base64 --wrap=0 > /tmp/vcontent'; oc exec -n vault vault-0 -i -- sh -c 'vault kv patch -mount=secret snowflake.blueprints.rhecoeng.com/config-demo ca_crt2=@/tmp/vcontent; rm /tmp/vcontent'", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_error_wrong_onmissing_value(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-wrong-onmissingvalue.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert (
+ ret["args"][1]
+ == "Secret has vaultPolicy set to nonExisting but no such policy exists"
+ )
+
+ def test_ensure_error_wrong_vaultpolicy(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-wrong-vaultpolicy.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert (
+ ret["args"][1]
+ == "Secret has vaultPolicy set to nonExisting but no such policy exists"
+ )
+
+ def test_ensure_error_file_wrong_onmissing_value(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2,
+ "values-secret-v2-files-wrong-onmissingvalue.yaml",
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert (
+ ret["args"][1]
+ == "Secret has onMissingValue set to 'generate' but has a path set"
+ )
+
+ def test_ensure_error_file_emptypath(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-files-emptypath.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert (
+ ret["args"][1]
+ == "Secret has onMissingValue set to 'error' and has neither value nor path nor ini_file set"
+ )
+
+ def test_ensure_error_file_wrongpath(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-files-wrongpath.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert ret["args"][1] == "Field has non-existing path: /tmp/nonexisting"
+
+ def test_ensure_error_empty_vaultprefix(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-emptyvaultprefix.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert ret["args"][1] == "Secret config-demo has empty vaultPrefixes"
+
+ def test_ensure_default_no_vaultprefix(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-novaultprefix.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 2
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret hub/config-demo secret='value123'\"", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_only_generate_passwords_works(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-onlygenerate.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 7
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/basicPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/basicPolicy policy=@/tmp/basicPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/advancedPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/advancedPolicy policy=@/tmp/advancedPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | vault kv put -mount=foo region-one/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | vault kv put -mount=foo snowflake.blueprints.rhecoeng.com/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/advancedPolicy/generate | vault kv patch -mount=foo region-one/config-demo secret2=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/advancedPolicy/generate | vault kv patch -mount=foo snowflake.blueprints.rhecoeng.com/config-demo secret2=-"', # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_generate_password_base64_works(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-generate-base64.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 4
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/basicPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/basicPolicy policy=@/tmp/basicPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | base64 --wrap=0 | vault kv put -mount=secret region-one/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | base64 --wrap=0 | vault kv put -mount=secret snowflake.blueprints.rhecoeng.com/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_error_secrets_same_name(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-same-secret-names.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert (
+ ret["args"][1] == "You cannot have duplicate secret names: ['config-demo']"
+ )
+
+ def test_ensure_error_fields_same_name(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-same-field-names.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert ret["args"][1] == "You cannot have duplicate field names: ['secret']"
+
+ def test_password_base64_secret(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-secret-base64.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 2
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret test/config-demo secret='Zm9v'\"", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_error_on_unsupported_backingstore(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2,
+ "values-secret-v2-nonexisting-backingstore.yaml",
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert (
+ ret["args"][1]
+ == "Currently only the 'vault' backingStore is supported: nonexisting"
+ )
+
+ def test_password_default_vp_policy(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-defaultvp-policy.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 6
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/basicPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/basicPolicy policy=@/tmp/basicPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | vault kv put -mount=secret region-one/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/basicPolicy/generate | vault kv put -mount=secret snowflake.blueprints.rhecoeng.com/config-demo secret=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/validatedPatternDefaultPolicy/generate | vault kv patch -mount=secret region-one/config-demo secret2=-"', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault read -field=password sys/policies/password/validatedPatternDefaultPolicy/generate | vault kv patch -mount=secret snowflake.blueprints.rhecoeng.com/config-demo secret2=-"', # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_error_on_wrong_override(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2,
+ "values-secret-v2-wrong-override.yaml",
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert (
+ ret["args"][1]
+ == "'override' attribute requires 'onMissingValue' to be set to 'generate'"
+ )
+
+ def test_ensure_override_works(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-test-override.yaml"
+ ),
+ }
+ )
+ # this will be used for both a secret and a file path
+ getpass.return_value = "/tmp/ca.crt"
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 5
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=10\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/basicPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/basicPolicy policy=@/tmp/basicPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/advancedPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/advancedPolicy policy=@/tmp/advancedPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault kv get -mount=secret -field=secret region-one/config-demo"', # noqa: E501
+ attempts=1,
+ checkrc=False,
+ ),
+ call(
+ 'oc exec -n vault vault-0 -i -- sh -c "vault kv get -mount=secret -field=secret snowflake.blueprints.rhecoeng.com/config-demo"', # noqa: E501
+ attempts=1,
+ checkrc=False,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+ def test_ensure_error_wrong_ini_file(self, getpass):
+ with self.assertRaises(AnsibleFailJson) as ansible_err:
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-wrong-ini-file.yaml"
+ ),
+ }
+ )
+ vault_load_secrets.main()
+
+ ret = ansible_err.exception.args[0]
+ self.assertEqual(ret["failed"], True)
+ assert ret["args"][1] == "ini_file requires at least ini_key to be defined"
+
+ def test_ensure_ini_file_works(self, getpass):
+ set_module_args(
+ {
+ "values_secrets": os.path.join(
+ self.testdir_v2, "values-secret-v2-ini-file.yaml"
+ ),
+ }
+ )
+ with patch.object(
+ load_secrets_v2.LoadSecretsV2, "_run_command"
+ ) as mock_run_command:
+ stdout = "configuration updated"
+ stderr = ""
+ ret = 0
+ mock_run_command.return_value = ret, stdout, stderr # successful execution
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ vault_load_secrets.main()
+ self.assertTrue(
+ result.exception.args[0]["changed"]
+ ) # ensure result is changed
+ assert mock_run_command.call_count == 5
+
+ calls = [
+ call(
+ 'echo \'length=20\nrule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }\nrule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }\nrule "charset" { charset = "0123456789" min-chars = 1 }\nrule "charset" { charset = "!@#%^&*" min-chars = 1 }\n\' | oc exec -n vault vault-0 -i -- sh -c \'cat - > /tmp/validatedPatternDefaultPolicy.hcl\';oc exec -n vault vault-0 -i -- sh -c \'vault write sys/policies/password/validatedPatternDefaultPolicy policy=@/tmp/validatedPatternDefaultPolicy.hcl\'', # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret hub/aws aws_access_key_id='123123'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv patch -mount=secret hub/aws aws_secret_access_key='abcdefghi'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv put -mount=secret hub/awsfoobar aws_access_key_id='345345'\"", # noqa: E501
+ attempts=3,
+ ),
+ call(
+ "oc exec -n vault vault-0 -i -- sh -c \"vault kv patch -mount=secret hub/awsfoobar aws_secret_access_key='rstuvwxyz'\"", # noqa: E501
+ attempts=3,
+ ),
+ ]
+ mock_run_command.assert_has_calls(calls)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/common/ansible/tests/unit/v1/mcg-values-secret.yaml b/common/ansible/tests/unit/v1/mcg-values-secret.yaml
new file mode 100644
index 00000000..8586f1a6
--- /dev/null
+++ b/common/ansible/tests/unit/v1/mcg-values-secret.yaml
@@ -0,0 +1,27 @@
+---
+secrets:
+ # NEVER COMMIT THESE VALUES TO GIT
+ config-demo:
+ # Secret used for demonstrating vault storage, external secrets, and ACM distribution
+ secret: VALUE
+ additionalsecret: test
+
+ # Required for automated spoke deployment
+ # aws:
+ # access_key_id: VALUE
+ # secret_access_key: VALUE
+
+# Required for automated spoke deployment
+files:
+ # # ssh-rsa AAA...
+ # publickey: ~/.ssh/id_rsa.pub
+ #
+ # # -----BEGIN RSA PRIVATE KEY
+ # # ...
+ # # -----END RSA PRIVATE KEY
+ # privatekey: ~/.ssh/id_rsa
+ #
+ # # {"auths":{"cloud.openshift.com":{"auth":"b3Blb... }}}
+ # openshiftPullSecret: ~/.dockerconfigjson
+ #
+ # azureOsServicePrincipal: ~/osServicePrincipal.json
diff --git a/common/ansible/tests/unit/v1/template-mcg-missing.yaml b/common/ansible/tests/unit/v1/template-mcg-missing.yaml
new file mode 100644
index 00000000..eca36b2e
--- /dev/null
+++ b/common/ansible/tests/unit/v1/template-mcg-missing.yaml
@@ -0,0 +1,27 @@
+---
+secrets:
+ # NEVER COMMIT THESE VALUES TO GIT
+ config-demo:
+ # Secret used for demonstrating vault storage, external secrets, and ACM distribution
+ secret: VALUE
+ foo: bar
+
+ # Required for automated spoke deployment
+ # aws:
+ # access_key_id: VALUE
+ # secret_access_key: VALUE
+
+# Required for automated spoke deployment
+files:
+ # # ssh-rsa AAA...
+ # publickey: ~/.ssh/id_rsa.pub
+ #
+ # # -----BEGIN RSA PRIVATE KEY
+ # # ...
+ # # -----END RSA PRIVATE KEY
+ # privatekey: ~/.ssh/id_rsa
+ #
+ # # {"auths":{"cloud.openshift.com":{"auth":"b3Blb... }}}
+ # openshiftPullSecret: ~/.dockerconfigjson
+ #
+ # azureOsServicePrincipal: ~/osServicePrincipal.json
diff --git a/common/ansible/tests/unit/v1/template-mcg-working.yaml b/common/ansible/tests/unit/v1/template-mcg-working.yaml
new file mode 100644
index 00000000..8445c6f3
--- /dev/null
+++ b/common/ansible/tests/unit/v1/template-mcg-working.yaml
@@ -0,0 +1,26 @@
+---
+secrets:
+ # NEVER COMMIT THESE VALUES TO GIT
+ config-demo:
+ # Secret used for demonstrating vault storage, external secrets, and ACM distribution
+ secret: VALUE
+
+ # Required for automated spoke deployment
+ # aws:
+ # access_key_id: VALUE
+ # secret_access_key: VALUE
+
+# Required for automated spoke deployment
+files:
+ # # ssh-rsa AAA...
+ # publickey: ~/.ssh/id_rsa.pub
+ #
+ # # -----BEGIN RSA PRIVATE KEY
+ # # ...
+ # # -----END RSA PRIVATE KEY
+ # privatekey: ~/.ssh/id_rsa
+ #
+ # # {"auths":{"cloud.openshift.com":{"auth":"b3Blb... }}}
+ # openshiftPullSecret: ~/.dockerconfigjson
+ #
+ # azureOsServicePrincipal: ~/osServicePrincipal.json
diff --git a/common/ansible/tests/unit/v1/values-secret-broken1.yaml b/common/ansible/tests/unit/v1/values-secret-broken1.yaml
new file mode 100644
index 00000000..ecfc9df4
--- /dev/null
+++ b/common/ansible/tests/unit/v1/values-secret-broken1.yaml
@@ -0,0 +1,6 @@
+---
+secrets:
+ # empty
+
+files:
+ # empty
diff --git a/common/ansible/tests/unit/v1/values-secret-broken2.yaml b/common/ansible/tests/unit/v1/values-secret-broken2.yaml
new file mode 100644
index 00000000..82477acd
--- /dev/null
+++ b/common/ansible/tests/unit/v1/values-secret-broken2.yaml
@@ -0,0 +1,6 @@
+---
+# secrets:
+# empty
+
+# files:
+# empty
diff --git a/common/ansible/tests/unit/v1/values-secret-broken3.yaml b/common/ansible/tests/unit/v1/values-secret-broken3.yaml
new file mode 100644
index 00000000..6d7295ba
--- /dev/null
+++ b/common/ansible/tests/unit/v1/values-secret-broken3.yaml
@@ -0,0 +1,9 @@
+---
+secrets:
+ - borked1
+ - borked2
+
+files:
+ foo:
+ - broken
+ - broken2
diff --git a/common/ansible/tests/unit/v1/values-secret-empty-files.yaml b/common/ansible/tests/unit/v1/values-secret-empty-files.yaml
new file mode 100644
index 00000000..078166a0
--- /dev/null
+++ b/common/ansible/tests/unit/v1/values-secret-empty-files.yaml
@@ -0,0 +1,15 @@
+---
+secrets:
+ # NEVER COMMIT THESE VALUES TO GIT
+ config-demo:
+ # Secret used for demonstrating vault storage, external secrets, and ACM distribution
+ secret: VALUE
+
+ # Required for automated spoke deployment
+ aws:
+ access_key_id: VALUE
+ secret_access_key: VALUE
+
+# Required for automated spoke deployment
+files:
+ # # ssh-rsa AAA...
diff --git a/common/ansible/tests/unit/v1/values-secret-empty-secrets.yaml b/common/ansible/tests/unit/v1/values-secret-empty-secrets.yaml
new file mode 100644
index 00000000..13739a27
--- /dev/null
+++ b/common/ansible/tests/unit/v1/values-secret-empty-secrets.yaml
@@ -0,0 +1,16 @@
+---
+secrets:
+ # NEVER COMMIT THESE VALUES TO GIT
+ # config-demo:
+ # # Secret used for demonstrating vault storage, external secrets, and ACM distribution
+ # secret: VALUE
+
+ # # Required for automated spoke deployment
+ # aws:
+ # access_key_id: VALUE
+ # secret_access_key: VALUE
+
+# Required for automated spoke deployment
+files:
+ # # ssh-rsa AAA...
+ publickey: /tmp/ca.crt
diff --git a/common/ansible/tests/unit/v1/values-secret-fqdn.yaml b/common/ansible/tests/unit/v1/values-secret-fqdn.yaml
new file mode 100644
index 00000000..c77496c1
--- /dev/null
+++ b/common/ansible/tests/unit/v1/values-secret-fqdn.yaml
@@ -0,0 +1,11 @@
+---
+secrets:
+ test:
+ secret1: foo
+
+secrets.region-one.blueprints.rhecoeng.com:
+ config-demo:
+ secret: region123
+
+files.region-one:
+ ca: /tmp/ca.crt
diff --git a/common/ansible/tests/unit/v1/values-secret-good.yaml b/common/ansible/tests/unit/v1/values-secret-good.yaml
new file mode 100644
index 00000000..6db47285
--- /dev/null
+++ b/common/ansible/tests/unit/v1/values-secret-good.yaml
@@ -0,0 +1,36 @@
+---
+secrets:
+ # NEVER COMMIT THESE VALUES TO GIT
+ config-demo:
+ # Secret used for demonstrating vault storage, external secrets, and ACM distribution
+ secret: demo123
+ googleapi:
+ key: test123
+
+ cluster_alejandro:
+ name: alejandro
+ bearerToken: sha256~bumxi-012345678901233455675678678098-abcdef
+
+ test:
+ s3.accessKey: "1234"
+ s3.secretKey: "4321"
+
+ test2:
+ s3.accessKey: accessKey
+ s3.secretKey: secretKey
+ s3Secret: fooo
+
+ test3:
+ s3.accessKey: "aaaaa"
+ s3.secretKey: "bbbbbbbb"
+
+files:
+ # oc extract -n openshift-config cm/kube-root-ca.crt --to=/home/michele/ --keys=ca.crt
+ cluster_alejandro_ca: /tmp/ca.crt
+
+secrets.region-one:
+ config-demo:
+ secret: region123
+
+files.region-one:
+ ca: /tmp/ca.crt
diff --git a/common/ansible/tests/unit/v2/aws-example.ini b/common/ansible/tests/unit/v2/aws-example.ini
new file mode 100644
index 00000000..5e38bfd8
--- /dev/null
+++ b/common/ansible/tests/unit/v2/aws-example.ini
@@ -0,0 +1,4 @@
+; https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
+[default]
+aws_access_key_id = A123456789012345678A
+aws_secret_access_key = A12345678901234567890123456789012345678A
diff --git a/common/ansible/tests/unit/v2/test-file-contents b/common/ansible/tests/unit/v2/test-file-contents
new file mode 100644
index 00000000..49c9a88c
--- /dev/null
+++ b/common/ansible/tests/unit/v2/test-file-contents
@@ -0,0 +1 @@
+This space intentionally left blank
diff --git a/common/ansible/tests/unit/v2/test-file-contents.b64 b/common/ansible/tests/unit/v2/test-file-contents.b64
new file mode 100644
index 00000000..da896ba7
--- /dev/null
+++ b/common/ansible/tests/unit/v2/test-file-contents.b64
@@ -0,0 +1 @@
+VGhpcyBzcGFjZSBpbnRlbnRpb25hbGx5IGxlZnQgYmxhbmsK
\ No newline at end of file
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-base-k8s-backend.yaml b/common/ansible/tests/unit/v2/values-secret-v2-base-k8s-backend.yaml
new file mode 100644
index 00000000..7194ebc3
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-base-k8s-backend.yaml
@@ -0,0 +1,9 @@
+version: "2.0"
+
+backingStore: kubernetes
+
+secrets:
+ - name: config-demo
+ fields:
+ - name: secret
+ value: secret
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-base-none-backend.yaml b/common/ansible/tests/unit/v2/values-secret-v2-base-none-backend.yaml
new file mode 100644
index 00000000..4e1e3cd2
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-base-none-backend.yaml
@@ -0,0 +1,11 @@
+version: "2.0"
+
+backingStore: none
+
+secrets:
+ - name: config-demo
+ targetNamespaces:
+ - default
+ fields:
+ - name: secret
+ value: secret
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-base-unknown-backend.yaml b/common/ansible/tests/unit/v2/values-secret-v2-base-unknown-backend.yaml
new file mode 100644
index 00000000..e1f4c6d5
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-base-unknown-backend.yaml
@@ -0,0 +1,9 @@
+version: "2.0"
+
+backingStore: unknown
+
+secrets:
+ - name: config-demo
+ fields:
+ - name: secret
+ value: secret
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-base.yaml b/common/ansible/tests/unit/v2/values-secret-v2-base.yaml
new file mode 100644
index 00000000..bf9670d8
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-base.yaml
@@ -0,0 +1,38 @@
+version: "2.0"
+
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ override: true
+ vaultPolicy: basicPolicy
+ - name: secret2
+ value: null
+ onMissingValue: prompt
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: error
+ - name: ca_crt2
+ path: null
+ base64: true
+ onMissingValue: prompt
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-block-yamlstring.yaml b/common/ansible/tests/unit/v2/values-secret-v2-block-yamlstring.yaml
new file mode 100644
index 00000000..84165f69
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-block-yamlstring.yaml
@@ -0,0 +1,16 @@
+version: "2.0"
+
+secrets:
+ - name: config-demo
+ fields:
+ - name: sshprivkey
+ onMissingValue: error
+ value: |-
+ ssh-rsa oNb/kAvwdQl+FKdwzzKo5rnGIB68UOxWoaKPnKdgF/ts67CDBslWGnpUZCpp8TdaxfHmpoyA6nutMwQw8OAMEUybxvilDn+ZVJ/5qgfRBdi8wLKRLTIj0v+ZW7erN9yuZG53xUQAaQjivM3cRyNLIZ9torShYaYwD1UTTDkV97RMfNDlWI5f5FGRvfy429ZfCwbUWUbijrcv/mWc/uO3x/+MBXwa4f8ubzEYlrt4yH/Vbpzs67kE9UJ9z1zurFUFJydy1ZDAdKSiBS91ImI3ccKnbz0lji2bgSYR0Wp1IQhzSpjyJU2rIu9HAEUh85Rwf2jakfLpMcg/hSBer3sG kilroy@example.com
+ - name: sshpubkey
+ onMissingValue: error
+ value: |-
+ -----BEGIN OPENSSH PRIVATE KEY-----
+ TtzxGgWrNerAr1hzUqPW2xphF/Aur1rQXSLv4J7frEJxNED6u/eScsNgwJMGXwRx7QYVohh0ARHVhJdUzJK7pEIphi4BGw==
+ wlo+oQsi828b47SKZB8/K9dbeLlLiXh9/hu47MGpeGHZsKbjAdauncuw+YUDDN2EADJjasNMZHjxYhXKtqDjXTIw1X1n0Q==
+ -----END OPENSSH PRIVATE KEY-----
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-default-annotations.yaml b/common/ansible/tests/unit/v2/values-secret-v2-default-annotations.yaml
new file mode 100644
index 00000000..af3e2f9b
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-default-annotations.yaml
@@ -0,0 +1,13 @@
+---
+version: "2.0"
+
+annotations:
+ test-annotation: 42
+
+secrets:
+ - name: test-secret
+ fields:
+ - name: username
+ value: user
+ - name: password
+ value: testpass
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-default-labels.yaml b/common/ansible/tests/unit/v2/values-secret-v2-default-labels.yaml
new file mode 100644
index 00000000..56af6586
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-default-labels.yaml
@@ -0,0 +1,11 @@
+---
+version: "2.0"
+
+defaultLabels:
+ testlabel: 4
+
+secrets:
+ - name: test-secret
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-default-namespace.yaml b/common/ansible/tests/unit/v2/values-secret-v2-default-namespace.yaml
new file mode 100644
index 00000000..a0f4db63
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-default-namespace.yaml
@@ -0,0 +1,8 @@
+---
+version: "2.0"
+
+secrets:
+ test-secret:
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-defaultvp-policy.yaml b/common/ansible/tests/unit/v2/values-secret-v2-defaultvp-policy.yaml
new file mode 100644
index 00000000..e284d300
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-defaultvp-policy.yaml
@@ -0,0 +1,25 @@
+version: "2.0"
+
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ override: true
+ vaultPolicy: basicPolicy
+ - name: secret2
+ onMissingValue: generate
+ override: true
+ vaultPolicy: validatedPatternDefaultPolicy
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-emptyvaultprefix.yaml b/common/ansible/tests/unit/v2/values-secret-v2-emptyvaultprefix.yaml
new file mode 100644
index 00000000..df1d420a
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-emptyvaultprefix.yaml
@@ -0,0 +1,9 @@
+version: "2.0"
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ fields:
+ - name: secret
+ value: value123
+ onMissingValue: error
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-file-contents-b64.yaml b/common/ansible/tests/unit/v2/values-secret-v2-file-contents-b64.yaml
new file mode 100644
index 00000000..47ed7219
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-file-contents-b64.yaml
@@ -0,0 +1,9 @@
+---
+version: "2.0"
+
+secrets:
+ - name: test-secret
+ fields:
+ - name: username
+ path: ~/test-file-contents
+ base64: true
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-file-contents-double-b64.yaml b/common/ansible/tests/unit/v2/values-secret-v2-file-contents-double-b64.yaml
new file mode 100644
index 00000000..3a968eca
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-file-contents-double-b64.yaml
@@ -0,0 +1,9 @@
+---
+version: "2.0"
+
+secrets:
+ - name: test-secret
+ fields:
+ - name: username
+ path: ~/test-file-contents.b64
+ base64: true
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-file-contents.yaml b/common/ansible/tests/unit/v2/values-secret-v2-file-contents.yaml
new file mode 100644
index 00000000..e2da90c2
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-file-contents.yaml
@@ -0,0 +1,8 @@
+---
+version: "2.0"
+
+secrets:
+ - name: test-secret
+ fields:
+ - name: username
+ path: ~/test-file-contents
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-files-emptypath.yaml b/common/ansible/tests/unit/v2/values-secret-v2-files-emptypath.yaml
new file mode 100644
index 00000000..9c1142aa
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-files-emptypath.yaml
@@ -0,0 +1,25 @@
+version: "2.0"
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - secret/region-one
+ - secret/snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: ca_crt
+ onMissingValue: error
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-files-wrong-onmissingvalue.yaml b/common/ansible/tests/unit/v2/values-secret-v2-files-wrong-onmissingvalue.yaml
new file mode 100644
index 00000000..36b0e715
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-files-wrong-onmissingvalue.yaml
@@ -0,0 +1,26 @@
+version: "2.0"
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - secret/region-one
+ - secret/snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: generate
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-files-wrongpath.yaml b/common/ansible/tests/unit/v2/values-secret-v2-files-wrongpath.yaml
new file mode 100644
index 00000000..35e5cfcf
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-files-wrongpath.yaml
@@ -0,0 +1,26 @@
+version: "2.0"
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - secret/region-one
+ - secret/snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: ca_crt
+ path: /tmp/nonexisting
+ onMissingValue: error
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-generate-base64.yaml b/common/ansible/tests/unit/v2/values-secret-v2-generate-base64.yaml
new file mode 100644
index 00000000..eed8b402
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-generate-base64.yaml
@@ -0,0 +1,21 @@
+version: "2.0"
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ base64: true
+ override: true
+ vaultPolicy: basicPolicy
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-generic-onlygenerate.yaml b/common/ansible/tests/unit/v2/values-secret-v2-generic-onlygenerate.yaml
new file mode 100644
index 00000000..46992af1
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-generic-onlygenerate.yaml
@@ -0,0 +1,33 @@
+version: "2.0"
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ targetNamespaces:
+ - default
+ vaultMount: foo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ override: true
+ vaultPolicy: basicPolicy
+ - name: secret2
+ onMissingValue: generate
+ override: true
+ vaultPolicy: advancedPolicy
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-ini-file-b64.yaml b/common/ansible/tests/unit/v2/values-secret-v2-ini-file-b64.yaml
new file mode 100644
index 00000000..ff08d20a
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-ini-file-b64.yaml
@@ -0,0 +1,23 @@
+version: "2.0"
+secrets:
+ - name: aws
+ fields:
+ - name: aws_access_key_id
+ ini_file: '~/aws-example.ini'
+ ini_section: default
+ ini_key: aws_access_key_id
+ - name: aws_secret_access_key
+ ini_file: '~/aws-example.ini'
+ ini_key: aws_secret_access_key
+ - name: awsb64
+ fields:
+ - name: aws_access_key_id
+ ini_file: '~/aws-example.ini'
+ ini_section: default
+ ini_key: aws_access_key_id
+ base64: true
+ - name: aws_secret_access_key
+ ini_file: '~/aws-example.ini'
+ ini_section: default
+ ini_key: aws_secret_access_key
+ base64: true
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-ini-file.yaml b/common/ansible/tests/unit/v2/values-secret-v2-ini-file.yaml
new file mode 100644
index 00000000..c69a1429
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-ini-file.yaml
@@ -0,0 +1,21 @@
+version: "2.0"
+secrets:
+ - name: aws
+ fields:
+ - name: aws_access_key_id
+ ini_file: /tmp/awscredentials
+ ini_section: default
+ ini_key: aws_access_key_id
+ - name: aws_secret_access_key
+ ini_file: /tmp/awscredentials
+ ini_key: aws_secret_access_key
+ - name: awsfoobar
+ fields:
+ - name: aws_access_key_id
+ ini_file: /tmp/awscredentials
+ ini_section: foobar
+ ini_key: aws_access_key_id
+ - name: aws_secret_access_key
+ ini_file: /tmp/awscredentials
+ ini_section: foobar
+ ini_key: aws_secret_access_key
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-more-namespaces.yaml b/common/ansible/tests/unit/v2/values-secret-v2-more-namespaces.yaml
new file mode 100644
index 00000000..be409af7
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-more-namespaces.yaml
@@ -0,0 +1,11 @@
+---
+version: "2.0"
+
+secrets:
+ - name: test-secret
+ targetNamespaces:
+ - default
+ - extra
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-nondefault-namespace.yaml b/common/ansible/tests/unit/v2/values-secret-v2-nondefault-namespace.yaml
new file mode 100644
index 00000000..a0f4db63
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-nondefault-namespace.yaml
@@ -0,0 +1,8 @@
+---
+version: "2.0"
+
+secrets:
+ test-secret:
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-none-no-targetnamespaces.yaml b/common/ansible/tests/unit/v2/values-secret-v2-none-no-targetnamespaces.yaml
new file mode 100644
index 00000000..2a5ef0b6
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-none-no-targetnamespaces.yaml
@@ -0,0 +1,33 @@
+version: "2.0"
+
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultMount: foo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ override: true
+ vaultPolicy: basicPolicy
+ - name: secret2
+ onMissingValue: generate
+ override: true
+ vaultPolicy: advancedPolicy
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-nonexisting-backingstore.yaml b/common/ansible/tests/unit/v2/values-secret-v2-nonexisting-backingstore.yaml
new file mode 100644
index 00000000..906e3167
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-nonexisting-backingstore.yaml
@@ -0,0 +1,23 @@
+version: "2.0"
+
+backingStore: nonexisting
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ vaultPolicy: basicPolicy
+ - name: secret2
+ value: null
+ onMissingValue: prompt
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: error
+ - name: ca_crt2
+ path: null
+ base64: true
+ onMissingValue: prompt
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-nopolicies.yaml b/common/ansible/tests/unit/v2/values-secret-v2-nopolicies.yaml
new file mode 100644
index 00000000..3b465700
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-nopolicies.yaml
@@ -0,0 +1,24 @@
+version: "2.0"
+
+backingStore: vault
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - secret/region-one
+ - secret/snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ value: value123
+ onMissingValue: error
+
+ - name: config-demo-file
+ vaultPrefixes:
+ - secret/region-two
+ - secret/snowflake.blueprints.rhecoeng.com
+
+ fields:
+ - name: ca_crt
+ path: /tmp/ca.crt
+ base64: true
+ onMissingValue: error
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-novaultprefix.yaml b/common/ansible/tests/unit/v2/values-secret-v2-novaultprefix.yaml
new file mode 100644
index 00000000..92449dae
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-novaultprefix.yaml
@@ -0,0 +1,8 @@
+version: "2.0"
+
+secrets:
+ - name: config-demo
+ fields:
+ - name: secret
+ value: value123
+ onMissingValue: error
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-onlygenerate.yaml b/common/ansible/tests/unit/v2/values-secret-v2-onlygenerate.yaml
new file mode 100644
index 00000000..2a5ef0b6
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-onlygenerate.yaml
@@ -0,0 +1,33 @@
+version: "2.0"
+
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultMount: foo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ override: true
+ vaultPolicy: basicPolicy
+ - name: secret2
+ onMissingValue: generate
+ override: true
+ vaultPolicy: advancedPolicy
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-override-labels.yaml b/common/ansible/tests/unit/v2/values-secret-v2-override-labels.yaml
new file mode 100644
index 00000000..13a460be
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-override-labels.yaml
@@ -0,0 +1,13 @@
+---
+version: "2.0"
+
+defaultLabels:
+ testlabel: 4
+
+secrets:
+ - name: test-secret
+ labels:
+ overridelabel: 42
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-override-namespace.yaml b/common/ansible/tests/unit/v2/values-secret-v2-override-namespace.yaml
new file mode 100644
index 00000000..ad53cf77
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-override-namespace.yaml
@@ -0,0 +1,10 @@
+---
+version: "2.0"
+
+secretStoreNamespace: 'overridden-namespace'
+
+secrets:
+ - name: test-secret
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-override-type-none.yaml b/common/ansible/tests/unit/v2/values-secret-v2-override-type-none.yaml
new file mode 100644
index 00000000..1d110671
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-override-type-none.yaml
@@ -0,0 +1,14 @@
+---
+version: "2.0"
+
+# This is the actual default
+defaultNamespace: 'validated-patterns-secrets'
+
+secrets:
+ - name: test-secret
+ type: 'user-specified'
+ targetNamespaces:
+ - default
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-override-type.yaml b/common/ansible/tests/unit/v2/values-secret-v2-override-type.yaml
new file mode 100644
index 00000000..1bf8e369
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-override-type.yaml
@@ -0,0 +1,12 @@
+---
+version: "2.0"
+
+# This is the actual default
+defaultNamespace: 'validated-patterns-secrets'
+
+secrets:
+ - name: test-secret
+ type: 'user-specified'
+ fields:
+ - name: username
+ value: user
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-same-field-names.yaml b/common/ansible/tests/unit/v2/values-secret-v2-same-field-names.yaml
new file mode 100644
index 00000000..4845e269
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-same-field-names.yaml
@@ -0,0 +1,14 @@
+version: "2.0"
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ value: foo
+ onMissingValue: error
+ - name: secret
+ value: bar
+ onMissingValue: prompt
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-same-secret-names.yaml b/common/ansible/tests/unit/v2/values-secret-v2-same-secret-names.yaml
new file mode 100644
index 00000000..3e17e536
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-same-secret-names.yaml
@@ -0,0 +1,20 @@
+version: "2.0"
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ value: foo
+ onMissingValue: error
+
+ - name: config-demo
+ vaultPrefixes:
+ - region-two
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret2
+ value: bar
+ onMissingValue: prompt
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-secret-base64.yaml b/common/ansible/tests/unit/v2/values-secret-v2-secret-base64.yaml
new file mode 100644
index 00000000..b361b34d
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-secret-base64.yaml
@@ -0,0 +1,11 @@
+version: "2.0"
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - test
+ fields:
+ - name: secret
+ value: foo
+ onMissingValue: error
+ base64: true
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-secret-binary-b64.yaml b/common/ansible/tests/unit/v2/values-secret-v2-secret-binary-b64.yaml
new file mode 100644
index 00000000..579c7d6e
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-secret-binary-b64.yaml
@@ -0,0 +1,10 @@
+version: "2.0"
+
+secrets:
+ - name: secret
+ fields:
+ - name: secret
+ # Should contain 8, 6, 7, 5, 3, 0, 9 in binary
+ path: '/tmp/testbinfile.bin'
+ onMissingValue: error
+ base64: true
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-test-override.yaml b/common/ansible/tests/unit/v2/values-secret-v2-test-override.yaml
new file mode 100644
index 00000000..8efdd95c
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-test-override.yaml
@@ -0,0 +1,28 @@
+version: "2.0"
+
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#%^&*" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ override: false
+ vaultPolicy: basicPolicy
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-wrong-ini-file.yaml b/common/ansible/tests/unit/v2/values-secret-v2-wrong-ini-file.yaml
new file mode 100644
index 00000000..fb9b253c
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-wrong-ini-file.yaml
@@ -0,0 +1,9 @@
+version: "2.0"
+secrets:
+ - name: aws
+ fields:
+ - name: aws_key_id
+ ini_file: ~/.aws/credentials
+ ini_section: default
+ # The below is required
+ # ini_key: aws_access_key_id
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-wrong-onmissingvalue.yaml b/common/ansible/tests/unit/v2/values-secret-v2-wrong-onmissingvalue.yaml
new file mode 100644
index 00000000..2d53807e
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-wrong-onmissingvalue.yaml
@@ -0,0 +1,20 @@
+version: "2.0"
+
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - secret/region-one
+ - secret/snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ vaultPolicy: nonExisting
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-wrong-override.yaml b/common/ansible/tests/unit/v2/values-secret-v2-wrong-override.yaml
new file mode 100644
index 00000000..650e93b5
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-wrong-override.yaml
@@ -0,0 +1,11 @@
+version: "2.0"
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - region-one
+ fields:
+ - name: secret
+ value: null
+ onMissingValue: prompt
+ override: true
diff --git a/common/ansible/tests/unit/v2/values-secret-v2-wrong-vaultpolicy.yaml b/common/ansible/tests/unit/v2/values-secret-v2-wrong-vaultpolicy.yaml
new file mode 100644
index 00000000..2d53807e
--- /dev/null
+++ b/common/ansible/tests/unit/v2/values-secret-v2-wrong-vaultpolicy.yaml
@@ -0,0 +1,20 @@
+version: "2.0"
+
+backingStore: vault
+
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+secrets:
+ - name: config-demo
+ vaultPrefixes:
+ - secret/region-one
+ - secret/snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ vaultPolicy: nonExisting
diff --git a/common/clustergroup/.github/workflows/update-helm-repo.yml b/common/clustergroup/.github/workflows/update-helm-repo.yml
new file mode 100644
index 00000000..fa1d6247
--- /dev/null
+++ b/common/clustergroup/.github/workflows/update-helm-repo.yml
@@ -0,0 +1,30 @@
+# This invokes the workflow named 'publish-charts' in the umbrella repo
+# It expects to have a secret called CHARTS_REPOS_TOKEN which contains
+# the GitHub token that has permissions to invoke workflows and commit code
+# inside the umbrella-repo.
+# The following fine-grained permissions were used in testing and were limited
+# to the umbrella repo only:
+# - Actions: r/w
+# - Commit statuses: r/w
+# - Contents: r/w
+# - Deployments: r/w
+# - Pages: r/w
+#
+
+name: vp-patterns/update-helm-repo
+on:
+ push:
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+'
+
+jobs:
+ helmlint:
+ uses: validatedpatterns/helm-charts/.github/workflows/helmlint.yml@985ba37e0eb50b1b35ec194fc999eae2d0ae1486
+ permissions:
+ contents: read
+
+ update-helm-repo:
+ needs: [helmlint]
+ uses: validatedpatterns/helm-charts/.github/workflows/update-helm-repo.yml@985ba37e0eb50b1b35ec194fc999eae2d0ae1486
+ permissions: read-all
+ secrets: inherit
diff --git a/common/clustergroup/.helmignore b/common/clustergroup/.helmignore
new file mode 100644
index 00000000..b25c15b8
--- /dev/null
+++ b/common/clustergroup/.helmignore
@@ -0,0 +1 @@
+*~
diff --git a/common/clustergroup/Chart.yaml b/common/clustergroup/Chart.yaml
new file mode 100644
index 00000000..345b8175
--- /dev/null
+++ b/common/clustergroup/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart to create per-clustergroup ArgoCD applications and any required namespaces or subscriptions.
+keywords:
+- pattern
+name: clustergroup
+version: 0.8.2
diff --git a/common/clustergroup/templates/_helpers.tpl b/common/clustergroup/templates/_helpers.tpl
new file mode 100644
index 00000000..83b06a04
--- /dev/null
+++ b/common/clustergroup/templates/_helpers.tpl
@@ -0,0 +1,72 @@
+{{/*
+Default always defined top-level variables for helm charts
+*/}}
+{{- define "clustergroup.app.globalvalues.helmparameters" -}}
+- name: global.repoURL
+ value: $ARGOCD_APP_SOURCE_REPO_URL
+- name: global.targetRevision
+ value: $ARGOCD_APP_SOURCE_TARGET_REVISION
+- name: global.namespace
+ value: $ARGOCD_APP_NAMESPACE
+- name: global.pattern
+ value: {{ $.Values.global.pattern }}
+- name: global.clusterDomain
+ value: {{ $.Values.global.clusterDomain }}
+- name: global.clusterVersion
+ value: "{{ $.Values.global.clusterVersion }}"
+- name: global.clusterPlatform
+ value: "{{ $.Values.global.clusterPlatform }}"
+- name: global.hubClusterDomain
+ value: {{ $.Values.global.hubClusterDomain }}
+- name: global.localClusterDomain
+ value: {{ coalesce $.Values.global.localClusterDomain $.Values.global.hubClusterDomain }}
+- name: global.privateRepo
+ value: {{ $.Values.global.privateRepo | quote }}
+{{- end }} {{/* clustergroup.app.globalvalues.helmparameters */}}
+
+
+{{/*
+Default always defined valueFiles to be included in Applications
+*/}}
+{{- define "clustergroup.app.globalvalues.valuefiles" -}}
+- "/values-global.yaml"
+- "/values-{{ $.Values.clusterGroup.name }}.yaml"
+{{- if $.Values.global.clusterPlatform }}
+- "/values-{{ $.Values.global.clusterPlatform }}.yaml"
+ {{- if $.Values.global.clusterVersion }}
+- "/values-{{ $.Values.global.clusterPlatform }}-{{ $.Values.global.clusterVersion }}.yaml"
+ {{- end }}
+- "/values-{{ $.Values.global.clusterPlatform }}-{{ $.Values.clusterGroup.name }}.yaml"
+{{- end }}
+{{- if $.Values.global.clusterVersion }}
+- "/values-{{ $.Values.global.clusterVersion }}-{{ $.Values.clusterGroup.name }}.yaml"
+{{- end }}
+{{- if $.Values.global.extraValueFiles }}
+{{- range $.Values.global.extraValueFiles }}
+- {{ . | quote }}
+{{- end }} {{/* range $.Values.global.extraValueFiles */}}
+{{- end }} {{/* if $.Values.global.extraValueFiles */}}
+{{- end }} {{/* clustergroup.app.globalvalues.valuefiles */}}
+
+{{/*
+Default always defined valueFiles to be included in Applications but with a prefix called $patternref
+*/}}
+{{- define "clustergroup.app.globalvalues.prefixedvaluefiles" -}}
+- "$patternref/values-global.yaml"
+- "$patternref/values-{{ $.Values.clusterGroup.name }}.yaml"
+{{- if $.Values.global.clusterPlatform }}
+- "$patternref/values-{{ $.Values.global.clusterPlatform }}.yaml"
+ {{- if $.Values.global.clusterVersion }}
+- "$patternref/values-{{ $.Values.global.clusterPlatform }}-{{ $.Values.global.clusterVersion }}.yaml"
+ {{- end }}
+- "$patternref/values-{{ $.Values.global.clusterPlatform }}-{{ $.Values.clusterGroup.name }}.yaml"
+{{- end }}
+{{- if $.Values.global.clusterVersion }}
+- "$patternref/values-{{ $.Values.global.clusterVersion }}-{{ $.Values.clusterGroup.name }}.yaml"
+{{- end }}
+{{- if $.Values.global.extraValueFiles }}
+{{- range $.Values.global.extraValueFiles }}
+- "$patternref/{{ . }}"
+{{- end }} {{/* range $.Values.global.extraValueFiles */}}
+{{- end }} {{/* if $.Values.global.extraValueFiles */}}
+{{- end }} {{/* clustergroup.app.globalvalues.prefixedvaluefiles */}}
diff --git a/common/clustergroup/templates/core/catalog-sources.yaml b/common/clustergroup/templates/core/catalog-sources.yaml
new file mode 100644
index 00000000..73c2e949
--- /dev/null
+++ b/common/clustergroup/templates/core/catalog-sources.yaml
@@ -0,0 +1,14 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{- range .Values.clusterGroup.indexImages }}
+{{- $name := mustRegexReplaceAll "[^/]*/(.*):.*" .image "${1}" | replace "/" "-" }}
+apiVersion: operators.coreos.com/v1alpha1
+kind: CatalogSource
+metadata:
+ name: {{ coalesce .name $name }}
+ namespace: openshift-marketplace
+spec:
+ sourceType: grpc
+ image: {{ .image }}
+---
+{{- end -}}
+{{- end -}}
diff --git a/common/clustergroup/templates/core/namespaces.yaml b/common/clustergroup/templates/core/namespaces.yaml
new file mode 100644
index 00000000..dfa6ae1a
--- /dev/null
+++ b/common/clustergroup/templates/core/namespaces.yaml
@@ -0,0 +1,32 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{- range $ns := .Values.clusterGroup.namespaces }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ {{- if kindIs "map" $ns }}
+ {{- range $k, $v := $ns }}{{- /* We loop here even though the map has always just one key */}}
+ name: {{ $k }}
+ labels:
+ argocd.argoproj.io/managed-by: {{ $.Values.global.pattern }}-{{ $.Values.clusterGroup.name }}
+ {{- if $v.labels }}
+ {{- range $key, $value := $v.labels }} {{- /* We loop through the labels map to get key/value pairs */}}
+ {{ $key }}: {{ $value | default "" | quote }}
+ {{- end }}
+ {{- end }}
+ {{- if $v.annotations }}
+ annotations:
+ {{- range $key, $value := $v.annotations }} {{- /* We loop through the map to get key/value pairs */}}
+ {{ $key }}: {{ $value | default "" | quote }}
+ {{- end }}
+ {{- end }}{{- /* if $v.annotations */}}
+ {{- end }}{{- /* range $k, $v := $ns */}}
+
+ {{- else if kindIs "string" $ns }}
+ labels:
+ argocd.argoproj.io/managed-by: {{ $.Values.global.pattern }}-{{ $.Values.clusterGroup.name }}
+ name: {{ $ns }}
+ {{- end }} {{- /* if kindIs "string" $ns */}}
+spec:
+---
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/core/operatorgroup.yaml b/common/clustergroup/templates/core/operatorgroup.yaml
new file mode 100644
index 00000000..cd679bd5
--- /dev/null
+++ b/common/clustergroup/templates/core/operatorgroup.yaml
@@ -0,0 +1,38 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{- range $ns := .Values.clusterGroup.namespaces }}
+
+{{- if or (empty $.Values.clusterGroup.operatorgroupExcludes) (not (has . $.Values.clusterGroup.operatorgroupExcludes)) }}
+
+ {{- if kindIs "map" $ns }}
+ {{- range $k, $v := $ns }}{{- /* We loop here even though the map has always just one key */}}
+ {{- if $v.operatorGroup }}{{- /* Checks if the user sets operatorGroup: false */}}
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: {{ $k }}-operator-group
+ namespace: {{ $k }}
+spec:
+ targetNamespaces:
+ {{- if (hasKey $v "targetNamespaces") }}
+ {{- range $v.targetNamespaces }}{{- /* We loop through the list of targetNamespaces */}}
+ - {{ . }}
+ {{- end }}{{- /* End range targetNamespaces */}}
+ {{- else }}
+ - {{ $k }}
+ {{- end }}{{- /* End of if hasKey targetNamespaces */}}
+ {{- end }}{{- /* End of if $v.operatorGroup */}}
+ {{- end }}{{- /* range $k, $v := $ns */}}
+ {{- else if kindIs "string" $ns }}
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: {{ . }}-operator-group
+ namespace: {{ . }}
+spec:
+ targetNamespaces:
+ - {{ . }}
+ {{- end }} {{- /* if kindIs "string" $ns */}}
+---
+{{- end }} {{- /* if or (empty $.Values.clusterGroup.operatorgroupExcludes) (not (has . $.Values.clusterGroup.operatorgroupExcludes)) */}}
+{{- end }} {{- /* range $ns := .Values.clusterGroup.namespaces */}}
+{{- end }} {{- /* if not (eq .Values.enabled "plumbing") */}}
diff --git a/common/clustergroup/templates/core/subscriptions.yaml b/common/clustergroup/templates/core/subscriptions.yaml
new file mode 100644
index 00000000..f58f6c28
--- /dev/null
+++ b/common/clustergroup/templates/core/subscriptions.yaml
@@ -0,0 +1,73 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{- range .Values.clusterGroup.subscriptions }}
+{{- $subs := . }}
+{{- $installPlanValue := .installPlanApproval }}
+
+{{- if $subs.namespaces }}
+{{- if not $subs.disabled }}
+{{- range .namespaces }}
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: {{ $subs.name }}
+ namespace: {{ . }}
+spec:
+ name: {{ $subs.name }}
+ source: {{ default "redhat-operators" $subs.source }}
+ sourceNamespace: {{ default "openshift-marketplace" $subs.sourceNamespace }}
+ {{- if $subs.channel }}
+ channel: {{ $subs.channel }}
+ {{- end }}
+ installPlanApproval: {{ coalesce $installPlanValue $.Values.global.options.installPlanApproval }}
+ {{- if $subs.config }}
+ {{- if $subs.config.env }}
+ config:
+ env:
+ {{- range $subs.config.env }}
+ - name: {{ .name }}
+ value: {{ .value }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if $.Values.global.options.useCSV }}
+ startingCSV: {{ $subs.csv }}
+ {{- else if $subs.csv }}
+ startingCSV: {{ $subs.csv }}
+ {{- end }}
+---
+{{- end }}
+{{- end }}
+{{- else if not $subs.disabled }}
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: {{ $subs.name }}
+ namespace: {{ default "openshift-operators" $subs.namespace }}
+spec:
+ name: {{ $subs.name }}
+ source: {{ default "redhat-operators" $subs.source }}
+ sourceNamespace: {{ default "openshift-marketplace" $subs.sourceNamespace }}
+ {{- if $subs.channel }}
+ channel: {{ $subs.channel }}
+ {{- end }}
+ installPlanApproval: {{ coalesce $installPlanValue $.Values.global.options.installPlanApproval }}
+ {{- if $subs.config }}
+ {{- if $subs.config.env }}
+ config:
+ env:
+ {{- range $subs.config.env }}
+ - name: {{ .name }}
+ value: {{ .value }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if $.Values.global.options.useCSV }}
+ startingCSV: {{ $subs.csv }}
+ {{- else if $subs.csv }}
+ startingCSV: {{ $subs.csv }}
+ {{- end }}
+---
+{{- end }}
+{{- end }}
+---
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/_helpers.tpl b/common/clustergroup/templates/imperative/_helpers.tpl
new file mode 100644
index 00000000..f75e781e
--- /dev/null
+++ b/common/clustergroup/templates/imperative/_helpers.tpl
@@ -0,0 +1,65 @@
+# Pseudo-code
+# 1. Get the pattern's CR
+# 2. If there is a secret called vp-private-repo-credentials in the current namespace, fetch it
+# 3. If it is an http secret, generate the correct URL
+# 4. If it is an ssh secret, create the private ssh key and make sure the git clone works
+
+{{/* git-init InitContainer */}}
+{{- define "imperative.initcontainers.gitinit" }}
+- name: git-init
+ image: {{ $.Values.clusterGroup.imperative.image }}
+ imagePullPolicy: {{ $.Values.clusterGroup.imperative.imagePullPolicy }}
+ env:
+ - name: HOME
+ value: /git/home
+ command:
+ - 'sh'
+ - '-c'
+ - >-
+ if ! oc get secrets -n openshift-gitops vp-private-repo-credentials &> /dev/null; then
+ URL="{{ $.Values.global.repoURL }}";
+ else
+ if ! oc get secrets -n openshift-gitops vp-private-repo-credentials -o go-template='{{ `{{index .data.sshPrivateKey | base64decode}}` }}' &>/dev/null; then
+ U="$(oc get secret -n openshift-gitops vp-private-repo-credentials -o go-template='{{ `{{index .data.username | base64decode }}` }}')";
+ P="$(oc get secret -n openshift-gitops vp-private-repo-credentials -o go-template='{{ `{{index .data.password | base64decode }}` }}')";
+ URL=$(echo {{ $.Values.global.repoURL }} | sed -E "s/(https?:\/\/)/\1${U}:${P}@/");
+ echo "USER/PASS: ${URL}";
+ else
+ S="$(oc get secret -n openshift-gitops vp-private-repo-credentials -o go-template='{{ `{{index .data.sshPrivateKey | base64decode }}` }}')";
+ mkdir -p --mode 0700 "${HOME}/.ssh";
+ echo "${S}" > "${HOME}/.ssh/id_rsa";
+ chmod 0600 "${HOME}/.ssh/id_rsa";
+ URL=$(echo {{ $.Values.global.repoURL }} | sed -E "s/(https?:\/\/)/\1git@/");
+ git config --global core.sshCommand "ssh -i "${HOME}/.ssh/id_rsa" -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no";
+ echo "SSH: ${URL}";
+ fi;
+ fi;
+ mkdir /git/{repo,home};
+ git clone --single-branch --branch {{ $.Values.global.targetRevision }} --depth 1 -- "${URL}" /git/repo;
+ chmod 0770 /git/{repo,home};
+ volumeMounts:
+ - name: git
+ mountPath: "/git"
+{{- end }}
+
+{{/* Final done container */}}
+{{- define "imperative.containers.done" }}
+- name: "done"
+ image: {{ $.Values.clusterGroup.imperative.image }}
+ imagePullPolicy: {{ $.Values.clusterGroup.imperative.imagePullPolicy }}
+ command:
+ - 'sh'
+ - '-c'
+ - 'echo'
+ - 'done'
+ - '\n'
+{{- end }}
+
+{{/* volume-mounts for all containers */}}
+{{- define "imperative.volumemounts" }}
+- name: git
+ mountPath: "/git"
+- name: values-volume
+ mountPath: /values/values.yaml
+ subPath: values.yaml
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/clusterrole.yaml b/common/clustergroup/templates/imperative/clusterrole.yaml
new file mode 100644
index 00000000..e3646917
--- /dev/null
+++ b/common/clustergroup/templates/imperative/clusterrole.yaml
@@ -0,0 +1,21 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{/* This is always defined as we always unseal the cluster with an imperative job */}}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ $.Values.clusterGroup.imperative.clusterRoleName }}
+rules:
+{{- if $.Values.clusterGroup.imperative.clusterRoleYaml -}}
+ {{ toYaml $.Values.clusterGroup.imperative.clusterRoleYaml | nindent 2 }}
+{{- else }}
+ - apiGroups:
+ - '*'
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/configmap.yaml b/common/clustergroup/templates/imperative/configmap.yaml
new file mode 100644
index 00000000..8ca5a176
--- /dev/null
+++ b/common/clustergroup/templates/imperative/configmap.yaml
@@ -0,0 +1,12 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{/* This is always defined as we always unseal the cluster with an imperative job */}}
+{{- $valuesyaml := toYaml $.Values -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $.Values.clusterGroup.imperative.valuesConfigMap }}-{{ $.Values.clusterGroup.name }}
+ namespace: {{ $.Values.clusterGroup.imperative.namespace}}
+data:
+ values.yaml: |
+{{ tpl $valuesyaml . | indent 4 }}
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/job.yaml b/common/clustergroup/templates/imperative/job.yaml
new file mode 100644
index 00000000..cb092649
--- /dev/null
+++ b/common/clustergroup/templates/imperative/job.yaml
@@ -0,0 +1,69 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{/* Only rendered when needed (i.e. when imperative jobs are defined) */}}
+{{- if (and $.Values.clusterGroup.imperative (gt (len $.Values.clusterGroup.imperative.jobs) 0)) -}}
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ $.Values.clusterGroup.imperative.cronJobName }}
+ namespace: {{ $.Values.clusterGroup.imperative.namespace}}
+spec:
+ schedule: {{ $.Values.clusterGroup.imperative.schedule | quote }}
+ # if previous Job is still running, skip execution of a new Job
+ concurrencyPolicy: Forbid
+ jobTemplate:
+ spec:
+ activeDeadlineSeconds: {{ $.Values.clusterGroup.imperative.activeDeadlineSeconds }}
+ template:
+ metadata:
+ name: {{ $.Values.clusterGroup.imperative.jobName }}
+ spec:
+ serviceAccountName: {{ $.Values.clusterGroup.imperative.serviceAccountName }}
+ initContainers:
+ # git init happens in /git/repo so that we can set the folder to 0770 permissions
+ # reason for that is ansible refuses to create temporary folders in there
+ {{- include "imperative.initcontainers.gitinit" . | indent 12 }}
+ {{- range $.Values.clusterGroup.imperative.jobs }}
+ {{- if ne (.disabled | default "false" | toString | lower ) "true" }}
+ - name: {{ .name }}
+ image: {{ .image | default $.Values.clusterGroup.imperative.image }}
+ imagePullPolicy: {{ $.Values.clusterGroup.imperative.imagePullPolicy }}
+ env:
+ - name: HOME
+ value: /git/home
+ workingDir: /git/repo
+ # We have a default timeout of 600s for each playbook. Can be overridden
+ # on a per-job basis
+ command:
+ - timeout
+ - {{ .timeout | default "600" | quote }}
+ - ansible-playbook
+ {{- if .verbosity }}
+ - {{ .verbosity }}
+ {{- end }}
+ {{- if .tags }}
+ - -t
+ - {{ .tags }}
+ {{- end }}
+ - -e
+ - "@/values/values.yaml"
+ {{- range .extravars }}
+ - -e
+ - {{ . | quote }}
+ {{- end }}
+ - {{ .playbook }}
+ volumeMounts:
+ {{- include "imperative.volumemounts" . | indent 16 }}
+ {{- end }}
+ {{- end }}
+ containers:
+ {{- include "imperative.containers.done" . | indent 12 }}
+ volumes:
+ - name: git
+ emptyDir: {}
+ - name: values-volume
+ configMap:
+ name: {{ $.Values.clusterGroup.imperative.valuesConfigMap }}-{{ $.Values.clusterGroup.name }}
+ restartPolicy: Never
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/namespace.yaml b/common/clustergroup/templates/imperative/namespace.yaml
new file mode 100644
index 00000000..ee7b8adb
--- /dev/null
+++ b/common/clustergroup/templates/imperative/namespace.yaml
@@ -0,0 +1,10 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{/* This is always defined as we always unseal the cluster with an imperative job */}}
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ name: {{ $.Values.clusterGroup.imperative.namespace }}
+ argocd.argoproj.io/managed-by: {{ $.Values.global.pattern }}-{{ $.Values.clusterGroup.name }}
+ name: {{ $.Values.clusterGroup.imperative.namespace }}
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/rbac.yaml b/common/clustergroup/templates/imperative/rbac.yaml
new file mode 100644
index 00000000..1a4b3e2b
--- /dev/null
+++ b/common/clustergroup/templates/imperative/rbac.yaml
@@ -0,0 +1,30 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{/* This is always defined as we always unseal the cluster with an imperative job */}}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ $.Values.clusterGroup.imperative.namespace }}-cluster-admin-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ $.Values.clusterGroup.imperative.clusterRoleName }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ $.Values.clusterGroup.imperative.serviceAccountName }}
+ namespace: {{ $.Values.clusterGroup.imperative.namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ $.Values.clusterGroup.imperative.namespace }}-admin-rolebinding
+ namespace: {{ $.Values.clusterGroup.imperative.namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ $.Values.clusterGroup.imperative.roleName }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ $.Values.clusterGroup.imperative.serviceAccountName }}
+ namespace: {{ $.Values.clusterGroup.imperative.namespace }}
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/role.yaml b/common/clustergroup/templates/imperative/role.yaml
new file mode 100644
index 00000000..63ad37d1
--- /dev/null
+++ b/common/clustergroup/templates/imperative/role.yaml
@@ -0,0 +1,20 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{/* This is always defined as we always unseal the cluster with an imperative job */}}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ $.Values.clusterGroup.imperative.roleName }}
+ namespace: {{ $.Values.clusterGroup.imperative.namespace }}
+rules:
+{{- if $.Values.clusterGroup.imperative.roleYaml -}}
+ {{ toYaml $.Values.clusterGroup.imperative.roleYaml | nindent 2 }}
+{{- else }}
+ - apiGroups:
+ - '*'
+ resources:
+ - '*'
+ verbs:
+ - '*'
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/serviceaccount.yaml b/common/clustergroup/templates/imperative/serviceaccount.yaml
new file mode 100644
index 00000000..ac051348
--- /dev/null
+++ b/common/clustergroup/templates/imperative/serviceaccount.yaml
@@ -0,0 +1,10 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+{{/* This is always defined as we always unseal the cluster with an imperative job */}}
+{{- if $.Values.clusterGroup.imperative.serviceAccountCreate -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ $.Values.clusterGroup.imperative.serviceAccountName }}
+ namespace: {{ $.Values.clusterGroup.imperative.namespace }}
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/imperative/unsealjob.yaml b/common/clustergroup/templates/imperative/unsealjob.yaml
new file mode 100644
index 00000000..4db14be3
--- /dev/null
+++ b/common/clustergroup/templates/imperative/unsealjob.yaml
@@ -0,0 +1,60 @@
+{{- if eq (.Values.global.secretStore.backend | default "vault") "vault" }}
+{{- if not (eq .Values.enabled "plumbing") }}
+{{- if $.Values.clusterGroup.isHubCluster }}
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: unsealvault-cronjob
+  namespace: {{ $.Values.clusterGroup.imperative.namespace }}
+spec:
+ schedule: {{ $.Values.clusterGroup.imperative.insecureUnsealVaultInsideClusterSchedule | quote }}
+ # if previous Job is still running, skip execution of a new Job
+ concurrencyPolicy: Forbid
+ jobTemplate:
+ spec:
+ activeDeadlineSeconds: {{ $.Values.clusterGroup.imperative.activeDeadlineSeconds }}
+ template:
+ metadata:
+ name: unsealvault-job
+ spec:
+ serviceAccountName: {{ $.Values.clusterGroup.imperative.serviceAccountName }}
+ initContainers:
+ # git init happens in /git/repo so that we can set the folder to 0770 permissions
+ # reason for that is ansible refuses to create temporary folders in there
+ {{- include "imperative.initcontainers.gitinit" . | indent 12 }}
+ - name: unseal-playbook
+ image: {{ $.Values.clusterGroup.imperative.image }}
+ imagePullPolicy: {{ $.Values.clusterGroup.imperative.imagePullPolicy }}
+ env:
+ - name: HOME
+ value: /git/home
+ workingDir: /git/repo
+ # We have a default timeout of 600s for each playbook. Can be overridden
+ # on a per-job basis
+ command:
+ - timeout
+ - {{ .timeout | default "600" | quote }}
+ - ansible-playbook
+ {{- if $.Values.clusterGroup.imperative.verbosity }}
+ - {{ $.Values.clusterGroup.imperative.verbosity }}
+ {{- end }}
+ - -e
+ - "@/values/values.yaml"
+ - -t
+ - 'vault_init,vault_unseal,vault_secrets_init,vault_spokes_init'
+ - "common/ansible/playbooks/vault/vault.yaml"
+ volumeMounts:
+ {{- include "imperative.volumemounts" . | indent 16 }}
+ containers:
+ {{- include "imperative.containers.done" . | indent 12 }}
+ volumes:
+ - name: git
+ emptyDir: {}
+ - name: values-volume
+ configMap:
+ name: {{ $.Values.clusterGroup.imperative.valuesConfigMap }}-{{ $.Values.clusterGroup.name }}
+ restartPolicy: Never
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/plumbing/applications.yaml b/common/clustergroup/templates/plumbing/applications.yaml
new file mode 100644
index 00000000..3706d839
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/applications.yaml
@@ -0,0 +1,286 @@
+{{- if not (eq .Values.enabled "core") }}
+{{- $namespace := print $.Values.global.pattern "-" $.Values.clusterGroup.name }}
+{{- if (eq .Values.enabled "plumbing") }}
+{{- $namespace = "openshift-gitops" }}
+{{- end }}
+{{- range .Values.clusterGroup.applications }}
+{{- if .disabled }} {{- /* This allows us to null out an Application entry by specifying disabled: true in an override file */}}
+{{- else if or (.generators) (.generatorFile) (.useGeneratorValues) (.destinationServer) (.destinationNamespace) }}
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+ name: {{ .name }}
+ namespace: {{ $namespace }}
+ labels:
+ app: {{ .name }}
+spec:
+ {{- if .generators }}
+ generators: {{ .generators | toPrettyJson }}
+ {{- else }}
+ generators:
+ - git:
+ repoURL: {{ $.Values.global.repoURL }}
+ revision: {{ $.Values.global.targetRevision }}
+ {{- if .generatorFile }}
+ files:
+ - path: {{ .generatorFile | quote }}
+ {{- end }}
+ {{- end }}
+ template:
+ metadata:
+ name: {{ coalesce .namespace $namespace }}
+ spec:
+ project: {{ .project }}
+ {{- if .syncPolicy }}
+ syncPolicy: {{ .syncPolicy | toPrettyJson }}
+ {{- else }}
+ syncPolicy:
+ automated: {}
+ retry:
+ limit: {{ default 20 $.Values.global.options.applicationRetryLimit }}
+ {{- end }}
+ {{- if .ignoreDifferences }}
+ ignoreDifferences: {{ .ignoreDifferences | toPrettyJson }}
+ {{- end }}
+ source:
+ repoURL: {{ coalesce .repoURL $.Values.global.repoURL }}
+ targetRevision: {{ coalesce .targetRevision $.Values.global.targetRevision }}
+ {{- if .chart }}
+ chart: {{ .chart }}
+ {{- end }}
+ {{- if .path }}
+ path: {{ .path }}
+ {{- end }}
+ {{- if .plugin }}
+ plugin: {{ .plugin | toPrettyJson }}
+ {{- end }}
+ {{- if not .kustomize }}
+ helm:
+ ignoreMissingValueFiles: true
+ valueFiles:
+ {{- include "clustergroup.app.globalvalues.valuefiles" $ | nindent 12 }}
+ {{- range $valueFile := $.Values.clusterGroup.sharedValueFiles }}
+ - {{ tpl $valueFile $ | quote }}
+ {{- end }}
+ {{- range $valueFile := .extraValueFiles }}
+ - {{ tpl $valueFile $ | quote }}
+ {{- end }}
+ {{- if .useGeneratorValues }}
+ values: |-
+ {{ `{{ values }}` }}
+ {{- end }}
+ parameters:
+ {{- include "clustergroup.app.globalvalues.helmparameters" $ | nindent 12 }}
+ - name: global.repoURL
+ value: {{ $.Values.global.repoURL }}
+ - name: global.targetRevision
+ value: {{ $.Values.global.targetRevision }}
+ - name: global.namespace
+ value: {{ $.Values.global.namespace }}
+ - name: clusterGroup.name
+            value: {{ $.Values.clusterGroup.name }}
+ {{- range .extraHubClusterDomainFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.hubClusterDomain }}
+ {{- end }}
+ {{- range .extraLocalClusterDomainFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.localClusterDomain }}
+ {{- end }}
+ {{- range .extraRepoURLFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.repoURL }}
+ {{- end }}
+ {{- range .extraTargetRevisionFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.targetRevision }}
+ {{- end }}
+ {{- range .extraNamespaceFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.namespace }}
+ {{- end }}
+ {{- range .extraPatternNameFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.pattern }}
+ {{- end }}
+ {{- range $k, $v := $.Values.extraParametersNested }}
+ - name: {{ $k }}
+ value: {{ $v }}
+ {{- end }}
+ {{- range .overrides }}
+ - name: {{ .name }}
+ value: {{ .value | quote }}
+ {{- if .forceString }}
+ forceString: true
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ destination:
+ server: {{ coalesce .destinationServer "https://kubernetes.default.svc" }}
+ namespace: {{ coalesce .destinationNamespace .namespace $namespace }}
+{{- else }}
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: {{ .name }}
+ namespace: {{ $namespace }}
+ labels:
+ validatedpatterns.io/pattern: {{ $.Values.global.pattern }}
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io/foreground
+spec:
+ destination:
+ name: {{ $.Values.clusterGroup.targetCluster }}
+ namespace: {{ default $namespace .namespace }}
+ project: {{ .project }}
+ {{- if and .chart .chartVersion }} {{- /* if .chartVersion is set *and* .repoURL is undefined we assume this is a multisource app */}}
+ sources:
+ - repoURL: {{ coalesce .repoURL $.Values.global.repoURL }}
+ {{- /* We do not allow overriding the values with .targetRevision because when we use .targetRevision in a chart to specify the helm
+ chart, that revision (e.g. 0.0.1) won't exist in the git tree. So here we simply always take the pattern's git branch/commit */}}
+ targetRevision: {{ $.Values.global.targetRevision }}
+ ref: patternref
+ - repoURL: {{ coalesce .repoURL $.Values.global.multiSourceRepoUrl }}
+ chart: {{ .chart }}
+ targetRevision: {{ .chartVersion }}
+ {{- if .plugin }}
+ plugin: {{ .plugin | toPrettyJson }}
+ {{- else }}
+ helm:
+ ignoreMissingValueFiles: true
+ valueFiles:
+ {{- include "clustergroup.app.globalvalues.prefixedvaluefiles" $ | nindent 8 }}
+ {{- range $valueFile := $.Values.clusterGroup.sharedValueFiles }}
+ - {{ tpl $valueFile $ | quote }}
+ {{- end }}
+ {{- range $valueFile := .extraValueFiles }}
+ - {{ tpl $valueFile $ | quote }}
+ {{- end }}
+ parameters:
+ {{- include "clustergroup.app.globalvalues.helmparameters" $ | nindent 8 }}
+ {{- range .extraHubClusterDomainFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.hubClusterDomain }}
+ {{- end }}
+ {{- range .extraLocalClusterDomainFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.localClusterDomain }}
+ {{- end }}
+ {{- range .extraRepoURLFields }}
+ - name: {{ . }}
+ value: $ARGOCD_APP_SOURCE_REPO_URL
+ {{- end }}
+ {{- range .extraTargetRevisionFields }}
+ - name: {{ . }}
+ value: $ARGOCD_APP_SOURCE_TARGET_REVISION
+ {{- end }}
+ {{- range .extraNamespaceFields }}
+ - name: {{ . }}
+ value: $ARGOCD_APP_NAMESPACE
+ {{- end }}
+ {{- range .extraPatternNameFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.pattern }}
+ {{- end }}
+ {{- range $k, $v := $.Values.extraParametersNested }}
+ - name: {{ $k }}
+ value: {{ $v }}
+ {{- end }}
+ {{- range .overrides }}
+ - name: {{ .name }}
+ value: {{ .value | quote }}
+ {{- if .forceString }}
+ forceString: true
+ {{- end }}
+ {{- end }}{{- /* range .overrides */}}
+ {{- if .fileParameters }}
+ fileParameters:
+ {{- range .fileParameters }}
+ - name: {{ .name }}
+ path: {{ .path }}
+ {{- end }}
+ {{- end }}{{- /* if .fileParameters */}}
+ {{- end }}{{- /* if .plugin */}}
+ {{- else }} {{- /* if .chartVersion */}}
+ source:
+ repoURL: {{ coalesce .repoURL $.Values.global.repoURL }}
+ targetRevision: {{ coalesce .targetRevision $.Values.global.targetRevision }}
+ {{- if .chart }}
+ chart: {{ .chart }}
+ {{- else }}
+ path: {{ .path }}
+ {{- end }}{{- /* if .chart */}}
+ {{- if .plugin }}
+ plugin: {{ .plugin | toPrettyJson }}
+ {{- else if not .kustomize }}
+ helm:
+ ignoreMissingValueFiles: true
+ valueFiles:
+ {{- include "clustergroup.app.globalvalues.valuefiles" $ | nindent 6 }}
+ {{- range $valueFile := $.Values.clusterGroup.sharedValueFiles }}
+ - {{ tpl $valueFile $ | quote }}
+ {{- end }}
+ {{- range $valueFile := .extraValueFiles }}
+ - {{ tpl $valueFile $ | quote }}
+ {{- end }}
+ parameters:
+ {{- include "clustergroup.app.globalvalues.helmparameters" $ | nindent 8 }}
+ {{- range .extraHubClusterDomainFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.hubClusterDomain }}
+ {{- end }}
+ {{- range .extraLocalClusterDomainFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.localClusterDomain }}
+ {{- end }}
+ {{- range .extraRepoURLFields }}
+ - name: {{ . }}
+ value: $ARGOCD_APP_SOURCE_REPO_URL
+ {{- end }}
+ {{- range .extraTargetRevisionFields }}
+ - name: {{ . }}
+ value: $ARGOCD_APP_SOURCE_TARGET_REVISION
+ {{- end }}
+ {{- range .extraNamespaceFields }}
+ - name: {{ . }}
+ value: $ARGOCD_APP_NAMESPACE
+ {{- end }}
+ {{- range .extraPatternNameFields }}
+ - name: {{ . }}
+ value: {{ $.Values.global.pattern }}
+ {{- end }}
+ {{- range $k, $v := $.Values.extraParametersNested }}
+ - name: {{ $k }}
+ value: {{ $v }}
+ {{- end }}
+ {{- range .overrides }}
+ - name: {{ .name }}
+ value: {{ .value | quote }}
+ {{- if .forceString }}
+ forceString: true
+ {{- end }}
+ {{- end }}{{- /* range .overrides */}}
+ {{- if .fileParameters }}
+ fileParameters:
+ {{- range .fileParameters }}
+ - name: {{ .name }}
+ path: {{ .path }}
+ {{- end }}{{- /* range .fileParameters */}}
+ {{- end }}{{- /* if .fileParameters */}}
+ {{- end }}{{- /* if .plugin */}}
+ {{- end }}{{- /* if .chartVersion */}}
+ {{- if .ignoreDifferences }}
+ ignoreDifferences: {{ .ignoreDifferences | toPrettyJson }}
+ {{- end }}
+ {{- if .syncPolicy }}
+ syncPolicy: {{ .syncPolicy | toPrettyJson }}
+ {{- else }}
+ syncPolicy:
+ automated: {}
+ retry:
+        limit: {{ default 20 $.Values.global.options.applicationRetryLimit }}
+ {{- end }}{{- /* .syncPolicy */}}
+---
+{{- end }}{{- /* if or (.generators) (.generatorFile) (.useGeneratorValues) (.destinationServer) (.destinationNamespace) */}}
+{{- end }}{{- /* range .Values.clusterGroup.applications */}}
+{{- end }}{{- /* if not (eq .Values.enabled "core") */}}
diff --git a/common/clustergroup/templates/plumbing/argocd-cmp-plugin-cms.yaml b/common/clustergroup/templates/plumbing/argocd-cmp-plugin-cms.yaml
new file mode 100644
index 00000000..6f86c316
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/argocd-cmp-plugin-cms.yaml
@@ -0,0 +1,12 @@
+{{- range $cmp := $.Values.clusterGroup.argoCD.configManagementPlugins }}
+{{- if $cmp.pluginConfig }}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: "argocd-cmp-{{ $cmp.name }}"
+ namespace: {{ $.Values.global.pattern }}-{{ $.Values.clusterGroup.name }}
+data:
+ "plugin.yaml": | {{ tpl $cmp.pluginConfig $ | nindent 4 }}
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/plumbing/argocd-super-role.yaml b/common/clustergroup/templates/plumbing/argocd-super-role.yaml
new file mode 100644
index 00000000..2d5f8f76
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/argocd-super-role.yaml
@@ -0,0 +1,43 @@
+{{- if (eq .Values.enabled "all") }}
+# WARNING: ONLY USE THIS FOR MANAGING CLUSTERS NOT FOR REGULAR USERS
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: openshift-gitops-cluster-admin-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: openshift-gitops-argocd-application-controller
+ namespace: openshift-gitops
+ # NOTE: THIS MUST BE FIXED FOR MULTITENANT SETUP
+ - kind: ServiceAccount
+ name: openshift-gitops-argocd-server
+ namespace: openshift-gitops
+---
+# WARNING: ONLY USE THIS FOR MANAGING CLUSTERS NOT FOR REGULAR USERS
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ $.Values.global.pattern }}-{{ .Values.clusterGroup.name }}-cluster-admin-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ # This is the {ArgoCD.name}-argocd-application-controller
+ name: {{ .Values.clusterGroup.name }}-gitops-argocd-application-controller
+ namespace: {{ $.Values.global.pattern }}-{{ .Values.clusterGroup.name }}
+ # NOTE: THIS MUST BE FIXED FOR MULTITENANT SETUP
+ - kind: ServiceAccount
+ # This is the {ArgoCD.name}-argocd-server
+ name: {{ .Values.clusterGroup.name }}-gitops-argocd-server
+ namespace: {{ $.Values.global.pattern }}-{{ .Values.clusterGroup.name }}
+ # NOTE: This is needed starting with gitops-1.5.0 (see issue common#76)
+ - kind: ServiceAccount
+ name: {{ .Values.clusterGroup.name }}-gitops-argocd-dex-server
+ namespace: {{ $.Values.global.pattern }}-{{ .Values.clusterGroup.name }}
+{{- end }}
diff --git a/common/clustergroup/templates/plumbing/argocd.yaml b/common/clustergroup/templates/plumbing/argocd.yaml
new file mode 100644
index 00000000..12e362aa
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/argocd.yaml
@@ -0,0 +1,167 @@
+{{- if (eq .Values.enabled "all") }}
+{{- $namespace := print $.Values.global.pattern "-" $.Values.clusterGroup.name }}
+apiVersion: argoproj.io/v1beta1
+kind: ArgoCD
+metadata:
+ finalizers:
+ - argoproj.io/finalizer
+ # Changing the name affects the ClusterRoleBinding, the generated secret,
+ # route URL, and argocd.argoproj.io/managed-by annotations
+ name: {{ .Values.clusterGroup.name }}-gitops
+ namespace: {{ $namespace }}
+ annotations:
+ argocd.argoproj.io/compare-options: IgnoreExtraneous
+spec:
+# Adding health checks to argocd to prevent pvc resources
+# that aren't bound state from blocking deployments
+ resourceHealthChecks:
+ - kind: PersistentVolumeClaim
+ check: |
+ hs = {}
+ if obj.status ~= nil then
+ if obj.status.phase ~= nil then
+ if obj.status.phase == "Pending" then
+ hs.status = "Healthy"
+ hs.message = obj.status.phase
+ return hs
+ elseif obj.status.phase == "Bound" then
+ hs.status = "Healthy"
+ hs.message = obj.status.phase
+ return hs
+ end
+ end
+ end
+ hs.status = "Progressing"
+ hs.message = "Waiting for PVC"
+ return hs
+
+ applicationInstanceLabelKey: argocd.argoproj.io/instance
+ applicationSet:
+ resources:
+ limits:
+ cpu: "2"
+ memory: 1Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+ controller:
+ processors: {}
+ resources:
+ limits:
+ cpu: "4"
+ memory: 4Gi
+ requests:
+ cpu: 500m
+ memory: 2Gi
+ sso:
+ provider: dex
+ dex:
+ openShiftOAuth: true
+ resources:
+ limits:
+ cpu: 500m
+ memory: 256Mi
+ requests:
+ cpu: 250m
+ memory: 128Mi
+ initialSSHKnownHosts: {}
+ rbac:
+ defaultPolicy: role:admin
+ repo:
+{{- if len $.Values.clusterGroup.argoCD.initContainers }}
+ initContainers: {{ $.Values.clusterGroup.argoCD.initContainers | toPrettyJson }}
+{{- end }}
+{{- if len $.Values.clusterGroup.argoCD.configManagementPlugins }}
+ sidecarContainers:
+{{- range $cmp := $.Values.clusterGroup.argoCD.configManagementPlugins }}
+ - name: {{ $cmp.name }}
+ command: [/var/run/argocd/argocd-cmp-server]
+{{- if $cmp.pluginArgs }}
+ args: {{ $cmp.pluginArgs | toPrettyJson }}
+{{- end }}
+ image: {{ $cmp.image }}
+ imagePullPolicy: {{ coalesce $cmp.imagePullPolicy "Always" }}
+ securityContext:
+ runAsNonRoot: true
+ volumeMounts:
+ - mountPath: /var/run/argocd
+ name: var-files
+ - mountPath: /home/argocd/cmp-server/plugins
+ name: plugins
+ - mountPath: /tmp
+ name: cmp-tmp
+{{- if $cmp.pluginConfig }}
+ - mountPath: /home/argocd/cmp-server/config/plugin.yaml
+ subPath: plugin.yaml
+ name: {{ $cmp.name }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- if len $.Values.clusterGroup.argoCD.configManagementPlugins }}
+ volumes:
+ - emptyDir: {}
+ name: cmp-tmp
+{{- range $cmp := $.Values.clusterGroup.argoCD.configManagementPlugins }}
+ - configMap:
+ name: "argocd-cmp-{{ $cmp.name }}"
+ name: {{ $cmp.name }}
+{{- end }}
+{{- end }}
+ resources:
+ limits:
+ cpu: "1"
+ memory: 512Mi
+ requests:
+ cpu: 250m
+ memory: 256Mi
+ resourceExclusions: |
+ - apiGroups:
+ - tekton.dev
+ kinds:
+ - TaskRun
+ - PipelineRun
+{{- if .Values.global.excludeESO }}
+ - apiGroups:
+ - external-secrets.io
+ kinds:
+ - ExternalSecret
+{{- end }}
+ server:
+ autoscale:
+ enabled: false
+ grpc:
+ ingress:
+ enabled: false
+ ingress:
+ enabled: false
+ resources:
+ limits:
+ cpu: 500m
+ memory: 256Mi
+ requests:
+ cpu: 125m
+ memory: 128Mi
+ route:
+ enabled: true
+ tls:
+ insecureEdgeTerminationPolicy: Redirect
+ termination: reencrypt
+ service:
+ type: ""
+ tls:
+ ca: {}
+status:
+---
+apiVersion: console.openshift.io/v1
+kind: ConsoleLink
+metadata:
+ name: {{ .Values.clusterGroup.name }}-gitops-link
+ namespace: {{ $namespace }}
+spec:
+ applicationMenu:
+ section: OpenShift GitOps
+ imageURL: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQwAAAEMCAYAAAAxjIiTAABtCklEQVR4nOy9B5gkx30f+qvqMHHj5RwA3OGAQwaIQ86JYBJFUgyiRJHm06Msy7QtPkkkre9ZFml9T5ItW6YtySZNijkiA0Q85EM6AAfgIu4Ol/Pepokd6v++qu7Zm9udmZ3QPTML9I/fcHE7O9011VW/+uc/R4QIESLUiYgwIkSIUDciwogQIULdiAgjQoQIdSMijAgRItSNiDAiRIhQNyLCiBAhQt2ICCNChAh1IyKMCBEi1I2IMCJEiFA3IsKIECFC3YgII0KECHUjIowIESLUjYgwIkSIUDciwogQIULdiAgjQoQIdSMijAgRItSNiDAiRIhQNyLCiBAhQt2ICCNChAh1IyKMCBEi1I2IMCJEiFA3IsKIECFC3YgII0KECHUjIowIESLUjYgwIkSIUDciwogQIULdiAgjQoQIdSMijAgRItSNiDAiRIhQNyLCiBAhQt2ICCNChAh1IyKMCBEi1I2IMCJEiFA39E4PIEK4uPduQnzVCDRiIOIQjMDAAJA6LggAo1M/S2AT/1cGOvU7kv8jBsbkdcn7tfw3995jROqCrutgDWZj6XmTLxZhJiJ6iu8y/HDDBswaOBu6yyH3rEtFMIfDYRx6UWeWUdQ1xnXOSbc1YRK0mO5S3AXFGbEYgBgHmRzQAGYAjHk8IWmBbDDmcIIlOCxBKALIOy4VdWIFMGZpGhwXwo05wnE0jbjG4QoHBo/B4QyCGI4sjuPz/UanpypCE4gIYwbiVy8dgx5jSHAd4Jp39MsnKQg3n9uHe986Eou5RpoIAwAGGKPZAJtHDHMBzGHALACDYOgjIA1CEkCcATFf6tT8taFNrBBP+nDlXbyf5BCYJAz5yjJgnAijjGEYwBBAxwCoFyMcJ2LDNuMjNljmxl0566U1aUlC4IqK5OUZNMHw/No0vs6iZdmtiJ7MDMJTb2dgFQVcYSNl6Bgby2lIxOIQop8YLdQJywWjlYyxFYywRJKEJAwAvQBS8AihXXYrt0QmAMYAnARwlED7wPg7JGi3YLSHEzukA2OOqxeEbglT0lA8DodiuOPcmBRw2jTcCPUgehpdigf3ONCzOXW0M9/kQKKgua4+QKDFYOIMRmwNY2wNAWcxYCGAPikpzADblA2gANAIAztAwE4CthBhK4F2c7BDI+gdXkCjwjYNtUiZYMi6PfjQhZGdvpOICKOL8K1rCCv+5zg0JsCtIrJunMMspHXwxZpgaxnDxWA4D4QzAMwH0FOvxEAT/zcJPhlVOsjLf0cVPktlRtAp12YNLy5BwCgDDoNhFwibiOg1AbxlAIfZsMiwOZwcMlEQWXzkgoWNXT1CIIgIo8NY/04WTtZWOjyLWRgb1vV4zJnHGFvNCJcBeB8DzgOwAFC2hmkJopwc5KbncvMyBo0zcM6gaVD/Xfr3xEv9redDUWThf04yA/meFPWTSO1uVxCEfBHBdcn/t/d7+SLh/V052TSgYbieOkMHQXgTjL8gBNsoSOw4kjlwfNnslS6Ts+YCKZ7EunMjI2o7EBFGh3DXGwWktDzcvAOXyNC4NodrdCEB14DhcgCrAWWkrKpeTGxE/zSXm13TGHSNwdA5TIPB1Dl0Xf6OeyShMfV3vJwQGtvI/s1PCRUlEpE/FXkowgAcR8BxBWybYDkCtnrRBNFMJrZpINWYIwC2AdgggGeInDdN2zhRSFpukhKw+lO4Y3FEHGEiIow24tEdeTDHUv/99F6NXbEwNw9g5zGwGwi4lgFrAPTXkiKITkkNmiZJgSMmX6b3U/5b88mBsSobkSprJ0Gg0v3IlzIkSSgCcQSKNqFouSjaApYticUnkSrq0SS4BJxkwGYQnmSMnmYCb26+cPbQeZtHldGHx5K48cyIPIJGRBhtwN07c0gWbMSdHPIsnnTJWa0x3CjAbmHA+QDmVSKJiRPYJw
gpNUhSSMQ0xGOa+m/5u5I6MRFUFRYbBICJgDCftCRJeAQiUCy6yBddFCyPVMrVmRokIlWXwwBeg8CjxOkJAtut28U8j/cgbzn44MWDbft+73ZEhBESHt6TBc/YKtrxNV2wtTlawDitA9idDLgOwBIAZqXPlk5ZqVoogojrSMY1xM1TBMHKjI1dzA91ofy7SJVGqi1S+sgVXOSKLoqWUOqNmF76KALYA+AJIjwAwV65/aLBo49uHlVLXaTjuH15rC3f6d2KiDBCwBM7crDzOeRhGRqMFTqx2xjwQTBcDC9o6jSUJIkSSUgJIp3QkfBJQqoYvu3xPYPS93UFKZUll3eQlQRScOA4njEVtSWPYwBeIsHuFZweExb2mZrraskUbj473b4v8i5DRBgB4bHNNohyakZtx4mD03ncxYfA6AMAO9uPjzgNJa/kBEkkdaQTGkxDUzaIctH9vYwSKQifPLJ5F5m8g3zBVcbUaeweOYA2E9jdBHrAFWJr3IxbBEImlsRHz6wo5EWogogwAsBj2/JwrTG4jpEApws46BNgeD+g4iVO83KUpAlJCPEYR48kiaShJImSqvFekiQaRYkYlORhCUUc41lH2T7c2kZTm4BtINxPhF/mdXpzrk2WlUzipkjiqBsRYTSJB3cRYoVxCBAKtpvQiS5mjD5JDB9gwNLJRszSQjZ1jlRSQ2/KUHYJ/T2obgSFUgSsI0hJG2NZWxGIJBJRfXG7AHYR4W4CfkEkNsWMmEXE4FAP7jg/2hK1EM1OE3jknTzY6CgsGAYHzuMcnyGiDwFYWYkoOAdipoa+lI6e1ClpIiKJ4CDJQwjAsl2M5xyMZmwUVN4NVZM4JHHsIKJfMmI/Fba2VY/ZLtPjuOXc3raPf6YgIowG8MiOLLjtYtR0eCpLq8DokwB+C8BZfobnBCQZaBpDMqahP20gndKVhyOSJsLFhNThEjI5GyMZB9mCo/5dZbE7ALaA8EMi9suhkeHd8+bMI8OI4frVkX1jMiLCqBNPbilini2wV+TmgdNHAfwugIsmu0ZLRJGKaxjoMZBK6jA0T+iIeKK9YL6tI5t3MJKxleRRgzgKAF4Ese+Qyx/gsfyQafbjhlXJdg+7qxERRi3QX+DxLV/2KkflKeXq7o0M9EUAN/rp4qf+1CeKdEKfIApdqh2dG30EH566QsotOzxmTUcco0TsEcbwj8TwvK7reUPTcf3qVLuH3ZWICKMGntmcw2ExwvqFeY4g9gUw+gSAReV/o4iCA8mEjsEeQ3k8dC0iim6EJI6SxDE85kkcrlvVxrEHYD9yGL5jFrHb6EnSDWcn2j7mbkNEGBWwfnsWju2gAGvQcNlHGMMfEOHCcjsF+QswGdMw2Gsqr0dEFDMDijiUjcPByTFLeVYEVdwMtlJTQP+DhPaAHuNjOo/hvUwcEWFMwtPb8jhycjtPJRZeqHH+hwA+4letOg2mwRVR9KcN9d8RUcw8yMVvuwJjGRtDYzYKRbe8znE5jgP4KZH4h0R2zhZ7MEe3rHlvqigRYfh4ansejmPBtZx+wfFxEP2hKlZTNkdyMemcoS9tYFafqRLAWGTMnPGQz7BoCyVtjIxbsJyK9g1BDK9AiP/quuy+WMIcJ8Zx65qeTgy5Y4gIA8AT2zLoORbDyf7Rc4jwr3xX6YRUUTp1UnENs/pjKjpTiwya7yr4NZSVfWNotKjsG5XVFDpGjP0AwLdu75+1+6mxPK5f+97xpLynCWPDdgsZkYddKCY457cB+AqAdeXBV0RQ4VmDPQYG+0wVqRkRxbsXjEElt0lJY2jMUpmyFWBL7dUV9Demw59gSd2Sf3fnRVM013cd3rOEcf9OQj5zBGnNmAPBvshAXwKwuPR+SapIJ3TMGYipn+/d2XpvIl9wcWKkqELO3cpG0V1E+G+c0fc1XR9maQM3LXt356W8J0swP7k1i/s0oBfG+RD4zwz0tclkYWgMcwdjWDIvoVSQiCzee0gmNCyam8D82XFVl6SCZH
kGY/iPBPZXdtE96++W3oXHt+c7MdS24T23DZ7cnsdQLq8nubgJwNcZcMXksO5kXMNcKVUkDVXJKmwVRHUM4gx+SyK4ROpEi9A9yOUdHBspqszYCpAqynqN2DfGdPZsWmPitjXvTvXkPUMYv9i4FX2xhXBdN80gPkOeveKM0vvkb9r+Hh1z+mOIGVpbbBUGZ0jpDDGNqS5gEg4R8i4h51eZaiem5rlMdTS+F3sLMVXnhDA0UlS2jSqRolsE6BuWW7wrFU/nIdK4ZW23t4hpDO+JR//jLW9gCT8PY7mTc7km/iXA/gDA7NL7ckuYOlNEMdBrqkzSdkCSRb/J1c9KkIQxZgdDGl6LgFK7gFL5f1Jp4Or3pWK901XsUXV9/ALD8KqO89JPvwp56ffvxsUl52gsY+HocFHVHq3Qr/oQIP6rzdg/9SXNkevO7OvQSMPBu/GZnoaHdo1jtZXGlvzRlZqmf40Bn/T7e0xAqiDzBj0VpF2Qm6vf1BDXqj8CuW/HLYGMU9FSXxXC7xvi/SSl4oiJl0cQCDh+pPQtSsThtTJg0Bib+O/S798NyBddHDtZwFhlFWUMDN9hTPtbztiBmBHDtavfHdGh746nVwWP7y7ixsdM/PryoQsY2P8L0J3yYJ/4Awb0pQxFFnGzPSpICTHOMBDTMJ0wU3QJw5ZbVcooSQ6SFBzVD0Qo+4dQ0gR1hQuY+VKJRyBS9eMqAE6SyUyVROR3smyB48NFlZci53/S9yiA6BfE6D/kkNuZzC3BHVdonRpuYJiJz6ouPLZtDBaBk128QiP2DQDXln9fqXbM6jOVGqLr7S9mk9I5+szpnVRyIZ4sCthljCHKCMIRXpEY0SXkUC9KjZcUcZQRyEySQJj/LIZGLUUczlRLtQvCr4m0P7/9wnWvPrzjddw+wyWNmfN0GsCj28cwUjjJepC+GcBfAqrloPquKhBLZ8oLMthnqgXaiY3WCGEMFV0labg+QdjilIrxbkFJbTG4JBGPQGYKeXh2DRtHTxZQsKfYNaQ++bQQ2p/tjw2/uNSZTXecP3Mres2MJ9IAntyWw2hhVDdIu4Nz/k0Aa8vfjxkc82fF0ZvubFesmMYwYE6vkuRdgcNZGwXXPdVe8F2OkpvZ4Fy9tBlCHtm8gyNDBVV3o4Ix9GUC/mxkvLh+4ax+cf0MTV7r/qfQAJ7cmkMxm9dIFx8Gk5IFW1N6T260ZExTZJFOdt7VJYlCEkZsGqPn0ZyN43mrrWPrJqg2DJI4NA7TJ49uBfONoYeHCip1vgJeg8CfuIX842Zvn5iJtUO7d/YbxFPbcsjncgZxfIQxSMnizNJ7pEK8NSyYlVAekW45pSVZSLVEr3J6jsrFlyueZr94L0NKGaZPHgZnE42kuwle5quLI0NFVYi4At4gwp8ULfuRVH9a3LJqZmW7dt+MN4GHNmdg5jLcNrTfAGP/yS/KOwEpUSycHW+bJ6QkUnM/A9KpYWvQGZDQGRI6h+Y/DkkQY7aDE3kHtmjMpfpeQEnqiGkeeXSjumI7QqknI+MVSWMTCXxlXIw+tii5lK5aM3OaRnffTDeIJ3YUMDw6qqdM/f0A/TWAVeXv96Z0LFC5AO2O3OQTVvS8S8jY4rT7u0SwXIGi6yoRSP697ovbRVeo92r01ogwQcwecZhdRhxecR7C0aEChsetSl64112Irww4vY8X0kQ3zhDvSffMcBN4/u1R7M/FWS/GbmVgfzPZwNmb1pUaUiVxKFDIvZ7UOZI6m6JilAdgiTKicMpUjfLxzeiH0iHoXUocjksqwOvkqDVlDRLwEhG+nEmmNgwIC7ec3f1Rod0zsw3ivjfzGGAnWEYkrgaxvwPo4vL3lWQxJ4FYyPUrmG+LSOm8pgHTEqS8HTnHOY0oIgQLSRxxnzi6wcbBfNKQksbJsamkAeAZIvZvDE3bWDQ03Hl2d9s0Zmx6+4p5Qxh3kxeB8JcAXVT6vXwgvUmphoRPFpIfegyuQrxrkUUJUqqIDJjhwh
ECWdtBxnaUJNfp2VZJjRrD3Flx9PdWbIx0FWP0F7ZwzlrT/uE1jM5TcIO4fwfBdEZRKNpnmlxKFqrpsReUBaAnoWPRnLhqTRjmYpEEIcnCrNPNl7UF9o0XahpAIwQLKWDENE299A67Y0s2jcMn8pUMoS4BPyMSfxoz4vs2bn8e/89Hb+/MQKfBjJMw4sUhFB1nvs7xNQC3lpNFKq55Bs4QyUKuu7QvVdRLFlKoGLWciCzaDDndBcdFxrLVT+rg/KsC0hrzggZTU7wiUj79DQ3831lFZ+Cy867szCDrwIwijPXbx2A51KMR/i0H+2R5IlnC5IosErHwyMLgDH2mpiSLOjQQhaJLOJKzMFys6F6L0Aa4RJ6aIkm7w25qU+dYMCum4oImrdM4Mfwe4+L/zhdyyce2jXVqiDUxYwjjV5sc2IWsyTn9Dge+ICcY/ikiH4Jk7mRcD40s4ppXuyKh1ZddqZLGCg72ZQoYKthtL4QTYSosITBuOcg7TsekDXlXKQHPnx1HMsYnu1t7wPBH3NV/czw7zp/a3X3l/mYEYTz9dg5HR10moL8f4F8BMFh6T9cZ5s2KoWeqmBcIVCFgXwWpVuhmMrKOwIGMhUO5IvIN1rKIEC4EEXK2q4yinZI2vDQF3+U/NQFxPoCvxrl5neMW2XO7u0vSmBGEcfL4OFb2jl0AsD8DsKz0e8a8Kll96XDa8ku1o9fkSgWphyscQTiet3FgvKhsFlS50nSELoDlCqWiFN3OkUYqqataLNrkFpsMqxljXyvm7NUjue6KAu16wli/PYdESltCjH3NT1OfwGCv14EsDHe77tsrUjqva9PnHIGDWQtHcxaKYmrptpkJVvZ690HZNiwbOdvpWKkAedjJQ2/SgST13usZ8BVOuVlP7Mh2ZGyV0NWE8cTWHEat8QQBvw/gzvKV25P0+oWEkb1o+rU2a5XPK0EVUCk42J/xpYqZsr0ky3IO4pp6Qb04qMS+RGDkggnHe5HwzkVV+YZ7f6/ppz7L+IysDiyfV95xlVHU7YChSS5feegN9FTynLCPw6XPZfPZ2DO7c20fWyV07RN+9BULNh/XOKdPgOHvAMyF/4ATpobFcxOqb0TQB0NMY+g1qhfmLYflqyAjRadSibbugqqTJ0VfpjY/s4vghSx4bhxabhQ8NwYtPw5eyIAV8kCxAOY4YK6jVjVxHWSYICMGiifhJnogUr3eK9kLN9kDMpMg3fDvQX4J8plj7ZVSZVLXVUJbOyHXjWULHDiWVy0aJ/HuXgH8YSqtP0DjBl1/YWfraHS+MEQVaEszEAfpAmL4tyWygO/LnjsY89LUA16LUqLorZFuXo6sI3AsZyFju+rf3UcWzDu+5E/hKnLQxk7AGDoI4/h+GEOHoY0PgWdHwYs5RSBMJcIJ+BWEQVK/91V8mnxdKY1IcjDjoEQabk8/nIG5cGYvhj13CZxZC+Gm+xXBqM8oAuluA7AjSBlDk6Qhprev/qaqWm9wZc+wHKEaQ5etp2Uc+OPMeHE7UrG32zaoKui+dQ5g/bY88vn8bM7dvwPYp0vjlPt47kAMcwbigUu/CUUW2rTxFaTqVDg4mreVwazrJlBJEhxwbejjJ2Ec24fYwR0wDu/ySCI/Dji22rxe53lWmt2pKoXa45I4PAI5/T0q+0meRCElGE1TJOL2DcKZtxTFxWfBXngm7DmLIeJpb2ySOLo4iE3OQkLXEde1tmpZ8lYnxywcPlGYrB5JXfcfXcG/lk6lR69bHY6Rv94xdhWefyGH8WTRcMn9EvfqcapsHDl9/WkDC+cklJQRJBK6VEOmJ4uSvUKqIU5XqSDeiS83olQtzMO7EH/nDcQObId+8ognQRB59gnWhBGTCMIh1N2OzVdHpAJEmg7R0wd7/jIUl5+D4srzYc9aBDITXS11yBmShJFQpNG+Jy2El6h2YnRKlbUhAP8uyXq+f+35sY5NWveseR8/y55A7w52LTj9r/LaFjGTY+m8JBIBqyL1Sh
ZSXD2Wt3Gy6AVhdcfE+UTh2jCGDiG++3Ukdm6EeXQfWCHnbdgAjZHk+GpKo/OvvEakpA/RO4DisjUonH0ZikvXwO0Z9HsldCdxxDWOhKG3LWVe2TMcgf1Hc8jkJ9cGZa8R4fPxROr1G1bH2zKeSuPrGjy2Iw9nPDuHdPwPBvxmaXycM1Uxa6Bytl/TiGue63Q6srBcUu7Skhek8/CIgjk2jON7kNyyAfGdr8EYPgK4TqgeC6mekNMEaUxcQChpRySSsBedgfy565A/6xK4fXO897uwwlhM40i2mTTGczb2H82rhLWyu7pE+A4Y+xPu9A3fdkn7TZBdQxgP7RiFm3cNjdw/YEz1EZkwBw/2mipPJMgWhjEV6j09WRRdgUPZU8bNjkMShevCOLoHqc1PI7H9ZWhjJ70TmvP2PFIhVZRTBtGmoNy2Qnle7IXLkDv/WuTPfh/c3tkTKk03od2kIXFsuICjJ4uTyXmYiP3b/HD8n5ckkuKyde3dwl3jJel3NIwy6yKA/YsSWSgXaoxjdr8XbxHUEjK55zqdjiwKqsR/l5AF81x9+vARpN58CsnNz0EfPubHRkiJoo1dtTgD17ln12g2doExkByz68DYuxN9h/ciseVFZC+5GfmzLgbF07600R3E4UWEOm0jDXmHwR4TubyrXK1lGGCMvpTsL7ywb3B4W+gDqTCujmP9tjwK1ngfU5Wz2O+WxiVJYuGcOPp7glNFvIzT6etY5B2fLJypPSbaDs7BCzkktr+I9MZHYB7d420m3uG4Oylp2AFJAyWJI55E/uyLkHnf+2EtPMsLCusi+0Y7JQ15i0zOUfYMyzlNNZGz/vfksD/n6b7s7avbd+53hYRRyKlONXeAsQ+U17foTeuVagc0jVKFrGnJQkoWOQvZTpOF79Ewj7yD9MsPIrnjZWXMLEVldhzysRnwSaPFa5UkjmIByU3PwzywC9nLbkH2ghtVcFi32DaUK525SLbBeyJ5OJXQlUquVJNTkJviE0Lnj99h/4cHQx3EJHT88Hx6SxY5O7cSxL4Nhuvhk0Xc4Fg6PxlYfQv5RXtVbkjtr1wos1l0liw4mJ1HcusL6HnxfhgnDlSOlegCtGwIrQThAoaJwqoLkbn6Iygu8h1mXWLbSCiXq96Wx2FX9ZrgPpf4F1OJ2NHrV7cnArTjx5Rj2TojfAIMV5R+JwWAwT4T8QCL4aT8it61YLmEI1kL2U6TBecqCrPvqZ+i/7F/hnH8QFfnajCNgekBLyUpRTkOEltexsA9/xPJN59SXqGSLafTKDiual/ZDpg6x6y+WKUyg9drTHyk4Ii2TUpHV+AT28Zh5YsXg+EHYFA1UEtFfJfMS6q03yCQ8N2ntTQRW1X19lynHQXnMA+9jb5nfo747jc9/b1LNsl0IFt4UaEBgwkXIplG9n23YHzdByFSfV2hokj+Thm6qhkaNogIh44XMDRmTd60LzKw3zNjia03nJ0MfRwdW4m/fJtg5wopMPZZsFMBWpJFZ/WZgUVzGpypAji1yMIlLyiro2ThSw/xna9i8KH/jfjOTac8IDMESsoIIXuYuAaWzyL93P3of+R7KnpVSSAdhtSO8rYLuw01NThjSuo2p/bYuUiAPmHbblsKZ3RsNV6YewmuhnVg9FEAE0+/L22o1oZBnFMlI2etzFP50E8UnM7W3GRegljqracx+PC3YRx5p30xFUGCyX3Mwhm2JE7HRfL1Z9D/4P+CcWR3V5CpPGxyjpetHCa8EANNuVonTa/JgE8JUTz/8e2FUMeAThHGl39F2MrOTDOw3wawBKWMPZ1joNcILEArqU9f02LEcjCU72DNTcZUCnl60xPoW/8jaCMnuuL0bBoaAwurpL+fnh/fsQkDD34b5sEdXTFXjiBVhCdse6yc1f4ew3MEnH6vM0D4tGNZoeskHSGM//IbDIz0axlwB07lSqKvx1C1DoOY+LjfjawWMrbAsVwHE8lKZPHqI+h76ifQMqPd4S5tEUo1CX
FCiXGY72xF/0PfQWz/tq6QNCxXqOLCYUIVEDa4crNOWiY6GH5DuNYlT20Lt3Bw22f6nh153P/a2ACH86mJojjkTcRAjxGII0Bn09stSvkhHSunJ8lCqiGbnkDvc78Cz2ffFWShILWSkIvQENdg7t+Jvoe/q4zE3SBpFF1XEUeYUE6BlFGpQv4yBvoE2SLUrLS2r9DdAtA0+yoGuqW8zkV/rxlIAyJ5wZTBagZneUbOTgZmMfXkk289g75nfgGezzR+SpbnW5RqYKB74hSYFn7MiJI09u1QhlDj2J6OSxpSrc23wZ5h6EzVs51UnpKD4YMFN3/pE9vDK+fX9hk+3y2mOfAJAPPgr++4qaEvHUwQTExjSExzug0XHWW76JhJkTHEd21E77O/AM+ONbXQmWmCz5oFfelS6CvPgL5yJfSly8BnzwYMo/PEwXzSCBnENJh7tqPviR9BHz3WcUnDEYR8yPYM8mvapqaUemCLieFjjm2HZstoa2j4kzuyyOez6xj4zaXfKemix0DMaL3Ohea3MaylimRtFyfyHaxpIUXpg9vR/+RPoI8cb3yBMwbePwBt/nzwZFKKa6e9rQkBkc3CPXoUYni4o3kYkjDIZaGTlzKEbnsVvck+jNzyWa+yVwcJU6olOndVAZ6woGtc7Zts3ik32MstcKdw7R8/sbXw4o1rgtdO2iphOHYuxcA+Wi5dxEyO3lQw0kVSr50nYvtFey3RKSMnhzZ6TAVlGcf2NUUW2ty50JcvB+/p8ats0ekvSSg9PTCWLYc2b15no0NZ+5JoJS8m3ngW6Y0Pe4WLO/i9yY8EDbNRkrxHOmkgMdWWsQKED7mOFUpcRtsI48mtGbgOPw9gt5buK59pX8rwbBctHgimxhRhVIO8/MmCg/FOhX3LjWzl0fvCPYi/82bjBk4i8P5+aAsXgU2ncsj3DB3aggXgAwOdTRHnIcVlTIZcTJaF9IaHkNjxUsfD6F0irwF0iPfQNaYcBZMyZzUwfMh17TMf2xG8x6RthGHbri6I7gSwHGWVkvvSrXtGVIiuzmrWt8jaQpXX69zWYUhueQ6pN5/192+DX9owlMQwLVmUQKT+Vp83H8yIdUxEZ6yNCXOMg4+PoufZu2Ec29txr5NUTSwnvHwTpqQMXdWMmfR4zwTo9iEKXr5ry4w+9vY4HNdezqAIY+JL9CR1xKYGoTSMOGeI11gcjiCcyFtKJemM3YLDPPy2yjplxVzjG0hKFz094KkGdXNJGqkUWE9Pw0MODMqB075ZJ8ZhHNyDnufvbc77FORYVPazG1qDJFIek4qHbhwMH+wtZhY+viVYj0lbZtN2MgycbgJjq0u/U60I00bLA5BrMWnwmntwpOh0Ll1dnnq5cfS89AD0k4ebs+IzBpZMNXdicg6eSnVURGdtjnKXnJrY8hKSW5/veHS9PKyKIWa1ysfak9K9HJPTeekiDXTFz3YEK2SEThgPbBaArc9mjEnpQrl7lMEmoQdS6yKh1TZ0FlypijgtlZ9sFXLhJnZsbH7XMAYuVZFmN73ZwmeDAG9zHQ9JsIUc0i895KkmHY7PKLoiNAOoF/SoVXIc9AvBPvhbZ+YDLZQR+kwuOsoBMi8EnWqkzBlT1bRa7YuqSelCZ1W3oZQETxaczjUcYhz60EGkX39cdRbr2Kbtgliudu9Zqb5rRw4i/epjYE4H597vZ1NwRGiPQX613pQxNcOb4eqi45zzzNbxwO4V+mPcO3DcAJzbTw/U4qr0WKu7WEoXtTJRc46rupR1BCpPxFZJZU25UMtBBGHbzRsuLavzgVzt8paUQxASbz2P2N7NHZcyLBFeGrxSwWJapfahiwDc4tp2YHpJqLP4g82vI8axhIGuLw8D70nqyljTyhqWZJqoUUGrJF3YndoojKsOZMmtG1rfrESgbAZoRhd2XRXI1WnCaGf3sLKbgo+NIv3a46rJdEdjM8iLzQjrMWgaU1LGpPPTAMNNlmDzz3j404HcJ1TCOG7tBQ
O/sryDmfxiPQEEasWnkS7GbbdzMRdgSgVJvfEktNETrZ9ujEGMj0NkGlz08nOZDEQmOJG0abDOkAaBIbbrTcR3vd7x2AxHCFgh2jKk1G6apxfYYcD5DtkX7bz8h4HcJ1TCWK1fkRYkbpzoM0JAMqap3JEwpQuXCMMFO/QkoKpQbtSdwS5Sx4F79AhIqhf1XJMxkGWrEHHU+5mw0YkhSNUwl0PyzadV39lOu1mLUuILaV2aBlfOhEmYxRi/4b59I4FEfoY2e49uHQe5fCUYu3yi5aGvjrRq7IxNJ11YAlmng2nrdhHJzc9CywwHt0CltDA6Cmf/flBxGiNeiSwOHoAYHekOsgBCKd9XD5SUsWerb8vosJThChUPFAbkV0sn9cnFghlj7FounIXffeNoy/cIjTC0jJAXv4wBKzARrdy6sVNOSlyr7hmRUsWIFX6KcVUwDuPEftU9PQyIoRNw9rwDMTICKCOa77IsvYRQJOG8sxvuieMdt12chk7t1ZKUsfUFMKvQ8TwTy3VVUd8wLi4l+Jg52T5IZ3JiF//ueXNbvkVo2arFJJKw6TqAJUq/S8YrfZnGYPLatS6ytuhsmwASSLz9CvSRAGwXVSDJgjIZsFQaLJ1Wqe4KtgWRyUJkM4Btd/w0nQxJ88Q64+ZV1ar2bIF5ZBeKS88FqHPtL20pZWik8p+ChPyOujyU4zqy+dO+Xy9n7Lr73hp+UG7NVu4RGmFoYEsEY5eW/q3yPRK6qtfZLGEwv/ReNb6Qkt6oL110LBt1/IRnu1DtAUJK1ZQqhzylpLoxNuoTg999zM9Y7TayUGA+aXSCMRgHGxtB/O1XvaZI6tl0RvoqSRmGxgNfp15+iYahMQZxSvXhYHRZgusLAOxp5fqhHIFP73bhOsVLSwV+4VcJSia0lp6RxpkqkFMNeUd0tnEyY6rGpHHiYHuMayVSoLJWhd1IFOXoZLa9KxDf/Qb0saGO2VNKsAXBDcFjoroGmpoqeTlpq61ybPuc9Ttaq44fyqq28rkYgzJ2eqHgfmCJqU/5Eg0hxr16ndUwZjmdSzDzjZ3xPW+CWZ2NLOxasM4SBjEO/fgh5cHqdJKJIAqt/qdSSxJTpNtBxvA+x7FaEntDIIx/A5ec+QAuLq97IfWqVrwjnHmxF9VguaTiLjoGvzhO7MCOzo0hQm2oHJOC11HO7WAfGh+WEKG4WOV+S8r9dvqhxRlhHSPqa+XagRPGq4f/QurXZ6heCSVDjMaQiGstkbrOWU1XqlRFrE7ljPgwj7wDbfR4x8XdrkaHp0Z56w7shD5+suPh4kJQaC7WhMlhGKfbC4nhbMspLH9px2tNXzfwGRs6Ns4IJKWL2eoXfguBVr0jMV7d2OkSMGa7Hc1Iheso+wWzrc7viq5Gh+eGMegjx2AcfafjaiP5HpOgKUORosGVLWMS5migC9YfvrDpawdOGLrWm2BgF5V7YOIxTRUtbRaSKGq5UouqiUwHXamMQcuPw5SLMEJ3Q6kleZiHdqv2lJ0mMEeIUArscMZUGMMkTkwQ2MXr+kdjTV83iMGVw4E7D4S1EzdgXjBJK2Sus9rqyLjlqkIlnYIypp085FUBj4yd3Q9XqP61vJjt+PNyicKplcE8R8Mku6H8x1qL89nNXjZQwrh/I8FxrDPBsLD0O01jSsJoBWaN2As54dmQi61OBzk04/h+8EKu4ydW16MLpoekWjJ0GFqmO8LmbSECD8iV1zMNPiUrnIDltmstfviVI01dt6HArce2ZCpHyHmNvDB0jFjfADsXQC/KWiC2ksrOfPtFNRQcrzhJJ9URuLYiDGV574KWfRGmA4M2PgJ9+AjsOUs7PRglHcuDr1bIQDPQ1WHNkS+e2rNM1aWh1UgmXnxs8yjK3xCq+76Om1dVL9JVN2E8usPGtrffxhmL585nhAu9gjjEAeaC0WGH2Ka+3uFxBpyr8vD9QUjpQmshBFbnbHIyzWmQ0kXH8kYUGHgxB334KDoU9Tyj0BVzxJ
iKlVE1VrsAwldL9IAPG8YYEqaGEXaaCznOGHsf2fYeF1hCRKafnzdsc/5W3iq+88z2vLhmdaLiNesijB/nx5DfUeRnLpp7E4A/BlPl9uKn5EuW0xmeB6cfM2A1lWWnxk3e0iIxeHUvpSSKnK+OdE7CgKpOrY0NoUMhYxGagSuUWgLhdIWeJAmDVEuR4CAFlpjJlR2jzLAqb/FJAn5TaQKnipTYOtGOHvC/LxbyP39wt5V//0pzyjXrIoxF6wWyi+zzwfCfAFxS4U8kHX0QDBcSoGrak6pbwWAaWtPHirIN8OqZqZZLSiXpLBh4bhQ8P9YV+nCEekHQR08oNzgZ8Y7LPVItkZKGFvAaMg2uVBPHpfLlOavCXRKMcCmAv3Qhxk4Qv7vS9eoyep48gwxAfAzAdA7cJQD61X+Rlz8iB9wspGRRyzuSd7xqzJ3cpiowLTMCrtKmOziQCA2BiIGPjyh1shuIXpJF0O5VqanrmmdDbABLBPDZ+Xa2t9KbdV0pZfMUA84pb0JUD+RAJbs1a2KQbFvLEJR3RWeDtXzw7AiY23mf/oxAt0yRVCULWd+12unBeAdPGO5VTWOqbF8jYMAqLjCr0nt1XcmFK/WKxkp8MU8c4i2ESes1ojsdv3R7p8GkGJkdVYVrIswsMKuo7E/dYnuSazpoxUjZMaZp9DUZUjlwBatorqiLMKiJCgbKHdrgQCejljZjuaSSdzr+qEmoRcc6b/uP0BAY4NhKyugWCEHlNSwCQzP7UK+iFoSWfcN9CaNZMNROZS+6osPuVB8kwKzgu2RHCB9SjZTPruOHjg9lxwg8gsszDbRaR7eE8AiDM8/Y0rT9AjW7sRfc4KPjmgETAlwlnDUAKnuJslf579/N6JodKsm+wWcXIsgPFQj6mprGlfEziEuHUqLPS2nnyuDS7Bg1Zb+ovLLkpBb9LL/Orj3mSRiuVf17kletTxKC/KkCZQU7VQR2UhMJlJr+cPKyrzUvC7vdDY27AqUpIubPI5vy3gQm5o7UMegtHao9Z0RgTufrYpTDDSEeQ+OexzLfUjVPD+HU9CQvLLVZg2cphqOaRuIKz4bRNZhM3eQRAzn+T9cnDSr7g1qXK3+/VJ5T88pQcsMvR/luJQ/yCUJ4BDFBEnU+bgI7nTw076dHuJOfE4GJDrXSrAJ5GMrDJMimT560H8z1QisCLAfYrNrk2S+q7wmbSFmUu2fPeCNREoQkCauMJFoF+Xwkr20DoggwHeC657cKq85w2yHJwWXeHIoWn2y5ZCJ8ElFSGoFp5BHJBA91zyqCX8iaQhCdDb01B0QJoRCGHJiuBtjcCJkvYVSD7YpQrMkNQ6kOmlqYciMLX6II1QZBHnG4NsAsjzS4OZOI4/TnSiWicFm48yZO3csjDqFOJdLMrgjcKkFKFyriM2DGkBK//Jqt2jFCkzCmtJ5vAGof1vi4JTpSqP50cA5WyCG+5RWwvYfg5tvfnXxC3bF90jA7XnWufghAuDx8opgMpS5K4uBgLgMfHlLNjcgwuyKWpmT4DKSvoQ91gGveAd5qA6XQJAytBUZjqK3O2D5hdKo6uIR+eC9SzzyI+JsvqQpOje9UqrxRmjjtJGm4cggOoMUaDrFrK1RHBIdD2F6Ryc6BgRxC4vknwLJ5ZK+6De7cRf4AO3schVEYWPO7Bba6b0IiDNZySb5qHhLhE0ZHwJiyqsfeeAHpJ++Ffnh//U2DSgux1GhI08B0Tf30WhySKhlHjuOddOUNieokESlpyI/zGKDFu88wSg7gFuQ4GxzYhEdp8maetPwn5gv1fXnGwLNjSD7/CIy9O5C94UMonnsZSNM7ShphxBcpr6PcWO40nqNpEDhhkL/hW+kCx2tI9yKskmbTgWtg2TGknnsIyeceUQtt2mI5pY2v66qtoTZ7LrR5C6HNmQ/ePwieSoOZMU86IaGaLIvsOMTwENzjR+AeOQT35HFQLgu4rq+rTUPEkncKHrPyRPeoKMLyxlV3h8Iy0m
SmCZZMg/f0gvX2gyVT4GbcWyhCQBQLoMw4xPioelE+57WKlJ/nfJrG1d4EGQd2o/eu/4Pc8UPIXXkbRLLXr/nZfpSfK0GBK8Jo/TrBSxjkSRit5JDwGi5VdRC3RpJNDIhDGz2B9CM/Q2Ljs/4xXoMsfEKTpKAvPxPG6rUwVq6GNneBWuxM12ufgEQgxwblMnCPHoK9cxvsHW/B3rsLNDY6MaZakBuUBKAlPK9Kx0CeZ0dKFnXZKuTcyQOjtw/6omXQV5wJfclK6PMXgfcNgMUTgKZPGNSp9BnHARVycCXZHjkAe89OOHt2wj18wCNcTDNnXAPPjCH92F3QTh5H5taPw+2f3RG7hvCTMaoXdmgctaT2RhDKUuJ1HIQ1P19jO7kihPDZmoPRoJ08ip4Hf4T4Gy+eOrUqwV/s2tz5MC98H8yL1sFYvFydjg1BEqZhgvUNgvcNwli1FnTtrbD37ULx1RdgbXoZ4uQJf3zVJ1qpADlAS3aINMgjCkkY05KFlKB0HdrCpTDPvwTm2osVYfDe/pofU+tEcrecr0QSfGC2Iuf4uhsgRk8qkrXeeAXW5tcgho7Xfn7y966LxCtPK7vU+J2fgTtrftslDQrYtVqSVlo5xEsIzejZSuBJre/lBbY0fenGwDi0kRPoeeAHHlmgij3Bf8J8YBZil12F+JU3qcUeiAxYGkq6F+Y5F8FcfT7sq25C4bnHYW3cADE2XFPaUQbRTpBGiSwK0/2d8OZ56QrEL78WsUuuhDZ7futzx7kij5h8nXcJnIN7UXz5GRRefhbi+LHqtiHfUh9/80WVazL24d+BOzivrZIGTQTvBSdhKKm/W+MwJJO1Iv3U+qhSSZq/dP2QCy47hvSjP0f8zZerk4VcSJoOY835SN72YZirzlMnZWjQNBgrVkFfvBzW2ouRf+Qe2G9vmdh4lVDyoijSaFO8hopLmS4UWbhgPX2Ir7se8Wtvhb5wSTiWWk2HvvQMNWfmhZcjv/4hWK++ACoWKhOT/5xjWzeix4xh/IOfhds70D7SULEYwV5yRkgYTVcKn0bCCN1xrxorW0g+/QASG5+pboESAizdg8QN71cvqWO3C1JliV14OYylK5F79B4UnnnMM/ZVOZmleiJ80ggv5dDDtDYLf2HoZ6xG8v0fR2ztxYDeBl8w12CcsQb6ouUorjoXuUfuhnv4YJU58yWNTRsgUmmM3/EpkBlvm/ckhMoY3hJukTPCkTDk4Fr4vtPkC7XlmcXeeAHJDY+pFogVT27XBZ8zD6kPfRKxy68Da8eCrwA+OAepj/w2+Jz5yD/wc4iRk1VVFBX7UAzX5arC16cjC84Ru/gKJD/0SegL21/mn8UTSqLR5i9G9u4fwN6xxX9j0qT46kni5afgzFmA3Lrb/L8JdwFShfSkltGimaCEcM6aFpms1hcLPQRDnkIHdyO9/h7w3HhlshAC2sLF6PnM7yN+1c0dI4sSWCyO5A13Iv1bXwCfPbem6KxUhZASNKVWpOIsqt3edzEnbrgd6c/8fkfI4hQYjFXnoud3/xVil1zhr9cKi0tKm8UCUk/eD3P35kDtUrUQAl8E4qYN5du3ar+oKWGEye6q72YOyWcehH7kQOWT2nWhzVuA9Ce+APP8y8IbS6NgDLHLrkb6Y59Txteqln1qMB6iAVDRU30qv+mTxfV3IPWhT4P39AU/gCagzVuI1G99XhlbFSod7ZxDGz6O1FP3gY8Ptym4Jfh1Xo0TG0FI37w1D3JtwggX8bdeQvytV6raLPjAIFK/+Tswz7805JE0AcYRu+waJD/yabBUT9WjXpKFCLhujEqIq3pNzwYUv/ompcKpsXURtMG5SH/897xnWk0XYBzmzs1IvPKUz7bhRgKFoXZ3r0oyE8E9F2rixcfBivmphEGkRP/ErR9B7KJ1nRplXYivux6JG+8ENKPqylOBXUGVgiCfLKqpIoJgnncJknd+ovGYlDaBz5qrbEH6spWVVTo/LUAShn5kf9tUk2
5DSN+647mkTSH25osw9+2svBiIVIxF4rrbur5/KtMNJG7+oAqAqmpQEHUGVNUBYXsSRuU3XWiLliH14U9DG2i6aXhboC9ZgeSHP6UidCuSBtegHzuIxManPWP4DEOrmaoIizCoSiJm10IFaA0hsWmDvxAmSRdCQFu0FIlbPgwWT3ZqlA2Bp3uRvO0jyntSzQiqNnqrtgzyCgZVfOBSKosnkLz1g9CXndHijdqD2NpLEb/65uoSBEGprPqRfTNOylCPqEWtpCu/cS2yCUVzZIC5YxP0Q/umGrTkojdjylinL14e6G1HRkawb98+7NmzB8ePH4fjBHtqGSvPRvyam71AskqnC7VuyxCO3560EohUiHzs0qtbu0kFjI6Oqrl7J+i50zQlRRpnrK4iZXgG0PhbL/mG5S5LCa6Fri0C3EJs1XQfDTIhx7sgA8tnEdvyKphdnKpuCAH9jFWIXXplILdzXRevvvYaHnzwQbyycSOOHj2qftff349zzzkHt912G6675lqkewLQ9TlH/PLrYb36Ipw9b1cM8yzVHW0qArSWdKEMxLPU5gtKKpPz9PqmTXjggQfw8iuv4OixY3AdR83dOWvW4NZbbsH111+Pnp7WjKp8cI6K03D27/GiQSfbs4SL2LbXkb/0eriz5rXB1986vPil1scZCmEIOpVt18wQqUbmTUDtFU6BcWXEMva9XcEz4kkX8StuBO9tPYrz2LFj+Id//Ed857vfxf79+yHc0/WBJ9avxw9++CN88AN34it//MdYu3Zty/fUZs9DbN21cPa/U1HKKBUrboYwSjVMq8G84DIVWRkEhoaG1Nx9+zvfwb79+xVRlEPN3Y9+hDvvuEPN3QUXXNDS/WLnX4bCS8/AfmOjV7OkHGrNHFA1NFRy2oyAH27eYopKeDaMkEpiNVBPpk4QzD3boY2PTlVHhIC2ZDnMc6brQT09pNj89X//7/HNv/orJUpzzqEbxukvXcfo2Ci+/8Mf4kv/8l/i1Vdfbfm+ErHzLoM2f2FVW4ba9E0wu5JOKl1SqnG9fYhfdrXK42gVkiy+/ud/jr/85jexZ+9er0BThbkbGxvDD3/8YzV3r7zySkv3ZOleb/ymWeFNpqTR2M63VPe0MBBk1XAEKGGEQhiixeSZWp/lQQTET4ApF6r5zraqVnHz/Es9q3kLKBaL+G9///f43ve/D9u2oU0+scpvKXVkTcOzzz6rNsmhQ4daureENmc+zHMvqvp+1Y0/DapKF0LAOGtNIIZOx3Hw37/1LXznO9+BZVl1zd2GDRvwta9/XRFzKzDXXOAlxFUkWgZj305oYydDCeQKwzISRO5caBJGK3UJa31SYwEOmjPlHdGPHaygpwrw3j6Yq9e2LNK8/PLL+O4//7MiC16nZV3TdSVm/+SnPwW1+qQ1DcbZ54OlUpXVElV5trFLTjRlqnQx01RSGUukmh+zj5dfeQXf/d73YNU5d/JklnO3/qmn1Ny1AnlQyHmrciNooyf9mIzgt3eQV/QqQFIgtUJDocZWm8rW+mitBkcNQ4q2xw+qSkuVArW0+YtVQZdWIMXAu+65BwcOHKh5Ok4dGkOxUMBdd9+tjHutwli6EtqceZVFCWri9KnWd4XIK/oTgO1CShf33nuvslk0One2ZeFXd92Fw4cPNz8Arql8E5ZMTf2yfo6Jfmhv4EZPFoZKIhBIa47ACYOVJIwWDsVaH5WEUatnSUOQpHD8sGr7PzVTEdCXrlDxDK1geHhYSRjNxPpyTcP27duxa/fulsagrtU3AG3R8uriW4P9VE7v5Fb+BkFb4NUtbRWjo6N46eWXlXG40Q0k52737t3Ytm1bS2PQFy6FNji78vNzXXXgKO9awBs8aJnFFcHU2AjNhuG2JGFQVdKQUikPopWFH+qrDx2rnKilGyryr1WcOHFCuU5ZE0E+UgQfz2SUdNIyuOZ9n2r1MqoRQBVUDfiSUtviFV7tzRYxNj6Ow0eONDV3kmCyuVzLNiBVrH
newqpzow2fUAmLQRIGU1J0sJThlqT+rgzcIsBxRdO7WtSoecHlggwkws4rkqOyDyffy88bCeKUtG27paAiIYQy9gUBfe4CVYG74uSKBoQgqiEGapoq2BsEpGTR6tzJ+W8FzIypjNZq5fykOsvyuWDL6YWwMUsSRqujDMfoqfTP5hPRyW9IWwlywHoQRiZ5CasInh2vNADwZDqQ2Iu+vj4VSNSMS0t+xjRNzBpszUtTAu8fUIVyKzJ5g8F2Fb+OHxXLZ81paZwlxOPxlueuf6D1Z6jNnuu1QJ8MBvB8Vr2CROChAyS1p+p7qhGE5iVREkaTENPYMcyArNJSJWHFYgXaJa8dQDze8j1mz56NM888sykbhjwh58+bhxUrWleNJFgi5UVdVuOLejukqz+uXAxZzhlPB5O+3t/fj9VnndXU3MnNMXvWLJx5RuuuXXlwsIrxJEz1P/Gym1u+zan7Vasf2wLkfgwiZT60XBLbad5TIr+YW+OjkjBaHzjz+otUSjaT95Y6eACVtOQpecdttyGeSDTM8PLvr7nmGixfFlAOi2GCxWJV80oaQ+UPMDOuXkEglUrh1ltvRSqdVuTZ0OiIcOWVV3pk3SKUl6RKNzQmXM9oHiBUEe0Ar0f+fgzClxMaYThu835f8nWuajA4D6Qpi9/Su+I7rNTCMADcfvvtuPqqK6eEM9eC1N2XLFmCz37mM0gkWzcgQkU082AqmtcMlNECTf+//bbbcM1VV00Jo68Fx3WxcMEC/M5v/7Yi7FahGk9Vc+uSAFNt+1u+zQSCWdunIPeh7TRvUyxHaCX6pAgk9aZmv7pTI0Xe4EzZMVr//kFGjVbHokWL8NU/+ypWr14Npw4jnOu6Snf/8h/9Ea699trgBjKd3tGFOVTz58/Hn/3pn+Lss89WJDqdlCbnrjedxr/58pdx3XXXtW2cQSJwwhCehBEEQpMwXOEZPpvdj7UaFmk8IDsG95shV9gp5NiBBuRcf911+Ju//mtcdNFFE9Z/KWaXDLzyJRe7JJQF8+fj61/9Kn7/i19sKGBpOpC8n11FymmEO2s2jqkutTULqZb957/9W1x6ySVqzirNnePP3by5cxXB/MGXvqSMnoFANciuIuEwDtKMwKRR1T8kQL5gikSFkviDGGJoHXeEIBRtgWaTtD2vbOUMNsnApmQNu5XqLwQYBsgwp/IFY6BCHrCDK3zJGMMH7rwTK1euVBmXDz/yiMpYzeXz6tQ3DEMt9nXr1uHzn/scbrjhBpVQFSisQuV07YkxNnKxyuX2ySqqV5CQc3fH7bdj+bJl+D/f/S5+/fDD2Lt3L/KFgiILOXdz5szBussvV3N34403qt8FBbUWSs2wJ7+naSAzFph4xsGClTCYJ120EhdVjtAIQ0oHlt28ZdZVXdorq45yOuMab02ZkCqPYYJUvkOFhZ/NQOSzXgXuAHHOmjX4q29+A1/8whdUFOLBw4cgXIHBwUGsXrVKqS2t1nOoBpHLAPlsxYXfSE6f97eVS/JTMa/mLgysWbMG3/zGN/CFz38eW7dtU0FZruNgcNYsrDrrLDV3vb2tReZWghgfAVWyP0npxoiBAqzCJsmixZU9BXIfBhEWjjAJQ6Jou2qgzbRoU7EcRIhVmbyYzqFx1gJzeg9bqHL3UxvYUC7rNQUKoXeGYZhKJ5evdkKcPAFRqFDgGI2bcxivQLOKMIpwh08grE4tUuqSxCBf7YI7dNyTMKYEDBIokYRIVHZVNwOtxTajkyEP7KJ/cHdtX5ISSqJQs+O0a6jCMc6UHaPp5yRPB92A0z97qtKoFn4B7tHWU8u7Ce6RgypuoCJ4AwuqViii48A98i6aN8f21kG1Eoc9faB4ZSm1GQSWJ+XDMw0E14QmPMJQupPwrLNNzoFTI2FG5wzxVg2CmgZ3zgKQXiFc2nXh7Nvd1q7docKxYVepugVfYmhUwqgIqUoe2BO4HaNTcMdGqhMgg+rsTvFEII1E5LkVSBSzD89bSbBrnb
wNIjTCYH4sRdFqnt1cKjVfroyE3rodw5mzUImVlU4IZ/87EKMnW7lD18AdOgb34L7qBs8GuVf9faVLMQb38H6Ik8ebG2iXwT18AK78LhXKN4LrsOcurHzgNIHADZ4ALHlou40f2tW+TV2Ewb1A4IZnREoHRat5w6cKOKlho0jqvLV4DBKqJqMzOHfqA+cM7rHDsPe1nlreDbDfeVvZMCoaPHkThMGrSBmMQQwPwd69o/nBdgvk+nt7MygzPtV+IdWReALuwuWBuVQ1HjBhqP3n2REbvapV5QN1EYbhcilf1nPUyl2Xm0gFIaDgD7gZqJBWUT2k1VBqSQtCEhFEMg17aYV8A8ZBuQzsLZs8g9cMhlQP7M2vKQ9GxcXdBGGoz1RMr/DsP/bWTTNeLRHjI7C3vVWl6JCAOzgb9rxFgfU1DCYL+xTkqApF0YxWPUIaq+jqqpMwjBwx8bi80DR/egxEvwQwqv7FgIItlC2jWd60aqRdS+kiaWitqSWaBuuMcz3X2OQbEWBtfhXOsRaqNnUBnIN7YW17s6qRQm38JiaxImF478Da/hacQ63V1Ow07Le3enasKgYbe9lqiHR/IIFqQdsvAC9DtdC4ScAG8JhOOFHpzboIg5mcOMQDAP4ZQIV8cAVJEt8WjH1LEYe/Bh1HeHaMFiI+p1NLWrIsE8FZuALOvMVTDZycwz16GNamF5u/fqdBAsWNzys1oWLxHAbwJn2gkjAqSiacK/VH3jeUrsJtgJTGCi8/4wVtVSjfKA+Y4llrg8nNUd6RoPKjPDA//kK+Jl1WHvoVyUB+BMADxPE9GGZFd1pd3/bqtUk8+MaRk4LjLwzB3yASt4FjjhoXKfvGEU3Dr/NW/m5dS0hK2w5AOcrlHswVBXqbDPmUXGEJQkyrPJkJjSOmcWQdtzlOEgS3bxDF1RfA2L9r6vuui8ILT8G88HLo8xc3c4eOQp6QVmnjVliQvNqmrwPKjmFULwYs7+tcdjX0Sipfl8Paugn21jcqx2kLAXvhMthLmitbUAl60PYLBuQtV3lJyiBH+6Cu4Veuy24HaCUYvKdPGAPYc4LhJ2u3D+1d+rHKfXDrVppm9Q0grsWHfvXU738bXPuiS/g0EX1KEH2aC/q/bv4vqe+lEulRjXgewGvlhtZ80VXiUbOwpnGvplpSS0idiMVzLobbP6uylHFoH4rPPz7jGvCSVUThmUfhHj9aVbpgZouNbYwqn5fzdvwICs8+BgowxL4dEJkxFJ5+xDN2VmidKaWK4jmXQPQOBOJ2Z2HYLwSQLziT+czSdbxwy9pZv9TA/zUj+jTz9vCnBOFzFud/Y2r63l3nV6+YVvcoL18Ww83npHHHVf8RmqaN6oZxWL4M3TjMNGP8l/8auHHNADbtzbnEaOOEvYN5npIKolHdcASpVzX0GLw1/U+eGAuWobjm4qnvqRrtAoXnnoC1/c3m79EBWG+8rLp3VYOULJpVR+q6BhEKLz0N661gGjK1BUQoblgPa8umygZiqcLOXYjCuZcG6h0JOv7CdgXyxSlkNuw6eP3B14R8aDndMI+qfayrvXwyGU+4N5/bhxtWVW8P0bAC9pFLaoczX3jmIBwnK1WSPQAGmF/tR4pHyXhzsq/rqyVmFbUkrnOlmoyJJtUSCc1A/uKrENu6EdrJE6efyIxDDJ9E7td3KbWEDwZTgi5MuEcPIvfI3d4pWSXAjbcoXSgw7zrCruC8Zxw0Nobcw3epRtZB1EgNG/aurcivf9BLPKxU14Nz5C+4Au7sBYEF9emB1Xc5hYLlVjqkd2q6/vbt5zYf8Bh44NYt5yblxj4EwqbSElJ2jLzTUiOVYg21RGMMabNFbwkJ2IvPQOGiq32ymByXwZVOm33wF6BCrpU7hQ4pUmfv+ymc3W9XJQtm+IQRAJhe41oah7NzG3IP/Ey5qbsZ7omjyN7zY2XorkgWwoW9aAUKF14VaDq7EbQ6QkA+707Os5K/ftkGDbVy7VAiPdOHZ2XB2I
uS6NQvGJArui0V8bCnUUvShqZS3lvJLYGmI3fZ9bAXr/Dy68shn6wUV59/wjuBWqhmHSbIKiL/yN0ovvJc9T9igBYLtOMkeKya99G7SeHFp5F77F6vzkgXQpJs7v6f+obOCl+ECBSLI7/uJr9jezDShcaCVUfgR1hnC+5k+0WGAS+OpvtbegDhVNxKqeTxlwAcRZmLJ190myZmyRWFGobTmMYVabQEIZSomb32/aBUeqoF3M/GzD30K+SeerDrFj9ZBaWG5B9/wGsSXGWyJVmwgNNJlS0jVu1NBlgW8o/ei/zj93WdEVRKPrn7foLChidr/BGhcO5lSh0JEsGVm/TAmBfdmbem7LXdBGxKtpgkFwphvO9KBo3FdvpqiYIk5EzOaYmYpVpSLbdEzk2fqbXO1lKKWHMpcpdeNyFVnAbuRYDm7v0p8k880DXRjFJNyv36V8j/+i6/SE7lR6tiJ6pt7BYhCaOqaiLnLZ9D7v6fIy8ljS6ZNzE+guw9P0T+qYerFsmRqoizYAly197pZaYG5EqVS9VsJVK5EgjI5it5JelVXdMOfHhZa/cLLfmMZ90MGJ72I8cUcgXHi/pswVtSrCFlJHSOtK61xqG+6Jm77gMqNqPi4mBcGRNz9/4Y2Xt+BDE23ModW4YYOobML76nJB+5KWslmGmJUJqN+zcAeLxGXIcKt88id//PkL3rBx1P7HOPHEDmR/+E/PpfeypmRbIQEOk+ZG76qLJfVC3V1wR0zlXAVpCQeySbn+JOzRKxZ5b05ls2IoVWQIcl4gKU2wAmDgJYrqRSmxRpxJqstahi411CokpBb6kPSilj3HZb61QtVZO+2cjc+jFo4yPQD+yeagSTJ2ahoMRs9/gRpN7/MejLz2r+ns2ACPbOrcg98FNYmzd55FbNgMb9zRxqyaRTpOTkqjSXkfNWLCL/+P1q3pJ3fhzGilXhDmoyhIvi5teQe/DncN72e69WcaGSYSB39e0orn1f4EWSpXQRaKwWAwpFV6n+p9unaL8Ae/Gk1WzBzFMIrsLsJHzvH/4Sn/vDr+YEicsAqFbewu+50JPUm+4dKfy+JNVUD4Mz5Byh1JfWvCYE0T8Lzqx5MA7sBs+MTj2afZXFPbQf9va3QCRUlywWC6YtQC2I4RPIrX9AndTOnp1+FFaVb8y8TVzVxhAwmE/oVM0uXJq3wwe8eROu6izfjnlzjx1WRuHcPT9Wz00RbBWygKYhd9WtyF7/4UDrdsKXLhK6FngP1eFxC+M557S1T8B9RZg/vPWcvpYt9aERhsS/+PzXiw535oDhRjlHzDdeppM6jCZ1N/LrHsarxGSUDEiZFupwnLoZqfR3t38WzIPvgGfHKpMGY6DMGOwdb8HZu0v1NNH6BlTbwKAhxkZQfPlZpXcXNzzlp17X6KEiySLuk0X4HRVO3VavgzT8eXO2b4a9d5dywfL+WeHM2/AJFF5Yj+zdP0Rx4wYvR6Ra/xRfUstdfgOyt3zMq/sacE5MXNcCt184rsDx4aKS5MuWQ4aB/Xe3SK/95H//fy3fI1QBVTMNQaLwNIB9AJTcadlCGT8TZvNcVXAFkoIpaaISegxNhYtL1aTlPUKkwoBHDRO99/8A+qE9VQN6YNuw33oNzq7t0FechdglV8BYvRbanAVgRvNBD6pc4LHDsLa+AevVDXD27lRivdfKvsai4z5ZBBGg1QRKEo1bqNH7UqooTmnetkFfuRqxi7150+fMV93amoUkBffIARW1WZTzdmCP8taoPhXV5o2EKoiTX3cjMjf9JkSqL1C7BXzV2Qw49kISRC7vqujOSWfHVsbEC0YsHgjjhb6MHn0rkxSi8C0ifA4+efekdCydn1Qhsc0ibXD0GtUnfbjo4FC2GFxrEc5h7tmG9K9/CnPXVu931U51+SVJALqpRG19+Zkwzjgb+tIV0GbN9Xqc6vpUyUB+TriqQjVlM3BPHFFl9ZydW5Xk4p445hnnqonRZVBuznhwwVmtQFiAyN
eRBa6+v1AkoeZthZy3NdCXLIc2a55qJM10Y+r3l5tcdeuxIbLjat6cvbth79qm1DUpXXgekGnmTc59IoXsVbcje90HfI9I8CUapSqSNII9q4kIh47lMTRul29qF6C/c3Xja3ee2x+IWyp0wti2+wT2jNPHAfZPAPrhx84vmZdAb9poWtLTGcNgrHoOiUuE/ZkixqwApIwSOIc2dBSpJ+5C4vUNYMVC7RO+RBykyl2rjvCsvx/awGzwvkHVtJjFExP5KlTMQ2TG4Y6c9Cp8jw57Xg9lwcf0C94HMzzJImwDZyOQqombr6GinPbH5fNmgKdS4H0D4INl8xaLn5o3KUlkxiBGhvx5G/HmreQmne409+/nzFmA7I2/gcKFV/pl94InC6ky95hG4Lkj+YKLvYdzqkJ42RI5CuCzMXH00RsuOjeYewVylRp4ekcBmczoMq5p3wdwDXw7xKxeEwvnJFqyEvcYXL2qQZLFgUyxZl3QhsE5WD6LxGvPIvnsQ9CPHT61maeDWph0Sh9mZVIK+f9XGmqp538jE+RHXFaPuuws5P4TBU/iaMh+WGnelJG3/H3/vxudN6lu6AaKq85Txk1rxdmnrhkC4rpUl4Nn8uPDRRwZKpz2OwIeZOCfddMDJ+88I5itHvoZdO2qOB5+deQgwfk1gMsBKCF5POeoiLREXGv62eRdQlyjqraMtKGh19SUehIY5IkWTyF3xa2wl56F5IZHEHtrI3hufPpFWmsht/I8mZ/PEWs9+zRMSBJTcSA6IIp1ShuYZt7Q5Nz5a86ZuxD5992A/KXXq3iLMKvEa4whFmDryxIcR2AsY08ueZJnhPvStjZ8dUBkgbD7kpSgxXWHET0MYD/852vLL5m1WyJyRxDyNfJTJI8Mxg1FKIGeF+TVDbQXr8TYh38Pmds/BqSCdbvVC7n55CbUUt1NFhPws1vleBV5hOqnqwGNoXDRFRj59B8he+0HfONmuC0lTK3FMgwVIK+WyTtTQsEJ2EagpygdjLGzhLYQxs3npJFnsc0EPDohfBMwlnVaqpMBX8qwpinhJ0kjFN1LCIhYAs6KVeADCegpUpshdHWA+XaKpE8UscCSJ9sG5geSTRBHk3VFG76vynkhaP0GCu+7BvayVd6NQ7BXlENJF3oI0oVLGBm3J3OdlN0eEpq265o1wbqo26bpfvj8noJLdO9EvU/m5eyP5+yWDmaXCDmnemVxiYGYrtysYZz/jAjEuYoI5GWbWG0CI0DyKKkdcUBPea9utVU0gpI3R81ZEuEQLj+dYOWzQUxXwVisTRXhJVkE3dVMXi5bcJArTE40o4Mc9P+z9yVQclTnud+ttbfZRyONdoSYRUIImc2WhYUWsEViHib4OAbs45N4g2MH85I8h9gJNjbmxA7BwHNYEoixXjACO/HDBssGIctgErMaSSCBJCQQQpq1Z6ant+qq+nPurapRa6ZnNEtVd8+ov3OaQV3dXcu997v//v9cVzXfs/yKZkd/4JXX0SzNeZ5Av2XAlfx+uWDA2bEmqkJRJj9LMpaNkDV6MBdXSRpDKrKWPWaK/ORAjkVdUd2oMjdoiS9uz9hvun8tNx6BhtnUhjWpH2qMzNyoybxXKeIpigHhANIcCxd/Tvkv/szIHqfG55k7pDGeHSd5WXXGrQhQJafurN+wbEe6ME+MaiYi9mtbrX71w23+31/RCOPP37cct790sLddrvopGC4GUMvcep+JlIn6am3SEgDngKRpQ5PkgjVbOao0GbWmgu60/ynpxyff8HT4vMmKPKKwh/37+MePN0VmE29fOFMw4pnZJ/6FfdyphHyClfLIgo0hP/PfUlTxCvxemOMZ8builgjUSlnCfjHslztsSf3PS7u1tK8ndFFUT32jHiPLYk/LoP8CsAlu2ns8kRP5JVORMgyLkLZsREf5Df5QG3QV6ZyNwclWGC8Ecnqb2OOJSGQjJ/IpyAcTAztOHj7W+xESISf6oME3scmmQYwFLl3EE4awYZ
wY+seelmD990+bKZDZVVQN+FNnzuKyejeBHvH6mzhBJyYGUlNzffLHkzTH7mGiyQyzwuqobthJn5tLGGqoFE6SCiYFR410JIzgBk1mTEgXfi9bLwx8cOSa6QHhkRfS/xa/siWYrajoJjNNVYmBPQngOe89i4D4gCH8yVO5Ta7LDZr2mOHgMU1Gg69eEwJJMkgtUipoBb7A5iqkrAbGF8wN0vLbjQq3o1nvgCGaLJ9o68R2yWK/ba3a6Ps5PRSdMC5uj6BKo6ME/HhIyoBT87M/mZvy+GVMGrOUHz9Xva6gWvNRGxNeEr2iX0wXEECqJog+KKiiwZb/v+/FXQxPYQfQZYMeshQ5/ollK30/r4eSOOWYotlkS79kwFDTDLKB3oEcjClmmDqqiT2masJZf1ZEFZZrf1L4ZNiqXtFIphFIC4FkJRCVhKsiYUUJJDbGdKWLYSX4bID9GjLboanB2mVKQhirz6iBoutdNrDZ6wrvVQuKD0xdyuBkMZgbvS0B3BaLUy4aDHe3kpiYgBURY5qAwZEIfU4xR8CqCP/FgWQOydSIAr8dJNPmTX/fEN+4LOL7efNRsrAfRVHIluwnAfzKczQSHI/JyECUiSNj2UgNbxUw/Bok5sMSJ6fRkRaafuGWpyiEZ1YNJupNV+RAIjpFNfCcjZ5+Y3gypc3A/r+VTj7zr1uCry1bMsJY36Lj0lWzemyZHgRwDF47AtN2RK4pBlgJ1SQ3etFgcnu2+lT/2fGSCMKoKCblDybGi7iE4eNwKZIUiFcEbipFPGE4rTpOPHSACJujsfrU57T6AM58IkoaWPzi8zaytvlbEH7qFPtwxa7BHBLJEUadCYMzcSJnwyyQ4ZbK2Rg0pn4OAS7iaq6IW+GL8ofEYIt6GgU63E0SMmOIBBD+Dc8pkDbRN1JdN8DYQ7KivLSuvcr38xZCSQnjvPfJqNGr0mBCytjjvW/ahJ7+rJA2pgouRfQbtvCccGGDk0jatEXKu19h4kLE1ULTP7HjlAANSRi+tTt0q2gFEaAFN0iLqyLGyLCDVxiZD8mKVrQmLyWf4etbY0jJyk4C+zdOpHAHIJm2hGriB7haEjcs9GT4y0afYYMxyVdd0xFxS5WrXcG4QQAxyVcjtSjoG4DdwkP/YA79yRFBWn0g3KdC27e+Lfhq6x5KThgc1aqSY8zeAhKNjwSEAXTAEE1Z/NgIuFbCVRP+IteIFPaxcjOpuqMTV1D2YJLkmxtcl4OzW3htD7v7jeF9dvgk3moy/FwOKUVVgstihm9srcLL/z54hGT8AMARDBlASZQey00xAnQ0SFzvVBVhrJoqbE0XzZwrRozyh4jM1aa+K6uSM3/8TizzYFmE7r6sCDcYdoa3iPCDJWc1dK1tLY7twkNZEAbHBZ9pgpKj7cREBKhIKeUPKZE2haQR1DIUxip16sYqUriEUQZVdwOqRTmT4NQvmVrqtyzIQg2MLLyYi77BEapIhoh+OCjj+WMHit/UumwIY0N7FMbsZNKycB/Afu+9z+d/T78hEm2CinJQJUkUZp3s4DMuIXIRVy4tYRBjhdPsK8gDifYOthqa9C/wzSUqJNPgyCKdtdAVHxHRyf/xNDE82KA3GOuXFj9/qWwIg2POW4uQaIjsJ6K7hipzuapJZzxbyErsG1RZEpLGpEjDbSPgFNEp0WJ1i8Jg1YcgzV7ge/MdX+D1fi2xrYcT+2STBT01Vg3wHiyb0MVVEWNEAONhEO5sjpnvrn+jLrDzj4WyIoxzLmSoZhoZdu4JG/SjfNUkmTaFPjelJssngS7LLmlM9JsEW9acDMgSgmwb0qJ2hC/7LOQ5iwIvajsh2DZYrBryuetBsdrAa2iOCU6sqoaJloaWhGThf4vD4egdMNA/UhVJA/QvhiJvz7II4bK+QK9hNJQVYXD80QIFkUjVIBjuAfC7/GPxgRz6T+zs5DsEaSjKxElD7FqlIwzGGGwzh8G+OOQzzkb4ii9AXniGK2mUWEWxLbCaeo
Q/cg1wzkbkiJXukrgkpihuo6Lxf01ybV1aABmoHkQmasos5BXh+JUF9mCVGjLev6QmsGs4GcqOMDgubovByDUeIOB7AA5575s2oSOeFYVPAyUNt5XduNUTkYA2eTHXL9iWhe6Oo8jlTChLViDy8S9BXX6+E1BWClXJbX0oz1+KyJ9cB+2CDyOZSsPIpEuadsMlQVsef/EcjyyCSFf3wB9HNmejozfjVNI/8fAek+F7lx5qOLyutXgxF4VQloTBUVWVhUzSNiLikkYS3kM1bHT2BmvPwAnqyXjOQqLBr62GSpqvKoHQ+c4hpJLicUFuPg3hK66FvvpSMD1cXLuG6CimQF35QYe4lp0v3u7p7EAunS5pop6QBMdpoC4GWcC1W3TGs07i5YmH4jZwZ5ZZzz/TWrSAzlFRtoSxoSUEPRTO5oD7AfzHUK4Jc7qm8Yc71QS1k4FPkqiqjMvl6kQPllbCUGQZx94+iK7OjqH3pJpGhC79NMIf+xzkuacNNWEKDK5UITXORXjTp4RkIc87XRzK2TYOH3wLOSMjVKhSwYnKVU4qYHjekKDJgj+y7v4s+hIj3KQ5Am2WGB6OhKLmh9om79nxC2VLGBxr28KI6OFugN0G4Pn8Y/EBQxiHgha0NVkaH2kIwiiduEguYaR6OrFr56snXpqqQztnAyKfvAHa+y8Bi8ScRsV+EgcnIssCC0WgnXsRolfdAP3Cy8AixwOLEolB7N/zGmRh8CwNYTDk18IYu8hSVFMCN3DCjbfo6TMKDcdvJJvdqYYifZvOiAV+HeNBWRMGx8b2GCLM2knALfn2DC5cdMWzGEj43zZgOFRZQkxTx3alMSYkjFKaF0VncJjY8fQ2ZLIjg3qEivK/Po/In14P9az3i8UtVIepeCxs2zFqhmNQV64WpBS+4jrIC9tGJOO9/fYhHNyzG6EAmhFPBLY2dt6PJkmIqScZbx/gef86ejLIjWz5+RqzpVu+c/bLBza0BFsUZyIog9DEk4P0KCnZ5JMmSd9nwDe8niamRcJIpCgMsbAS6GL1dpx0zhQNkUaAsZIX0SEQ5sRCePm/n8Ou3btx3jnvG/EZpmhQ28+Hsqgd5oFdMHb+DuZbr4ESfYBl5jU+LtAAeXgXdVWFVDcLyukroK54P5TFywRxjIYdO3Yg3dOJUHstqIQRqU791cLjpIt4nODCvT3wX88YFjp6ssiMbBfaScB3SbJ/d8vrF2JNoFcyMUwLwljbGsWONwcNK515EIQFYPgSH1t+jD/sYz0ZzJ8VRkgPph2iB0+nlZiFjGnhxOZlDLYeKSlhcKmruSqMROfreHjLFqxaeRYUpfAQc1VBXbEaats5sI69jdyB3bAO7YXVdQQ02AfKpl3pw/2CxABFE1KJVNMAefYCyIvboJy2HHJDsxO0NgaOHTuGx372M6zRJLFzBxlPMxZI1PMMu56j48TvpaiLRLIijKFh8nmbxeBIj1+SiP1gkOUebQxVm2tK7BUZjmlBGBxrW2LY/maqbzCTuF0leSGAP3GnsbAsH+3JYN6sMDTVp8K+o4BPprBr00iZ1gkTn7yyb6VaDESoD2mYG9PxyKOP4sqPXY4PfOADY39J1SEvaBEvyqZg9/eC+rpgD/SCkgOAlXPuSQ9DilWDVTcIQyqrqhXSynjx2M9/jl1/eAVXrVkKmWHMequBgjnFc4gLUO41iKK9RfCEeBAekd6ssF0MI4scQA8Rwz21oVh6XWv5qCIepg1hcKxriWDnS5kjR+TkzYxRA3/Ls54lUqaQNJobw0JFCRLMjdWQJCZUFK9CubC+MwmMzJIY9cjdJZfNqsHW3+3F7XfcgdbWVtTXj690G9MjkJsiQNN8X69rz949+Od77wUzDSyujZY4jIy5xmlnfLi0E1FlXzKWxwNbkEVGGO2HHyLglxZwa5KqOj/RWp59bsre6Dkc++vSqIprrwHsGwB25h/rG8wJm4ZlU1GWK59sMU1FSHbEWFFYVpJLGljJd+/ljV
XQdRWPP/EE7rvvPuRywRuGR0MikcDt3/8+Xv3Dq5hbHcG8WKh00gWcHjK2FnIkRUVGTPOnvMF4wCXA7r4sevsLePcI/wXg5lmmfnBBrPTxFqNh2hHGFUvqwOo1MiPaszbR3wPYl3+cM3dnTyaALu2FIQJ7NAUxVYHERd0iibWjgd91W30Mc6rCSKXS+Kc77sC/P/QQLKv4yWiZTAZ33nUXfvzww+LfS+tiaAxrJTR4kpAAZT0kxqsYxs2hMwuyMNDVZxQgTNppgb7WFDNeOlCfweql1UW5pslg2hEGxwdX6ogylWSb/ZKIbvaK7sBdMD2cNHozQvwrBphrXQ9HIq7xr3RbKL/l+VUhtDXExG7a3d2Nm775TWzZsqWopJHNZnH3PffgtttvF8QlyTJWNlUhpARrYxoT/MSSDD0cDaQVwKindUs0dMWzhebkARBuSqeSzwzaNbjqtKaiXddkMC0Jg2Pd0hD0cCRnZ9gjRPRtAEPhjd4AeepJsSCpOpgSXL/O8YDvZDW6igua64QrmC/Uw4cP46+++lWxgJNu2HiQ4CR1y3e+g299+9vo6+sTRtO6kIpVTTWlb/WkyGCinmdxBomPR09/Fh29BSOT3ybQ15MmfhGLVdtrz4gW5ZqmgmlLGBwXtUcQqQkZKsk/ssn+B9G92oVHGp1FtGmILu6lTnF37RgfmFuPhrAuJqwsy8Kt+Xc33YS/ufFG7Nu/P5hzE+Hll1/GX3zlevzjbbehf2BAnNsmwhn1MfEqbUEwEqntIpekCNdxfA4WlCze4xudRew/6mNR8yNnBd9TxA9Ma8LgWNce45JGyrJy94Lou4VIQ0TSjdGg2R+Qo46UQcUriwjLZ1VhZVP1kL7MF+7AwADuvvdeXH3NNfjX++8XJOIHOFHs378f//Dd7+KTV1+Nh7c8AiOXgyRJLoExfHB+PZoiWsniL5wLdWthKMF7IPh9dsczo0kWRwn4Zlqy/l8oFDHWtZWf+3Q0TCu36mi4sC2Ep/b0p8xs5m7R6JSx/wNAUDafn70DhohgntMQgqpMtGzK+MFkRRRmKTX4PdfqCi5e0oTt73SLycuEg8DZH1548UXs2bMHWx55BFd87HJsWL8BixYtgq6PfyFxkkgkEti7dy+2bt2Kx37xC+zevVsQhZxn+OWfa4zouGhhoyiaWyxj9KjXrWqgkwSZTQXMq5gVzzp1LUbe7zGb0a2KxTZXRWKZjWUYazEWZgRhQOSc1GD7nv5ENpv+vyCZwNhfA2hEXps5vnA4aehBBHd5u5eql00h3vULG9BaH8NrXQOiaK0HRVGQSqfx1LZteObZZ7F40SKcd955uOD889HW1obmOXNQXVODcCgEVVWFsTSbzSKVSqG3txfvHD6MXbt24fkXXsCrO3eio6NDfIYThTzMS8Sf+bnNtVjVVD28J2gJQI4EGKBhOifKSWacxMiRpzgC4FvMZD/SI+H02tbyt1kMx4whDI6d+/fjzCWnJ7NG7p8ZkGMMfwNgyOzcP5gT7N/cEEI4gDBykuWSp7h74Dvb4poILj19Nvb2JNx+X8fBpQ3+Mk0Tb+7bhzfeeENIHDU1NWior0ddXR2i0aiQOvhnOMEkBgbQG4+jLx5HMpWCbdtDv1MoBJ0vmIiq4LKlc1CjKyWXLsRDUDUwWfV97EXt2ZyNY70ZURWuwO8fBti3sqa1OVoVzaxtmX5kgZlGGNd/9Bzx9+m9ycHBdPZuldlpBnwNwFDoYiJlwrLSQtKIRfy8facaNXzsqDXFqxEqwBUtzXhs/zHs7U4UrHLNGHOkAlkWBMAliO7u7sJSEmPi8/zlEcVY4BLF+XPrsH5hY5kIXeSojLL/wXWprOnkhqRG1OLk2E+Em3Ky/JNwWDc2TlOywEwwehbC+rYoYmE9Y1nshwT6WwIOeMe8Eu5HOtNOc1s/J44kl7xMXz64NNVWH8XH2+ZBkU+uhnlEwKUFRVVHvhRFkMvJiAKuKlKtK7hq2XzMjuploI44IC
3kezsIvgm925kWfwtgNxF9VYL5aLUeMi5ZVh51LSaLGUkYcEkjFI5ksoweItD/BvBK/vFszsZ73Wl0xTOi98OUZQK3ZydKnOI+HBJj+ETbXFwwt66oMSn8VBef1oRNS5pK7jUaAh8XPj6iFsbUrom5Bt3efkNsPunsiJKRNoDnQOwv+tOJn4XC0dxFLaWvmDVVzFjCgDCEhlEdiloZK/W4CXwFwA53IMXc8eooHu3OCAKZYu8zEVnppE77dQdTB9/ZF1SFcO2qxZhVpJ2eP9cldVF84ezFwltTatPFCRiqtjV58LmTs2zhMj3akylUX5aLGr8khq+8p9T/prG20S52S8OgMKMJg2N9SxThcLX1R8sHfkvAlwh4lAsY3nHPg3K4IzWaSDl+8Jmkh9xKU+WzSvhO+JHTmvCZFQtFwlyQV8ZVkSpdwZfPWYLz5tQUVaoZD4RKwqZmw0ilHZW2q88JyBpezwIMPwLh+tnp9AvNGmhje/nmhkwUM54wODa112HrgTmYk1V3G8T+CsDdAPrzP5NMW0IP7Sk8CcYJJiakUxPDr6ufOkiUnWP4/MqF+OgZcwSBBHF5nHxlJuHTKxYKNYiVsP1IQTB3fITxd2JXxtz7608YeLczhf6kWegnugH8k2GzrymKcqCnoRGbWspI3PQBM8pLMhY2tUZwV2cvVnSZ76Zz9A0myQdB7AYAi+EKB1y0PNqTFUbRWXU6QtoEXa8M7oSUyq5VIVdFZkd0/N3qFiQME08e7BT2Db+ms01Og+I/bZ+HG85dgpgql42h0wENFQJy9snx1zFlboUsvpn0DuSEe7jAc9sLYt8zrNzD1ZHq1EXt0ysga7w4JSQMD19uqse+dBNULdSvEt0DYl8B8Pshu4YrvscTORzuSIv6GnzOj39ReUa18nysfAGfXhvBty5sEwZJclUIP35XlRiuXjYfX1/dIlLYy4ssXIFCVHYPjXtAvY8Npky825EWqekF8pIsgG2XbPaltK5sDumh1BE75fvllwvKc2YHiM+dy7BxWS30cMTY06k+RoTPAXiIz4v8z6Uyjp56tCc9IYMoO0lF6lKD747t9THctm45rlm+QNSwnOziJvf3miI6/vKCpfjGmlY0R/XS5ouMBckljHGMJhNRm7ZIXjzcmUYiXdC+xdXafyGwaw+dZWyLauHcJWfW4urljUFcfVnglFFJhmNNSwxP7UkT2bTLytl/DWa/DtjXAmwB8rwoPX0G0hkLjbU6qqOKKMs36nogcuIwZM9tV576KyeIRdVh3LymVVTnuv/Vt7EvPujYIKSTqymcEPhLV2SsnlePL65ajEsWzxI9PMpOssiHdHK3N3PVq0G3+bcgikJSJsN+EO6CLW1WdSXevjeMC5eXPo8oaJyyhAHX7crx9K7+Y6k0u00OYScj/CVAawAMZSglMxYynWnUxlQ01GiiOvmoULlKUv6PlS/supCKL5y9EGvm12PLnvfw60OdONiXQtZ07C/568rjAYkBtSENZ86qwmVnNOOjp89Gc0wfIpGyBpf8+PiMcp1ef9PeAUNUbssVjs/JANgGYv8oE3tWCYXNde3lVdk7SJT/zC4CspEqhKRB49iRmscbmrreZJCuA8PVAGbB23VsEhMpmTEFadTGNFFs+MS550oYAWZD+glvga9orELrB1tw1fL5eP5oHC8d7cP+eBLxjIGMZYuQ8piqiHqcyxqrRDLZyqZqYUSFG3dR/iBHVdRGBk8JadIipwNZvyHUURSWD98j4AFidP/83tSh3rmNuKjl1CELlK3MXCL8+rWsKDWfysZjErFNNsP1DDg/X9ogd5eNhhQ01mqigZJQU+AY1aS+Y9A3fxus87AbUTg9wCeC7N6HYdkYNCwkc5Zo2sQJI6LIiKqyKMevuG0Cylr9GA6yQdX1MK66Eda8Fqdbm6t+pDKmIAonz4gKaSxZAM8wRncyQ3vSCOUy7y5pwBcjp97yqUgYebhkuY5HX9iJusjiwVd27f3Jme
2n7SawzzLgkwCakeeP55MrbVjCrtFQrSGsK068luzUW5huU8kzYHIojKE2pAiVhQ0dJ7fpGSE3jXjiBLjlB7wojIxhCTdp/2BuKFqzAFm8Q8BmInqgoaHxrYF4ArVSCH98CpIFKoQxEh8/7yzx9xev9FAItKeXWV8Py+oOybKvBeFDAISD3RNje/tzGExZqIkpqKvWEVbKo4jOVEDuf4IJ7yoRyKmIRqqGrGGifyAr3OfZnD2aeToBYBsBP5AZns3AyBhZAxevmDlRm5PBKedWHS/+eFUD+vUQoqFIetPyHz5myezzRLiZgD35UT8ir8C0RQn5d46l0NFnIidNb8KYqbCYgu6ELcbpWG9WkAVGkoVFwB9AuJEB183pyT7FFDUzT5+LC08vn0zkUqEiYYyBy9ucVOQdb6QQSSQODyjybRroSTD250S4nAFz8z/PRdyujIUaU4VuuzPx1JRcyw6MgIytoGOAkNOs0YblEICfMOBBi+zXNcj24+vm4utllH1calQIYxxY2xrBjURY99qAyRheNixjnwTpcYD+DMAGALXw7BtMgiXrQgYhrwF6RY4rHcjtuWxBjAtJciGy6Aaw1WL0QzD2nCapab4wdM2okMUwVAhjnLiVMdzq/v+2N5OJeL/xRFi1f8+YfYkE9mkAqwFUC8LwXHfkuvzdNIaKtFFEUB5ZuLAU3Y3CHbLNxAn4jQ1sBqOnGyy1PxlTsWEa9AcpFSp73ySwoSWKpjodqqb0bNra+GML9FkQbgDYdgJL2uqwfAU+cS3nNZPsiOUKThLeKx+CMARzi5DuJ4jhy8xmX5zb9M5/arLW310lV8jiJKhIGJPEh5Y62Yj3HXkPp/WGjuy05QeWU+5XpKgflrOD1xGTzhnxJZc4OJlUJI4AYB+X6EbBAEnybxhhC9n2UwsPNnYebR3A0c5VuHRlZTDGg8pT8gEPPLUP85pnw4aEubduUuc1Nd0hkXntSb8ouTaOyihMDZ7qcRLpzdCqXzzSdsk1zzVf+cayBQYkTcG6ikQxIVSmqo/o/LOPgGy5RY7KWySNnT2uL7E80qiMxsQwTqIY+niWOqxk5jNq63lb6/72lqCvbkaiYsPwEUcHeyTI8kYi1jLuxU+j69wVjALvmU3QJkRgs0gNfczc/+r0Lt1dQlQIwye896kNmB2b3QyGy0GYeLklcnVwa0LFoE4t5BPFRJ+RQ+ASA7vEIuns+OcvC+QSZzoqhOET1m7eBkWS1zKw84dijSejYuQtiomI2zMa+V6myZCpZ2R2sJBJ7Mp4OlkJ25wE/icAAP//iFU60gIwwN4AAAAASUVORK5CYII=
+ href: 'https://{{ .Values.clusterGroup.name }}-gitops-server-{{ $namespace }}.{{ coalesce .Values.global.localClusterDomain .Values.global.hubClusterDomain }}'
+ location: ApplicationMenu
+ text: '{{ title .Values.clusterGroup.name }} ArgoCD'
+{{- end }}
diff --git a/common/clustergroup/templates/plumbing/cluster-external-secrets.yaml b/common/clustergroup/templates/plumbing/cluster-external-secrets.yaml
new file mode 100644
index 00000000..20d6f261
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/cluster-external-secrets.yaml
@@ -0,0 +1,43 @@
+{{- if (eq .Values.enabled "plumbing") }}
+{{- $namespace := print $.Values.global.pattern "-" $.Values.clusterGroup.name }}
+apiVersion: "external-secrets.io/v1beta1"
+kind: ExternalSecret
+metadata:
+ name: {{ .Values.clusterGroup.targetCluster | kebabcase }}-secret
+ namespace: openshift-gitops
+ annotations:
+ argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
+ argocd.argoproj.io/sync-wave: "100"
+spec:
+ refreshInterval: 15s
+ secretStoreRef:
+ name: {{ $.Values.secretStore.name }}
+ kind: {{ $.Values.secretStore.kind }}
+ target:
+ name: {{ .Values.clusterGroup.targetCluster | kebabcase }}-secret
+ template:
+ type: Opaque
+ metadata:
+ labels:
+ argocd.argoproj.io/secret-type: cluster
+ data:
+ name: {{ .Values.clusterGroup.targetCluster }}
+ server: https://api.{{ .Values.global.clusterDomain }}:6443
+ config: |
+ {
+ "bearerToken": {{ "{{ .kubeBearer | toString | quote }}" }},
+ "tlsClientConfig": {
+ "insecure": false,
+ "caData": {{ "{{ .kubeCA | toString | quote }}" }}
+ }
+ }
+ data:
+ - secretKey: kubeBearer
+ remoteRef:
+ key: {{ $.Values.clusterGroup.hostedSite.secretsPath }}
+ property: bearerToken
+ - secretKey: kubeCA
+ remoteRef:
+ key: {{ $.Values.clusterGroup.hostedSite.secretsPath }}
+ property: caCert
+{{- end }}
diff --git a/common/clustergroup/templates/plumbing/gitops-namespace.yaml b/common/clustergroup/templates/plumbing/gitops-namespace.yaml
new file mode 100644
index 00000000..3cd7608d
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/gitops-namespace.yaml
@@ -0,0 +1,13 @@
+{{- if not (eq .Values.enabled "plumbing") }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ name: {{ $.Values.global.pattern }}-{{ .Values.clusterGroup.name }}
+ # The name here needs to be consistent with
+ # - acm/templates/policies/application-policies.yaml
+ # - clustergroup/templates/applications.yaml
+ # - any references to secrets and route URLs in documentation
+ name: {{ $.Values.global.pattern }}-{{ .Values.clusterGroup.name }}
+spec: {}
+{{- end }}
diff --git a/common/clustergroup/templates/plumbing/hosted-sites.yaml b/common/clustergroup/templates/plumbing/hosted-sites.yaml
new file mode 100644
index 00000000..f1f57374
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/hosted-sites.yaml
@@ -0,0 +1,172 @@
+{{- if (eq .Values.enabled "all") }}
+{{- range .Values.clusterGroup.managedClusterGroups }}
+{{- $group := . }}
+{{- if .hostedArgoSites }}
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: {{ .name }}
+ namespace: openshift-gitops
+spec:
+ description: "Cluster Group {{ $group.name }}"
+ destinations:
+ - namespace: '*'
+ server: '*'
+ clusterResourceWhitelist:
+ - group: '*'
+ kind: '*'
+ namespaceResourceWhitelist:
+ - group: '*'
+ kind: '*'
+ sourceRepos:
+ - '*'
+status: {}
+---
+{{- end }}
+{{- range .hostedArgoSites }}
+{{ $secretsPathDefault := print "secret/data/hub/cluster_" .name }}
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: {{ $.Values.global.pattern }}-{{ $group.name }}-{{ .name }}
+ namespace: openshift-gitops
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io/foreground
+spec:
+ project: {{ $group.name }}
+ source:
+ repoURL: {{ coalesce $group.repoURL $.Values.global.repoURL }}
+ targetRevision: {{ coalesce $group.targetRevision $.Values.global.targetRevision }}
+ path: {{ default "common/clustergroup" $group.path }}
+ helm:
+ ignoreMissingValueFiles: true
+ valueFiles:
+ - "/values-global.yaml"
+ - "/values-{{ $group.name }}.yaml"
+ {{- range $valueFile := $group.extraValueFiles }}
+ - {{ $valueFile | quote }}
+ {{- end }}
+ parameters:
+ - name: global.repoURL
+ value: $ARGOCD_APP_SOURCE_REPO_URL
+ - name: global.targetRevision
+ value: $ARGOCD_APP_SOURCE_TARGET_REVISION
+ - name: global.namespace
+ value: $ARGOCD_APP_NAMESPACE
+ - name: global.pattern
+ value: {{ $.Values.global.pattern }}
+ - name: global.hubClusterDomain
+ value: {{ $.Values.global.hubClusterDomain }}
+ - name: global.localClusterDomain
+ value: apps.{{ .domain }}
+ - name: global.clusterDomain
+ value: {{ .domain }}
+ - name: enabled
+ value: core
+ - name: clusterGroup.name
+ value: {{ $group.name }}
+ - name: clusterGroup.targetCluster
+ value: {{ .name }}
+ - name: clusterGroup.hostedSite.secretsPath
+ value: {{ default $secretsPathDefault .secretsPath }}
+ {{- range $group.helmOverrides }}
+ - name: {{ .name }}
+ value: {{ .value | quote }}
+ {{- end }}
+ {{- if $group.fileParameters }}
+ fileParameters:
+ {{- range $group.fileParameters }}
+ - name: {{ .name }}
+ path: {{ .path }}
+ {{- end }}
+ {{- end }}
+ destination:
+ name: {{ .name }}
+ namespace: {{ $.Values.global.pattern }}-{{ $group.name }}
+ syncPolicy:
+ automated:
+ selfHeal: true
+ ignoreDifferences:
+ - group: apps
+ kind: Deployment
+ jsonPointers:
+ - /spec/replicas
+ - group: route.openshift.io
+ kind: Route
+ jsonPointers:
+ - /status
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: {{ $.Values.global.pattern }}-{{ $group.name }}-{{ .name }}-plumbing
+ namespace: openshift-gitops
+ finalizers:
+ - resources-finalizer.argocd.argoproj.io/foreground
+spec:
+ project: {{ $group.name }}
+ source:
+ repoURL: {{ coalesce $group.repoURL $.Values.global.repoURL }}
+ targetRevision: {{ coalesce $group.targetRevision $.Values.global.targetRevision }}
+ path: {{ default "common/clustergroup" $group.path }}
+ helm:
+ ignoreMissingValueFiles: true
+ valueFiles:
+ - "/values-global.yaml"
+ - "/values-{{ $group.name }}.yaml"
+ {{- range $valueFile := $group.extraValueFiles }}
+ - {{ $valueFile | quote }}
+ {{- end }}
+ parameters:
+ - name: global.repoURL
+ value: $ARGOCD_APP_SOURCE_REPO_URL
+ - name: global.targetRevision
+ value: $ARGOCD_APP_SOURCE_TARGET_REVISION
+ - name: global.namespace
+ value: $ARGOCD_APP_NAMESPACE
+ - name: global.pattern
+ value: {{ $.Values.global.pattern }}
+ - name: global.hubClusterDomain
+ value: {{ $.Values.global.hubClusterDomain }}
+ - name: global.localClusterDomain
+ value: apps.{{ .domain }}
+ - name: global.clusterDomain
+ value: {{ .domain }}
+ - name: enabled
+ value: plumbing
+ - name: clusterGroup.name
+ value: {{ $group.name }}
+ - name: clusterGroup.targetCluster
+ value: {{ .name }}
+ - name: clusterGroup.hostedSite.secretsPath
+ value: {{ default $secretsPathDefault .secretsPath }}
+ {{- range $group.helmOverrides }}
+ - name: {{ .name }}
+ value: {{ .value | quote }}
+ {{- end }}
+ {{- if $group.fileParameters }}
+ fileParameters:
+ {{- range $group.fileParameters }}
+ - name: {{ .name }}
+ path: {{ .path }}
+ {{- end }}
+ {{- end }}
+ destination:
+ name: in-cluster
+ namespace: openshift-gitops
+ syncPolicy:
+ automated:
+ selfHeal: true
+ ignoreDifferences:
+ - group: apps
+ kind: Deployment
+ jsonPointers:
+ - /spec/replicas
+ - group: route.openshift.io
+ kind: Route
+ jsonPointers:
+ - /status
+---
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/templates/plumbing/projects.yaml b/common/clustergroup/templates/plumbing/projects.yaml
new file mode 100644
index 00000000..7f3b8c22
--- /dev/null
+++ b/common/clustergroup/templates/plumbing/projects.yaml
@@ -0,0 +1,29 @@
+{{- if not (eq .Values.enabled "core") }}
+{{- $namespace := print $.Values.global.pattern "-" $.Values.clusterGroup.name }}
+{{- range .Values.clusterGroup.projects }}
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+ name: {{ . }}
+{{- if (eq $.Values.enabled "plumbing") }}
+ namespace: openshift-gitops
+{{- else }}
+ namespace: {{ $namespace }}
+{{- end }}
+spec:
+ description: "Pattern {{ . }}"
+ destinations:
+ - namespace: '*'
+ server: '*'
+ clusterResourceWhitelist:
+ - group: '*'
+ kind: '*'
+ namespaceResourceWhitelist:
+ - group: '*'
+ kind: '*'
+ sourceRepos:
+ - '*'
+status: {}
+---
+{{- end }}
+{{- end }}
diff --git a/common/clustergroup/values.schema.json b/common/clustergroup/values.schema.json
new file mode 100644
index 00000000..e88fc5a9
--- /dev/null
+++ b/common/clustergroup/values.schema.json
@@ -0,0 +1,908 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$ref": "#/definitions/ValidatedPatterns",
+ "definitions": {
+ "ValidatedPatterns": {
+ "type": "object",
+ "additionalProperties": true,
+ "properties": {
+ "enabled": {
+ "type": "string",
+ "enum": [
+ "all",
+ "core",
+ "plumbing"
+ ]
+ },
+ "secretStore": {
+ "$ref": "#/definitions/SecretStore"
+ },
+ "main": {
+ "$ref": "#/definitions/Main"
+ },
+ "global": {
+ "$ref": "#/definitions/Global"
+ },
+ "clusterGroup": {
+ "$ref": "#/definitions/ClusterGroup"
+ }
+ },
+ "required": [
+ "clusterGroup"
+ ],
+ "title": "ValidatedPatterns"
+ },
+ "SecretStore": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the external secret backend",
+ "default": "vault-backend"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the external secret backend",
+ "default": "ClusterSecretStore"
+ }
+ },
+ "required": [
+ "name",
+ "kind"
+ ],
+ "title": "SecretStore"
+ },
+ "Main": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "clusterGroupName"
+ ],
+ "title": "Main",
+ "description": "This section contains the 'main' variables which are used by the install chart only and are passed to helm via the Makefile",
+ "properties": {
+ "clusterGroupName": {
+ "type": "string"
+ },
+ "git": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "repoURL",
+ "revision"
+ ],
+ "properties": {
+ "repoURL": {
+ "type": "string",
+ "description": "URL of the pattern's git repository"
+ },
+ "revision": {
+ "type": "string",
+ "description": "revision (branch/commit/ref) to use on the pattern's git repository"
+ }
+ }
+ },
+ "gitops": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "channel": {
+ "type": "string",
+ "description": "The channel from which to install the gitops operator"
+ }
+ }
+ },
+ "multiSourceConfig": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "Enable the experimental support for multi source"
+ }
+ }
+ },
+ "analyticsUUID": {
+ "type": "string",
+ "description": "UUID used to generate analytics"
+ }
+ }
+ },
+ "Global": {
+ "type": "object",
+ "additionalProperties": true,
+ "properties": {
+ "pattern": {
+ "type": "string",
+ "readOnly": true,
+ "description": "The name of the pattern being installed. The default is the name of the repository's folder and is automatically set by the Makefile"
+ },
+ "clusterDomain": {
+ "type": "string",
+ "readOnly": true,
+ "description": "The FQDN domain of the cluster without the 'apps.' component. For example: mcg-hub.blueprints.rhecoeng.com. Gets set automatically by the framework"
+ },
+ "localClusterDomain": {
+ "type": "string",
+ "readOnly": true,
+ "description": "The FQDN domain of the cluster including the 'apps.' component. For example: apps.mcg-hub.blueprints.rhecoeng.com. Gets set automatically by the framework"
+ },
+ "targetRevision": {
+ "type": "string",
+ "readOnly": true,
+ "description": "revision (branch/commit/ref) to use on the pattern's git repository, it is set automatically by the pattern's operator"
+ },
+ "repoURL": {
+ "type": "string",
+ "readOnly": true,
+ "description": "URL of the pattern's git repository, it is set automatically by the pattern's operator"
+ },
+ "hubClusterDomain": {
+ "type": "string",
+ "readOnly": true,
+ "description": "The FQDN domain of the hub cluster including the 'apps.' component. For example: apps.mcg-hub.blueprints.rhecoeng.com. Gets set automatically by the framework. Only makes sense when using ACM"
+ },
+ "namespace": {
+ "type": "string",
+ "readOnly": true,
+ "description": "The namespace in which the ArgoCD instance is running. Automatically set to either 'openshift-operators' or '$ARGOCD_APP_NAMESPACE'"
+ },
+ "git": {
+ "$ref": "#/definitions/GlobalGit"
+ },
+ "options": {
+ "$ref": "#/definitions/Options"
+ }
+ },
+ "required": [
+ "options"
+ ],
+ "title": "Global"
+ },
+ "GlobalGit": {
+ "type": "object",
+ "additionalProperties": true,
+ "description": "The git configuration used to support Tekton pipeline tasks.",
+ "properties": {
+ "hostname": {
+ "type": "string",
+ "description": "The hostname for the Git provider being used. e.g. github.com or gitlab.com"
+ },
+ "account": {
+ "type": "string",
+ "description": "The account for the Git provider. Accounts allow you to organize and control access to that code. There are three types of accounts on GitHub: personal accounts, organization accounts, and enterprise accounts. e.g. hybrid-cloud-patterns or claudiol"
+ },
+ "email": {
+ "type": "string",
+ "description": "The contact email for the Git account. e.g. account@gmail.com"
+ },
+ "dev_revision": {
+ "type": "string",
+ "deprecated": true,
+ "description": "This is used by the pipelines as the branch for the development repository. e.g. v2.0. This is marked as deprecated"
+ }
+ },
+ "required": [
+ "hostname",
+ "account",
+ "email"
+ ],
+ "title": "GlobalGit"
+ },
+ "Options": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "useCSV": {
+ "type": "boolean",
+ "deprecated": true
+ },
+ "syncPolicy": {
+ "type": "string",
+ "description": "This is the sync policy for the ArgoCD applications. When set to Automatic ArgoCD will automatically sync an application when it detects differences between the desired manifests in Git."
+ },
+ "installPlanApproval": {
+ "type": "string",
+ "deprecated": true,
+ "description": "This is used to set the approval strategy for the subscriptions of OpenShift Operators being installed. You can choose Automatic or Manual updates. NOTE: This setting is now available in the subscriptions description in the values file."
+ },
+ "applicationRetryLimit": {
+ "type": "integer",
+ "description": "Number of failed sync attempt retries; unlimited number of attempts if less than 0"
+ }
+ },
+ "required": [
+ "installPlanApproval",
+ "syncPolicy",
+ "useCSV"
+ ],
+ "title": "Options"
+ },
+ "ClusterGroup": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the cluster group."
+ },
+ "targetCluster": {
+ "type": "string"
+ },
+ "isHubCluster": {
+ "type": "boolean",
+ "description": "If set to true the values is used to identify whether this is the hub cluster or an edge/spoke cluster configuration."
+ },
+ "sharedValueFiles": {
+ "type": "array",
+ "description": "Templated value file paths."
+ },
+ "namespaces": {
+ "type": "array",
+ "description": "This is the array of namespaces that the VP framework will create. In addition, operator groups will also be created for each namespace.",
+ "items": {
+ "$ref": "#/definitions/Namespaces"
+ }
+ },
+ "indexImages": {
+ "anyOf": [
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ],
+ "description": "List of index images for overriding default catalog sources.",
+ "items": {
+ "$ref": "#/definitions/IndexImages"
+ }
+ },
+ "operatorgroupExcludes": {
+ "type": "array",
+ "description": "List of namespaces to exclude the creation of operator groups.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "operatorgroupExcludeTargetNS": {
+ "type": "array",
+ "description": "Specify the list of namespaces where the target namespace field in the corresponding operatorgroup object should be excluded.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "hostedSite": {
+ "type": "object",
+ "items": {
+ "$ref": "#/definitions/HostedSite"
+ }
+ },
+ "subscriptions": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ],
+ "description": "Description of the subscriptions that the VP Framework will install in the cluster. Two ways of defining subscriptions: Using a list or using a dictionary.",
+ "items": {
+ "$ref": "#/definitions/Subscription"
+ }
+ },
+ "projects": {
+ "type": "array",
+ "description": "The list of projects that will be created in the ArgoCD instances.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "applications": {
+ "anyOf": [
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ],
+ "description": "Description of the applications that will be created in the ArgoCD instances. Two ways of defining applications: Using a list or using a dictionary.",
+ "items": {
+ "$ref": "#/definitions/Applications"
+ }
+ },
+ "argoCD": {
+ "$ref": "#/definitions/ArgoCD"
+ },
+ "imperative": {
+ "$ref": "#/definitions/Imperative"
+ },
+ "managedClusterGroups": {
+ "anyOf": [
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ],
+ "description": "Description of the managed clusters that ACM will be able to manage. Two ways of defining managed clusters: Using a list or using a dictionary.",
+ "items": {
+ "$ref": "#/definitions/ManagedClusterGroup"
+ }
+ },
+ "externalClusters": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "applications",
+ "isHubCluster",
+ "name",
+ "namespaces",
+ "projects"
+ ],
+ "title": "ClusterGroup"
+ },
+ "Namespaces": {
+ "anyOf": [
+ {
+ "type": "object"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "description": "Description of the namespaces that the VP framework will create. Each entry is either a plain string naming the namespace, or an object with a name plus optional labels and annotations.",
+ "additionalProperties": true,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the namespace."
+ },
+ "labels": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/NameValue"
+ }
+ },
+ "annotations": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/NameValue"
+ }
+ }
+ }
+ },
+ "NameValue": {
+ "type": "object",
+ "description": "A name/value pair, used to define namespace labels and annotations.",
+ "additionalProperties": true,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The key of the label or annotation."
+ },
+ "value": {
+ "type": "string",
+ "description": "The value assigned to the key."
+ }
+ }
+ },
+ "Applications": {
+ "type": "object",
+ "description": "Description of the applications that will be created in the ArgoCD instances. The Application CRD is the Kubernetes resource object representing a deployed application instance in an environment. Two ways of defining applications: Using a list or using a dictionary.",
+ "additionalProperties": true,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the application in ArgoCD."
+ },
+ "repoURL": {
+ "type": "string",
+ "description": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests."
+ },
+ "targetRevision": {
+ "type": "string",
+ "description": "TargetRevision defines the revision of the source to sync the application to. In case of Git, this can be commit, tag, or branch. If omitted, will equal to HEAD. In case of Helm, this is a semver tag for the Chart's version."
+ },
+ "chart": {
+ "type": "string",
+ "description": "Chart is a Helm chart name, and must be specified for applications sourced from a Helm repo."
+ },
+ "chartVersion": {
+ "type": "string",
+ "description": "The version of the helm chart to be used. Can be a regex like '0.0.*'."
+ },
+ "kustomize": {
+ "type": "boolean",
+ "description": "If set to true it will tell ArgoCD to use kustomize to deploy the application."
+ },
+ "plugin": {
+ "type": "object",
+ "description": "Plugin holds config management plugin specific options"
+ },
+ "extraValueFiles": {
+ "type": "array",
+ "description": "List of extra values files that will be passed to ArgoCD."
+ },
+ "extraHubClusterDomainFields": {
+ "type": "array",
+ "description": "List of extra fields that will be passed to ArgoCD."
+ },
+ "extraLocalClusterDomainFields": {
+ "type": "array",
+ "description": "List of extra fields that will be passed to ArgoCD."
+ },
+ "extraRepoURLFields": {
+ "type": "array",
+ "description": "List of extra fields that will be passed to ArgoCD."
+ },
+ "extraTargetRevisionFields": {
+ "type": "array",
+ "description": "List of extra fields that will be passed to ArgoCD."
+ },
+ "extraNamespaceFields": {
+ "type": "array",
+ "description": "List of extra fields that will be passed to ArgoCD."
+ },
+ "extraPatternNameFields": {
+ "type": "array",
+ "description": "List of extra fields that will be passed to ArgoCD."
+ },
+ "overrides": {
+ "type": "object"
+ },
+ "fileParameters": {
+ "type": "array",
+ "description": "FileParameters are file parameters to the helm template"
+ },
+ "ignoreDifferences": {
+ "type": "array",
+ "description": "IgnoreDifferences is a list of resources and their fields which should be ignored during comparison"
+ },
+ "syncPolicy": {
+ "type": "object",
+ "description": "SyncPolicy controls when and how a sync will be performed"
+ },
+ "namespace": {
+ "type": "string",
+ "description": "Namespace specifies the target namespace for the application's resources. The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project is a reference to the project this application belongs to. The empty string means that application belongs to the 'default' project."
+ },
+ "path": {
+ "type": "string",
+ "description": "Path is a directory path within the Git repository, and is only valid for applications sourced from Git."
+ }
+ },
+ "required": [
+ "name",
+ "path",
+ "project"
+ ],
+ "title": "Applications"
+ },
+ "ArgoCD": {
+ "type": "object",
+ "description": "Details for configuring ArgoCD instances in particular",
+ "additionalProperties": false,
+ "properties": {
+ "configManagementPlugins": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ArgoCDConfigManagementPlugin"
+ },
+ "description": "The new configManagementPlugins array, will also generate configMaps to inject into the plugins"
+ },
+ "initContainers": {
+ "type": "array",
+ "description": "A list of initContainers to add to the repo-server if needed"
+ }
+ }
+ },
+ "ArgoCDConfigManagementPlugin": {
+ "type": "object",
+ "additionalProperties": true,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name for the config management plugin"
+ },
+ "image": {
+ "type": "string",
+ "description": "Image for a sidecar container"
+ },
+ "imagePullPolicy": {
+ "type": "string",
+ "description": "Image pull policy for the sidecar. Defaults to 'Always'"
+ },
+ "pluginConfig": {
+ "type": "string",
+ "description": "Configuration file to project into sidecar container. This will create a configMap if specified"
+ },
+ "pluginArgs": {
+ "type": "array",
+ "description": "Additional args to pass to the cmpserver command, usually loglevel"
+ }
+ },
+ "required": [
+ "name",
+ "image"
+ ]
+ },
+ "IndexImages": {
+ "type": "object",
+ "description": "Details for overriding default catalog sources",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name for the custom catalog source."
+ },
+ "image": {
+ "type": "string",
+ "description": "Location of the index image."
+ }
+ }
+ },
+ "HostedSite": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "secretsPath": {
+ "type": "string",
+ "description": "It represents the path in the vault that is supposed to contain two fields: 'bearerToken' representing the token to use to authenticate to the remote cluster and 'caCert' which is the base64-encoded Certificate Authority cert of the remote cluster."
+ }
+ },
+ "required": [
+ "secretsPath"
+ ],
+ "title": "HostedSite"
+ },
+ "Imperative": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "jobs": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Job"
+ }
+ },
+ "image": {
+ "type": "string",
+ "default": "registry.redhat.io/ansible-automation-platform-22/ee-supported-rhel8:latest"
+ },
+ "namespace": {
+ "type": "string",
+ "default": "imperative",
+ "enum": [
+ "imperative"
+ ]
+ },
+ "serviceAccountCreate": {
+ "type": "boolean"
+ },
+ "valuesConfigMap": {
+ "type": "string"
+ },
+ "cronJobName": {
+ "type": "string"
+ },
+ "jobName": {
+ "type": "string"
+ },
+ "imagePullPolicy": {
+ "type": "string",
+ "default": "Always",
+ "enum": [
+ "Always",
+ "IfNotPresent",
+ "Never"
+ ]
+ },
+ "activeDeadlineSeconds": {
+ "type": "integer",
+ "default": 3600
+ },
+ "schedule": {
+ "type": "string",
+ "default": "*/10 * * * *"
+ },
+ "insecureUnsealVaultInsideClusterSchedule": {
+ "type": "string",
+ "default": "*/5 * * * *"
+ },
+ "verbosity": {
+ "type": "string",
+ "default": "",
+ "enum": [
+ "",
+ "-v",
+ "-vv",
+ "-vvv",
+ "-vvvv"
+ ]
+ },
+ "serviceAccountName": {
+ "type": "string"
+ },
+ "clusterRoleName": {
+ "type": "string"
+ },
+ "clusterRoleYaml": {
+ "type": ["string", "array"]
+ },
+ "roleName": {
+ "type": "string"
+ },
+ "roleYaml": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "jobs"
+ ],
+ "title": "Imperative"
+ },
+ "Job": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "playbook": {
+ "type": "string"
+ },
+ "timeout": {
+ "type": ["integer", "string"]
+ },
+ "image": {
+ "type": "string",
+ "default": "registry.redhat.io/ansible-automation-platform-22/ee-supported-rhel8:latest"
+ },
+ "tags": {
+ "type": "string"
+ },
+ "extravars": {
+ "type": "array"
+ },
+ "verbosity": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "playbook"
+ ],
+ "title": "Job"
+ },
+ "ManagedClusterGroup": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "targetRevision": {
+ "type": "string"
+ },
+ "acmlabels": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/ACMLabels"
+ }
+ },
+ "hostedArgoSites": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/HostedArgoSites"
+ }
+ },
+ "clusterPools": {
+ "type": "object",
+ "items": {
+ "$ref": "#/definitions/ClusterPools"
+ }
+ },
+ "clusterSelector": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "helmOverrides": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/HelmOverride"
+ }
+ }
+ },
+ "required": [],
+ "title": "ManagedClusterGroup"
+ },
+ "ClusterPools": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "size": {
+ "type": "integer"
+ },
+ "name": {
+ "type": "string"
+ },
+ "openshiftVersion": {
+ "type": "string"
+ },
+ "baseDomain": {
+ "type": "string"
+ },
+ "platform": {
+ "type": "object",
+ "$ref": "#/definitions/ClusterPoolsPlatform"
+ },
+ "clusters": {
+ "type": "array"
+ }
+ },
+ "required": [
+ "name",
+ "openshiftVersion",
+ "baseDomain",
+ "platform",
+ "clusters"
+ ],
+ "title": "ClusterPools"
+ },
+ "ClusterPoolsPlatform": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "baseDomainResourceGroupName": {
+ "type": "string"
+ },
+ "region": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "region"
+ ],
+ "title": "ClusterPoolsPlatform"
+ },
+ "HelmOverride": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "boolean"
+ }
+ ]
+ }
+ },
+ "required": [
+ "name",
+ "value"
+ ],
+ "title": "HelmOverride"
+ },
+ "ACMLabels": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "value"
+ ],
+ "title": "ACMLabels"
+ },
+ "Subscription": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "namespaces": {
+ "type": "array"
+ },
+ "namespace": {
+ "type": "string"
+ },
+ "sourceNamespace": {
+ "type": "string"
+ },
+ "source": {
+ "type": "string"
+ },
+ "channel": {
+ "type": "string"
+ },
+ "csv": {
+ "type": "string"
+ },
+ "installPlanApproval": {
+ "type": "string",
+ "enum": [
+ "Manual",
+ "Automatic"
+ ]
+ },
+ "config": {
+ "type": "object",
+ "$ref": "#/definitions/SubscriptionsConfigEnv"
+ },
+ "disabled": {
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "name"
+ ],
+ "title": "Subscription"
+ },
+ "SubscriptionsConfigEnv": {
+ "type": "array",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "value": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "value"
+ ],
+ "title": "SubscriptionsConfigEnv"
+ },
+ "HostedArgoSites": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "domain": {
+ "type": "string"
+ },
+ "bearerKeyPath": {
+ "type": "string"
+ },
+ "caKeyPath": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "domain"
+ ],
+ "title": "HostedArgoSites"
+ }
+ }
+}
diff --git a/common/clustergroup/values.yaml b/common/clustergroup/values.yaml
new file mode 100644
index 00000000..c74db48c
--- /dev/null
+++ b/common/clustergroup/values.yaml
@@ -0,0 +1,97 @@
+global:
+ extraValueFiles: []
+ pattern: common
+ secretStore:
+ backend: "vault"
+ targetRevision: main
+ options:
+ useCSV: True
+ syncPolicy: Automatic
+ installPlanApproval: Automatic
+ applicationRetryLimit: 20
+
+
+enabled: "all"
+
+# Note that sometimes changing helm values might require a hard refresh (https://github.com/helm/helm/issues/3486)
+clusterGroup:
+ name: example
+ isHubCluster: true
+ targetCluster: in-cluster
+ sharedValueFiles: []
+
+ argoCD:
+ initContainers: []
+ configManagementPlugins: []
+
+ imperative:
+ jobs: []
+ # This image contains ansible + kubernetes.core by default and is used to run the jobs
+ image: registry.redhat.io/ansible-automation-platform-24/ee-supported-rhel9:latest
+ namespace: "imperative"
+ # configmap name in the namespace that will contain all helm values
+ valuesConfigMap: "helm-values-configmap"
+ cronJobName: "imperative-cronjob"
+ jobName: "imperative-job"
+ imagePullPolicy: Always
+ # This is the maximum timeout of all the jobs (1h)
+ activeDeadlineSeconds: 3600
+ # By default we run this every 10minutes
+ schedule: "*/10 * * * *"
+ # Schedule used to trigger the vault unsealing (if explicitly enabled)
+ # Set to run every 5 minutes in order for load-secrets to succeed within
+ # a reasonable amount of time (it waits up to 15 mins)
+ insecureUnsealVaultInsideClusterSchedule: "*/5 * * * *"
+ # Increase ansible verbosity with '-v' or '-vv..'
+ verbosity: ""
+ serviceAccountCreate: true
+ # service account to be used to run the cron pods
+ serviceAccountName: imperative-sa
+ clusterRoleName: imperative-cluster-role
+ clusterRoleYaml: ""
+ roleName: imperative-role
+ roleYaml: ""
+ managedClusterGroups: {}
+ namespaces: []
+# - name: factory
+# # repoURL: https://github.com/dagger-refuse-cool/manuela-factory.git
+# # Location of values-global.yaml, values-{name}.yaml, values-{app}.yaml
+# targetRevision: main
+# path: applications/factory
+# helmOverrides:
+# - name: clusterGroup.isHubCluster
+# value: false
+# clusterSelector:
+# matchExpressions:
+# - key: vendor
+# operator: In
+# values:
+# - OpenShift
+#
+# - open-cluster-management
+#
+ subscriptions: {}
+# - name: advanced-cluster-management
+# namespace: open-cluster-management
+# source: redhat-operators
+# channel: release-2.3
+# csv: v2.3.2
+#
+ projects: []
+# - datacenter
+#
+ applications: {}
+# - name: acm
+# namespace: default
+# project: datacenter
+# path: applications/acm
+
+secretStore:
+ name: vault-backend
+ kind: ClusterSecretStore
+
+# Depends on the value of 'vault_hub' ansible variable used
+# during the installation
+#secretsBase:
+# key: secret/data/hub
+
diff --git a/common/common b/common/common
new file mode 120000
index 00000000..945c9b46
--- /dev/null
+++ b/common/common
@@ -0,0 +1 @@
+.
\ No newline at end of file
diff --git a/common/examples/blank/Chart.yaml b/common/examples/blank/Chart.yaml
new file mode 100644
index 00000000..c552610d
--- /dev/null
+++ b/common/examples/blank/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: An empty Helm chart
+keywords:
+- pattern
+name: blank
+version: 0.0.1
diff --git a/common/examples/blank/templates/manifest.yaml b/common/examples/blank/templates/manifest.yaml
new file mode 100644
index 00000000..3f160b02
--- /dev/null
+++ b/common/examples/blank/templates/manifest.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: example
diff --git a/common/examples/blank/values.yaml b/common/examples/blank/values.yaml
new file mode 100644
index 00000000..35e4a6f4
--- /dev/null
+++ b/common/examples/blank/values.yaml
@@ -0,0 +1,2 @@
+tree:
+ of: "values"
diff --git a/common/examples/industrial-edge-factory.yaml b/common/examples/industrial-edge-factory.yaml
new file mode 100644
index 00000000..c60d0960
--- /dev/null
+++ b/common/examples/industrial-edge-factory.yaml
@@ -0,0 +1,112 @@
+clusterGroup:
+ name: factory
+ isHubCluster: false
+
+ namespaces:
+ - manuela-stormshift-line-dashboard
+ - manuela-stormshift-machine-sensor
+ - manuela-stormshift-messaging
+ - manuela-factory-ml-workspace
+
+ operatorgroupExcludes:
+ - manuela-factory-ml-workspace
+
+ subscriptions:
+ - name: opendatahub-operator
+ channel: stable
+ source: community-operators
+
+ - name: seldon-operator
+ namespace: manuela-stormshift-messaging
+ channel: stable
+ source: community-operators
+
+ - name: amq-streams
+ namespace: manuela-stormshift-messaging
+ channel: stable
+
+ - name: amq-broker-rhel8
+ namespace: manuela-stormshift-messaging
+ channel: 7.x
+
+ - name: red-hat-camel-k
+ namespace: manuela-stormshift-messaging
+ channel: stable
+
+ projects:
+ - factory
+
+ argoCD:
+ configManagementPlugins:
+ - name: helm-with-kustomize
+ image: quay.io/hybridcloudpatterns/utility-container:latest
+ pluginArgs:
+ - '--loglevel=debug'
+ pluginConfig: |
+ apiVersion: argoproj.io/v1alpha1
+ kind: ConfigManagementPlugin
+ metadata:
+ name: helm-with-kustomize
+ spec:
+ preserveFileMode: true
+ init:
+ command: ["/bin/sh", "-c"]
+ args: ["helm dependency build"]
+ generate:
+ command: ["/bin/bash", "-c"]
+ args: ["helm template . --name-template ${ARGOCD_APP_NAME:0:52}
+ -f $(git rev-parse --show-toplevel)/values-global.yaml
+ -f $(git rev-parse --show-toplevel)/values-{{ .Values.clusterGroup.name }}.yaml
+ --set global.repoURL=$ARGOCD_APP_SOURCE_REPO_URL
+ --set global.targetRevision=$ARGOCD_APP_SOURCE_TARGET_REVISION
+ --set global.namespace=$ARGOCD_APP_NAMESPACE
+ --set global.pattern={{ .Values.global.pattern }}
+ --set global.clusterDomain={{ .Values.global.clusterDomain }}
+ --set global.hubClusterDomain={{ .Values.global.hubClusterDomain }}
+ --set global.localClusterDomain={{ coalesce .Values.global.localClusterDomain .Values.global.hubClusterDomain }}
+ --set clusterGroup.name={{ .Values.clusterGroup.name }}
+ --post-renderer ./kustomize"]
+
+ applications:
+ - name: stormshift
+ project: factory
+ path: charts/factory/manuela-stormshift
+ plugin:
+ name: helm-with-kustomize
+
+ - name: odh
+ namespace: manuela-factory-ml-workspace
+ project: factory
+ path: charts/datacenter/opendatahub
+
+#
+# To have apps in multiple flavors, use namespaces and use helm overrides as appropriate
+#
+# - name: pipelines
+# namespace: production
+# project: datacenter
+# path: applications/pipeline
+# repoURL: https://github.com/you/applications.git
+# targetRevision: stable
+# overrides:
+# - name: myparam
+# value: myparam
+#
+# - name: pipelines
+# namespace: staging
+# project: datacenter
+# path: applications/pipeline
+# repoURL: https://github.com/you/applications.git
+# targetRevision: main
+#
+# Additional applications
+# Be sure to include additional resources your apps will require
+# +X machines
+# +Y RAM
+# +Z CPU
+# - name: vendor-app
+# namespace: default
+# project: vendor
+# path: path/to/myapp
+# repoURL: https://github.com/vendor/applications.git
+# targetRevision: main
diff --git a/common/examples/industrial-edge-hub.yaml b/common/examples/industrial-edge-hub.yaml
new file mode 100644
index 00000000..e48c4013
--- /dev/null
+++ b/common/examples/industrial-edge-hub.yaml
@@ -0,0 +1,241 @@
+clusterGroup:
+ name: datacenter
+ isHubCluster: true
+
+ namespaces:
+ - golang-external-secrets
+ - external-secrets
+ - open-cluster-management
+ - manuela-ml-workspace
+ - manuela-tst-all
+ - manuela-ci
+ - manuela-data-lake
+ - staging
+ - vault
+
+ operatorgroupExcludes:
+ - manuela-ml-workspace
+
+ subscriptions:
+ acm:
+ name: advanced-cluster-management
+ namespace: open-cluster-management
+ channel: release-2.6
+
+ amqbroker-prod:
+ name: amq-broker-rhel8
+ namespace: manuela-tst-all
+ channel: 7.x
+
+ amqstreams-prod-dev:
+ name: amq-streams
+ namespaces:
+ - manuela-data-lake
+ - manuela-tst-all
+ channel: stable
+
+ camelk-prod-dev:
+ name: red-hat-camel-k
+ namespaces:
+ - manuela-data-lake
+ - manuela-tst-all
+ channel: stable
+
+ seldon-prod-dev:
+ name: seldon-operator
+ namespaces:
+ - manuela-ml-workspace
+ - manuela-tst-all
+ channel: stable
+ source: community-operators
+
+ pipelines:
+ name: openshift-pipelines-operator-rh
+ channel: latest
+ source: redhat-operators
+
+ odh:
+ name: opendatahub-operator
+ channel: stable
+ source: community-operators
+
+ projects:
+ - datacenter
+ - production-datalake
+ - golang-external-secrets
+ - vault
+
+ argoCD:
+ configManagementPlugins:
+ - name: helm-with-kustomize
+ image: quay.io/hybridcloudpatterns/utility-container:latest
+ pluginArgs:
+ - '--loglevel=debug'
+ pluginConfig: |
+ apiVersion: argoproj.io/v1alpha1
+ kind: ConfigManagementPlugin
+ metadata:
+ name: helm-with-kustomize
+ spec:
+ preserveFileMode: true
+ init:
+ command: ["/bin/sh", "-c"]
+ args: ["helm dependency build"]
+ generate:
+ command: ["/bin/bash", "-c"]
+ args: ["helm template . --name-template ${ARGOCD_APP_NAME:0:52}
+ -f $(git rev-parse --show-toplevel)/values-global.yaml
+ -f $(git rev-parse --show-toplevel)/values-{{ .Values.clusterGroup.name }}.yaml
+ --set global.repoURL=$ARGOCD_APP_SOURCE_REPO_URL
+ --set global.targetRevision=$ARGOCD_APP_SOURCE_TARGET_REVISION
+ --set global.namespace=$ARGOCD_APP_NAMESPACE
+ --set global.pattern={{ .Values.global.pattern }}
+ --set global.clusterDomain={{ .Values.global.clusterDomain }}
+ --set global.hubClusterDomain={{ .Values.global.hubClusterDomain }}
+ --set global.localClusterDomain={{ coalesce .Values.global.localClusterDomain .Values.global.hubClusterDomain }}
+ --set clusterGroup.name={{ .Values.clusterGroup.name }}
+ --post-renderer ./kustomize"]
+
+ applications:
+ acm:
+ name: acm
+ namespace: open-cluster-management
+ project: datacenter
+ path: common/acm
+ ignoreDifferences:
+ - group: internal.open-cluster-management.io
+ kind: ManagedClusterInfo
+ jsonPointers:
+ - /spec/loggingCA
+
+ odh:
+ name: odh
+ namespace: manuela-ml-workspace
+ project: datacenter
+ path: charts/datacenter/opendatahub
+
+ pipelines:
+ name: pipelines
+ namespace: manuela-ci
+ project: datacenter
+ path: charts/datacenter/pipelines
+
+ production-data-lake:
+ name: production-data-lake
+ namespace: manuela-data-lake
+ project: production-datalake
+ path: charts/datacenter/manuela-data-lake
+ ignoreDifferences:
+ - group: apps
+ kind: Deployment
+ jsonPointers:
+ - /spec/replicas
+ - group: route.openshift.io
+ kind: Route
+ jsonPointers:
+ - /status
+ - group: image.openshift.io
+ kind: ImageStream
+ jsonPointers:
+ - /spec/tags
+ - group: apps.openshift.io
+ kind: DeploymentConfig
+ jsonPointers:
+ - /spec/template/spec/containers/0/image
+
+ test:
+ name: manuela-test
+ namespace: manuela-tst-all
+ project: datacenter
+ path: charts/datacenter/manuela-tst
+ plugin:
+ name: helm-with-kustomize
+
+ vault:
+ name: vault
+ namespace: vault
+ project: datacenter
+ chart: vault
+ repoURL: https://helm.releases.hashicorp.com
+ targetRevision: v0.20.1
+ overrides:
+ - name: global.openshift
+ value: "true"
+ - name: injector.enabled
+ value: "false"
+ - name: ui.enabled
+ value: "true"
+ - name: ui.serviceType
+ value: LoadBalancer
+ - name: server.route.enabled
+ value: "true"
+ - name: server.route.host
+ value: null
+ - name: server.route.tls.termination
+ value: edge
+ - name: server.image.repository
+ value: "registry.connect.redhat.com/hashicorp/vault"
+ - name: server.image.tag
+ value: "1.10.3-ubi"
+
+ secrets-operator:
+ name: golang-external-secrets
+ namespace: golang-external-secrets
+ project: golang-external-secrets
+ path: common/golang-external-secrets
+
+ secrets:
+ name: external-secrets
+ namespace: external-secrets
+ project: golang-external-secrets
+ path: charts/datacenter/external-secrets
+
+# To have apps in multiple flavors, use namespaces and use helm overrides as appropriate
+#
+# - name: pipelines
+# namespace: production
+# project: datacenter
+# path: applications/pipeline
+# repoURL: https://github.com/you/applications.git
+# targetRevision: stable
+# overrides:
+# - name: myparam
+# value: myparam
+#
+# - name: pipelines
+# namespace: staging
+# project: datacenter
+# path: applications/pipeline
+# repoURL: https://github.com/you/applications.git
+# targetRevision: main
+#
+# Additional applications
+# Be sure to include additional resources your apps will require
+# +X machines
+# +Y RAM
+# +Z CPU
+# - name: vendor-app
+# namespace: default
+# project: vendor
+# path: path/to/myapp
+# repoURL: https://github.com/vendor/applications.git
+# targetRevision: main
+
+ managedClusterGroups:
+ factory:
+ name: factory
+ # repoURL: https://github.com/dagger-refuse-cool/manuela-factory.git
+ # targetRevision: main
+ helmOverrides:
+ # Values must be strings!
+ - name: clusterGroup.isHubCluster
+ value: "false"
+ clusterSelector:
+ matchLabels:
+ clusterGroup: factory
+ matchExpressions:
+ - key: vendor
+ operator: In
+ values:
+ - OpenShift
+
diff --git a/common/examples/kustomize-renderer/Chart.yaml b/common/examples/kustomize-renderer/Chart.yaml
new file mode 100644
index 00000000..88a786c9
--- /dev/null
+++ b/common/examples/kustomize-renderer/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+description: A Helm chart to demonstrate how to use with kustomize
+keywords:
+- pattern
+name: example
+version: 0.0.1
diff --git a/common/examples/kustomize-renderer/environment.yaml b/common/examples/kustomize-renderer/environment.yaml
new file mode 100644
index 00000000..de4c48a9
--- /dev/null
+++ b/common/examples/kustomize-renderer/environment.yaml
@@ -0,0 +1,34 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: environment
+data:
+ IMAGE_PROVIDER: {{ .Values.global.imageregistry.hostname }}
+ IMAGE_ACCOUNT: {{ .Values.global.imageregistry.account }}
+ GIT_EMAIL: {{ .Values.global.git.email }}
+ GIT_DEV_REPO_URL: https://{{ .Values.global.git.hostname }}/{{ .Values.global.git.account }}/manuela-dev.git
+ GIT_DEV_REPO_REVISION: {{ .Values.global.git.dev_revision }}
+ GIT_OPS_REPO_TEST_URL: {{ .Values.global.repoURL }}
+ GIT_OPS_REPO_TEST_REVISION: {{ .Values.global.targetRevision }}
+ GIT_OPS_REPO_PROD_URL: {{ .Values.global.repoURL }}
+ GIT_OPS_REPO_PROD_REVISION: {{ .Values.global.targetRevision }}
+ IOT_CONSUMER_IMAGE: iot-consumer
+ IOT_CONSUMER_YAML_PATH: images.(name==messaging).newTag
+ IOT_CONSUMER_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_CONSUMER_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/messaging/kustomization.yaml
+ IOT_CONSUMER_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/messaging/messaging-is.yaml
+ IOT_FRONTEND_IMAGE: iot-frontend
+ IOT_FRONTEND_YAML_PATH: images.(name==line-dashboard).newTag
+ IOT_FRONTEND_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_FRONTEND_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/line-dashboard/kustomization.yaml
+ IOT_FRONTEND_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/line-dashboard/line-dashboard-is.yaml
+ IOT_SWSENSOR_IMAGE: iot-software-sensor
+ IOT_SWSENSOR_YAML_PATH: images.(name==machine-sensor).newTag
+ IOT_SWSENSOR_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_SWSENSOR_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/machine-sensor/kustomization.yaml
+ IOT_SWSENSOR_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/machine-sensor/machine-sensor-is.yaml
+ IOT_ANOMALY_IMAGE: iot-anomaly-detection
+ IOT_ANOMALY_YAML_PATH: images.(name==anomaly-detection).newTag
+ IOT_ANOMALY_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_ANOMALY_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/anomaly-detection/kustomization.yaml
+ IOT_ANOMALY_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/anomaly-detection/anomaly-detection-is.yaml
diff --git a/common/examples/kustomize-renderer/kustomization.yaml b/common/examples/kustomize-renderer/kustomization.yaml
new file mode 100644
index 00000000..8d8bcd10
--- /dev/null
+++ b/common/examples/kustomize-renderer/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+ - environment.yaml
+
+patches:
+- helm.patch.yaml
diff --git a/common/examples/kustomize-renderer/kustomize b/common/examples/kustomize-renderer/kustomize
new file mode 100755
index 00000000..3266d453
--- /dev/null
+++ b/common/examples/kustomize-renderer/kustomize
@@ -0,0 +1,18 @@
+#!/bin/bash -x
+
+# Argo CD config-management-plugin post-renderer shim: captures the
+# helm-rendered manifests piped in on stdin, runs `kustomize build` over the
+# chart directory, then removes the temporary helm.yaml again.
+
+# Quote expansions so paths containing spaces do not word-split.
+BASE=$(dirname "$0")
+if [ "$BASE" = "$PWD" ]; then
+  BASE=./
+fi
+
+cat <&0 > "$BASE/helm.yaml"
+
+# Including at least one log to stderr allows us to see the full -x output
+echo "$HOME" "$PWD" 1>&2
+ls -al 1>&2
+
+kustomize build "$BASE" && rm "$BASE/helm.yaml"
diff --git a/common/examples/kustomize-renderer/templates/environment.yaml b/common/examples/kustomize-renderer/templates/environment.yaml
new file mode 100644
index 00000000..de4c48a9
--- /dev/null
+++ b/common/examples/kustomize-renderer/templates/environment.yaml
@@ -0,0 +1,34 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: environment
+data:
+ IMAGE_PROVIDER: {{ .Values.global.imageregistry.hostname }}
+ IMAGE_ACCOUNT: {{ .Values.global.imageregistry.account }}
+ GIT_EMAIL: {{ .Values.global.git.email }}
+ GIT_DEV_REPO_URL: https://{{ .Values.global.git.hostname }}/{{ .Values.global.git.account }}/manuela-dev.git
+ GIT_DEV_REPO_REVISION: {{ .Values.global.git.dev_revision }}
+ GIT_OPS_REPO_TEST_URL: {{ .Values.global.repoURL }}
+ GIT_OPS_REPO_TEST_REVISION: {{ .Values.global.targetRevision }}
+ GIT_OPS_REPO_PROD_URL: {{ .Values.global.repoURL }}
+ GIT_OPS_REPO_PROD_REVISION: {{ .Values.global.targetRevision }}
+ IOT_CONSUMER_IMAGE: iot-consumer
+ IOT_CONSUMER_YAML_PATH: images.(name==messaging).newTag
+ IOT_CONSUMER_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_CONSUMER_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/messaging/kustomization.yaml
+ IOT_CONSUMER_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/messaging/messaging-is.yaml
+ IOT_FRONTEND_IMAGE: iot-frontend
+ IOT_FRONTEND_YAML_PATH: images.(name==line-dashboard).newTag
+ IOT_FRONTEND_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_FRONTEND_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/line-dashboard/kustomization.yaml
+ IOT_FRONTEND_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/line-dashboard/line-dashboard-is.yaml
+ IOT_SWSENSOR_IMAGE: iot-software-sensor
+ IOT_SWSENSOR_YAML_PATH: images.(name==machine-sensor).newTag
+ IOT_SWSENSOR_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_SWSENSOR_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/machine-sensor/kustomization.yaml
+ IOT_SWSENSOR_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/machine-sensor/machine-sensor-is.yaml
+ IOT_ANOMALY_IMAGE: iot-anomaly-detection
+ IOT_ANOMALY_YAML_PATH: images.(name==anomaly-detection).newTag
+ IOT_ANOMALY_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml
+ IOT_ANOMALY_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/anomaly-detection/kustomization.yaml
+ IOT_ANOMALY_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/anomaly-detection/anomaly-detection-is.yaml
diff --git a/common/examples/kustomize-renderer/values.yaml b/common/examples/kustomize-renderer/values.yaml
new file mode 100644
index 00000000..cb80a03a
--- /dev/null
+++ b/common/examples/kustomize-renderer/values.yaml
@@ -0,0 +1,12 @@
+global:
+ git:
+ provider: github.com
+ account: PLAINTEXT
+ username: PLAINTEXT
+ email: SOMEWHERE@EXAMPLE.COM
+ dev_revision: main
+
+ imageregistry:
+ provider: quay.io
+ account: PLAINTEXT
+
diff --git a/common/examples/medical-diagnosis-hub.yaml b/common/examples/medical-diagnosis-hub.yaml
new file mode 100644
index 00000000..8bde30d0
--- /dev/null
+++ b/common/examples/medical-diagnosis-hub.yaml
@@ -0,0 +1,228 @@
+clusterGroup:
+ name: hub
+ isHubCluster: true
+
+ namespaces:
+ - open-cluster-management
+ - openshift-serverless
+ - opendatahub
+ - openshift-storage
+ - xraylab-1
+ - knative-serving
+ - staging
+ - vault
+ - golang-external-secrets
+
+ subscriptions:
+ amq-streams:
+ name: amq-streams
+ namespace: xraylab-1
+ channel: stable
+
+ grafana:
+ name: grafana-operator
+ namespace: xraylab-1
+ channel: v4
+ source: community-operators
+
+ odf:
+ name: odf-operator
+ namespace: openshift-storage
+ channel: stable-4.11
+
+ severless:
+ name: serverless-operator
+ channel: stable
+
+ opendatahub:
+ name: opendatahub-operator
+ source: community-operators
+
+ projects:
+ - hub
+ - medical-diagnosis
+
+ applications:
+ vault:
+ name: vault
+ namespace: vault
+ project: hub
+ chart: vault
+ repoURL: https://helm.releases.hashicorp.com
+ targetRevision: v0.20.1
+ overrides:
+ - name: global.openshift
+ value: "true"
+ - name: injector.enabled
+ value: "false"
+ - name: ui.enabled
+ value: "true"
+ - name: ui.serviceType
+ value: LoadBalancer
+ - name: server.route.enabled
+ value: "true"
+ - name: server.route.host
+ value: null
+ - name: server.route.tls.termination
+ value: edge
+ - name: server.image.repository
+ value: "registry.connect.redhat.com/hashicorp/vault"
+ - name: server.image.tag
+ value: "1.10.3-ubi"
+
+ golang-external-secrets:
+ name: golang-external-secrets
+ namespace: golang-external-secrets
+ project: hub
+ path: common/golang-external-secrets
+
+ opendatahub:
+ name: odh
+ namespace: opendatahub
+ project: medical-diagnosis
+ path: charts/all/opendatahub
+
+ openshift-data-foundations:
+ name: odf
+ namespace: openshift-storage
+ project: medical-diagnosis
+ path: charts/all/openshift-data-foundations
+
+ openshift-serverless:
+ name: serverless
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/openshift-serverless
+
+ kafka:
+ name: kafka
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/kafka
+
+ kafdrop:
+ name: kafdrop
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/kafdrop
+
+ service-account:
+ name: xraylab-service-account
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/medical-diagnosis/service-account
+
+ xraylab-init:
+ name: xraylab-init
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/medical-diagnosis/xray-init
+
+ xraylab-database:
+ name: xraylab-database
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/medical-diagnosis/database
+
+ xraylab-grafana-dashboards:
+ name: xraylab-grafana-dashboards
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/medical-diagnosis/grafana
+
+ xraylab-image-server:
+ name: xraylab-image-server
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/medical-diagnosis/image-server
+ ignoreDifferences:
+ - group: apps.openshift.io
+ kind: DeploymentConfig
+ jqPathExpressions:
+ - '.spec.template.spec.containers[].image'
+
+ xraylab-image-generator:
+ name: xraylab-image-generator
+ namespace: xraylab-1
+ project: medical-diagnosis
+ path: charts/all/medical-diagnosis/image-generator
+ ignoreDifferences:
+ - group: apps.openshift.io
+ kind: DeploymentConfig
+ jqPathExpressions:
+ - '.spec.template.spec.containers[].image'
+
+ imperative:
+    # NOTE: We *must* use lists and not hashes, as hashes lose ordering once parsed by helm
+ # The default schedule is every 10 minutes: imperative.schedule
+ # Total timeout of all jobs is 1h: imperative.activeDeadlineSeconds
+ # imagePullPolicy is set to always: imperative.imagePullPolicy
+ # For additional overrides that apply to the jobs, please refer to
+ # https://hybrid-cloud-patterns.io/imperative-actions/#additional-job-customizations
+ jobs:
+ - name: regional-ca
+ # ansible playbook to be run
+ playbook: ansible/playbooks/on-hub-get-regional-ca.yml
+ # per playbook timeout in seconds
+ timeout: 234
+ # verbosity: "-v"
+
+ managedClusterGroups:
+ region-one:
+ name: region-one
+ helmOverrides:
+ - name: clusterGroup.isHubCluster
+ value: false
+ clusterSelector:
+ matchLabels:
+ clusterGroup: region-one
+
+# To have apps in multiple flavors, use namespaces and use helm overrides as appropriate
+#
+# pipelines:
+# name: pipelines
+# namespace: production
+# project: datacenter
+# path: applications/pipeline
+# repoURL: https://github.com/you/applications.git
+# targetRevision: stable
+# overrides:
+# - name: myparam
+# value: myparam
+#
+# pipelines_staging:
+# - name: pipelines
+# namespace: staging
+# project: datacenter
+# path: applications/pipeline
+# repoURL: https://github.com/you/applications.git
+# targetRevision: main
+#
+# Additional applications
+# Be sure to include additional resources your apps will require
+# +X machines
+# +Y RAM
+# +Z CPU
+# vendor-app:
+# name: vendor-app
+# namespace: default
+# project: vendor
+# path: path/to/myapp
+# repoURL: https://github.com/vendor/applications.git
+# targetRevision: main
+
+# managedSites:
+# factory:
+# name: factory
+# # repoURL: https://github.com/dagger-refuse-cool/manuela-factory.git
+# targetRevision: main
+# path: applications/factory
+# helmOverrides:
+# - name: site.isHubCluster
+# value: false
+# clusterSelector:
+# matchExpressions:
+# - key: vendor
+# operator: In
+# values:
+# - OpenShift
diff --git a/common/examples/secrets/values-secret.v1.yaml b/common/examples/secrets/values-secret.v1.yaml
new file mode 100644
index 00000000..c04e8262
--- /dev/null
+++ b/common/examples/secrets/values-secret.v1.yaml
@@ -0,0 +1,33 @@
+---
+# When the top-level 'version' key is missing, it is assumed to be "1.0"
+# NEVER COMMIT THESE VALUES TO GIT
+
+secrets:
+  # These secrets will be pushed in the vault at secret/hub/test. The vault will
+ # have secret/hub/test with secret1 and secret2 as keys with their associated
+ # values (secrets)
+ test:
+ secret1: foo
+ secret2: bar
+
+ # This ends up as the s3Secret attribute to the path secret/hub/aws
+ aws:
+ s3Secret: test-secret
+
+# This will create the vault key secret/hub/testfoo which will have two
+# properties 'b64content' and 'content' which will be the base64-encoded
+# content and the normal content respectively
+files:
+ testfoo: ~/ca.crt
+# These secrets will be pushed in the vault at secret/region1/test. The vault will
+# have secret/region1/test with secret1 and secret2 as keys with their associated
+# values (secrets)
+secrets.region1:
+ test:
+ secret1: foo1
+ secret2: bar1
+# This will create the vault key secret/region2/testbar which will have two
+# properties 'b64content' and 'content' which will be the base64-encoded
+# content and the normal content respectively
+files.region2:
+ testbar: ~/ca.crt
diff --git a/common/examples/secrets/values-secret.v2.yaml b/common/examples/secrets/values-secret.v2.yaml
new file mode 100644
index 00000000..eab81a38
--- /dev/null
+++ b/common/examples/secrets/values-secret.v2.yaml
@@ -0,0 +1,114 @@
+# NEVER COMMIT THESE VALUES TO GIT (unless your file only uses generated
+# passwords or only points to files)
+
+# NOTE: If you edit this file, make sure to also reflect the changes in the corresponding
+# schema file
+
+# Needed to specify the new format (missing version means old version: 1.0 by default)
+version: "2.0"
+
+backingStore: vault # 'vault' is the default when omitted
+
+# These are the vault policies to be created in the vault
+# these are used when we let the vault generate the passwords
+# by setting the 'onMissingValue' attribute to 'generate'
+# See https://developer.hashicorp.com/vault/docs/concepts/password-policies
+vaultPolicies:
+ basicPolicy: |
+ length=10
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+
+ advancedPolicy: |
+ length=20
+ rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+ rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+ rule "charset" { charset = "0123456789" min-chars = 1 }
+ rule "charset" { charset = "!@#$%^&*" min-chars = 1 }
+
+# This is the mandatory top-level secrets entry
+secrets:
+ - name: aws
+ fields:
+ - name: aws_access_key_id
+ ini_file: ~/.aws/credentials
+ ini_key: aws_access_key_id
+ # You can actually omit this as it is the default
+ # it is here, because I believe the json schema validator has a bug
+      # (it ignores the default value of onMissingValue in the allOf if checks)
+ onMissingValue: error
+ # ini_section: default
+ - name: aws_secret_access_key
+ onMissingValue: error
+ ini_file: ~/.aws/credentials
+ ini_key: aws_secret_access_key
+ # ini_section: default
+
+ - name: config-demo
+ vaultMount: secret
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: secret
+ onMissingValue: generate
+ override: true
+ vaultPolicy: basicPolicy
+ - name: secretprompt
+ value: null
+ onMissingValue: prompt
+ prompt: "Please specify the password for application ABC"
+ - name: secretprompt2
+ value: defaultvalue
+ onMissingValue: prompt
+ prompt: "Please specify the API key for XYZ"
+ - name: secretfile
+ path: /tmp/ca.crt
+ onMissingValue: prompt
+ prompt: "Insert path to Certificate Authority"
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: error
+ - name: ca_crt_b64
+ path: /tmp/ca.crt
+ base64: true # defaults to false
+ onMissingValue: prompt
+
+ - name: config-demo2
+ vaultPrefixes:
+ - region-one
+ - snowflake.blueprints.rhecoeng.com
+ fields:
+ - name: ca_crt2
+ path: null
+ onMissingValue: prompt
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: error
+
+ # This will be uploaded to the 'hub' vaultPrefix as it is the default when
+ # omitted
+ - name: config-demo3
+ fields:
+ - name: ca_crt2
+ path: null
+ onMissingValue: prompt
+ - name: ca_crt
+ path: /tmp/ca.crt
+ onMissingValue: error
+ #
+ # The cluster_xxxx pattern is used for creating externalSecrets that
+ # will be used by ArgoCD to push manifests to other clusters.
+ #
+ # oc extract -n openshift-config cm/kube-root-ca.crt --to=/home/user/ --keys=ca.crt --confirm
+ - name: cluster_foocluster
+ fields:
+ - name: bearerToken
+ value:
+ onMissingValue: error
+ - name: caCert
+ # See command above
+ path: /home/user/ca.crt
+ onMissingValue: error
+ base64: true
diff --git a/common/examples/values-example.yaml b/common/examples/values-example.yaml
new file mode 100644
index 00000000..6c006b00
--- /dev/null
+++ b/common/examples/values-example.yaml
@@ -0,0 +1,159 @@
+global:
+ options:
+ useCSV: False
+ syncPolicy: Automatic
+ installPlanApproval: Automatic
+ multiClusterTarget: all
+
+#enabled: all
+
+clusterGroup:
+ name: example
+ #insecureUnsealVaultInsideCluster: false
+ isHubCluster: true
+ sharedValueFiles:
+ - /values/{{ .Values.global.clusterPlatform }}.yaml
+ - /values/{{ .Values.global.clusterVersion }}.yaml
+
+ namespaces:
+ - open-cluster-management:
+ labels:
+ openshift.io/node-selector: ""
+ kubernetes.io/os: linux
+ annotations:
+ openshift.io/cluster-monitoring: "true"
+ owner: "namespace owner"
+ - application-ci:
+ operatorGroup: true
+ targetNamespaces:
+ - application-ci
+ - other-namespace
+ - exclude-targetns:
+ operatorGroup: true
+ targetNamespaces:
+ - include-ci
+ - exclude-og
+ - totally-exclude-og:
+ operatorGroup: false
+ - include-default-og:
+ operatorGroup: true
+
+ operatorgroupExcludes:
+ - exclude-og
+
+ subscriptions:
+ acm:
+ name: advanced-cluster-management
+ namespace: open-cluster-management
+ channel: release-2.4
+ csv: advanced-cluster-management.v2.4.1
+
+ odh:
+ name: opendatahub-operator
+ source: community-operators
+ csv: opendatahub-operator.v1.1.0
+ disabled: true
+
+ pipelines:
+ name: openshift-pipelines-operator-rh
+ csv: redhat-openshift-pipelines.v1.5.2
+
+ projects:
+ - datacenter
+
+ applications:
+ acm:
+ name: acm
+ namespace: open-cluster-management
+ project: datacenter
+ path: common/acm
+ ignoreDifferences:
+ - group: internal.open-cluster-management.io
+ kind: ManagedClusterInfo
+ jsonPointers:
+ - /spec/loggingCA
+ pipe:
+ name: pipelines
+ namespace: application-ci
+ project: datacenter
+ path: charts/datacenter/pipelines
+ extraValueFiles:
+ - /values/{{ .Values.global.clusterVersion }}/{{ .Values.global.clusterPlatform }}.yaml
+
+ imperative:
+ namespace: imperative
+    # NOTE: We *must* use lists and not hashes, as hashes lose ordering once parsed by helm
+ # The default schedule is every 10 minutes: imperative.schedule
+ # Total timeout of all jobs is 1h: imperative.activeDeadlineSeconds
+ # imagePullPolicy is set to always: imperative.imagePullPolicy
+ # For additional overrides that apply to the jobs, please refer to
+ # https://hybrid-cloud-patterns.io/imperative-actions/#additional-job-customizations
+ jobs:
+ - name: regional-ca
+ # ansible playbook to be run
+ playbook: ansible/playbooks/on-hub-get-regional-ca.yml
+ # per playbook timeout in seconds
+ timeout: 234
+ # verbosity: "-v"
+
+ managedClusterGroups:
+ - name: acm-edge
+ # Optional - Point to a different repo
+ # repoURL: https://github.com/hybrid-cloud-patterns/mySite.git
+ # Must contain values-{clustergroupname}.yaml at the top level
+ targetRevision: main
+ helmOverrides:
+ # Values must be strings!
+ - name: clusterGroup.isHubCluster
+ value: "false"
+ acmlabels:
+ - name: clusterGroup
+ value: acm-region
+ - name: acm-provision-edge
+ targetRevision: main
+ helmOverrides:
+ - name: clusterGroup.isHubCluster
+ value: "false"
+ clusterPools:
+ exampleAWSPool:
+ size: 3
+ name: aws-ap
+ openshiftVersion: 4.10.18
+ baseDomain: blueprints.rhecoeng.com
+ controlPlane:
+ count: 1
+ platform:
+ aws:
+ type: m5.xlarge
+ workers:
+ count: 0
+ platform:
+ aws:
+ region: ap-southeast-2
+ exampleAzurePool:
+ name: azure-us
+ openshiftVersion: 4.10.18
+ baseDomain: blueprints.rhecoeng.com
+ platform:
+ azure:
+ baseDomainResourceGroupName: dojo-dns-zones
+ region: eastus
+ clusters:
+ - Two
+ - three
+ acmlabels:
+ - name: clusterGroup
+ value: region
+ - name: argo-edge
+ hostedArgoSites:
+ - name: perth
+ domain: perth1.beekhof.net
+ # The default is secret/data/hub/cluster_
+ #secretsPath: secret/data/hub/cluster_perth
+ - name: sydney
+ domain: syd.beekhof.net
+ # The default is secret/data/hub/cluster_
+ #secretsPath: secret/data/hub/cluster_sydney
+ helmOverrides:
+ - name: clusterGroup.isHubCluster
+ value: "false"
diff --git a/common/golang-external-secrets/.github/workflows/update-helm-repo.yml b/common/golang-external-secrets/.github/workflows/update-helm-repo.yml
new file mode 100644
index 00000000..c12af2b5
--- /dev/null
+++ b/common/golang-external-secrets/.github/workflows/update-helm-repo.yml
@@ -0,0 +1,29 @@
+# This invokes the workflow named 'publish-charts' in the umbrella repo
+# It expects to have a secret called CHARTS_REPOS_TOKEN which contains
+# the GitHub token that has permissions to invoke workflows and commit code
+# inside the umbrella-repo.
+# The following fine-grained permissions were used in testing and were limited
+# to the umbrella repo only:
+# - Actions: r/w
+# - Commit statuses: r/w
+# - Contents: r/w
+# - Deployments: r/w
+# - Pages: r/w
+
+name: vp-patterns/update-helm-repo
+on:
+ push:
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+'
+
+jobs:
+ helmlint:
+ uses: validatedpatterns/helm-charts/.github/workflows/helmlint.yml@985ba37e0eb50b1b35ec194fc999eae2d0ae1486
+ permissions:
+ contents: read
+
+ update-helm-repo:
+ needs: [helmlint]
+ uses: validatedpatterns/helm-charts/.github/workflows/update-helm-repo.yml@985ba37e0eb50b1b35ec194fc999eae2d0ae1486
+ permissions: read-all
+ secrets: inherit
diff --git a/common/golang-external-secrets/Chart.yaml b/common/golang-external-secrets/Chart.yaml
new file mode 100644
index 00000000..38549d5c
--- /dev/null
+++ b/common/golang-external-secrets/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v2
+description: A Helm chart to configure the golang-based external-secrets.
+keywords:
+- pattern
+name: golang-external-secrets
+version: 0.0.3
+dependencies:
+ - name: external-secrets
+ version: "0.9.12"
+ repository: "https://charts.external-secrets.io"
+ #"https://external-secrets.github.io/kubernetes-external-secrets"
diff --git a/common/golang-external-secrets/README.md b/common/golang-external-secrets/README.md
new file mode 100644
index 00000000..e12d58f1
--- /dev/null
+++ b/common/golang-external-secrets/README.md
@@ -0,0 +1,14 @@
+# Subchart Update
+
+When updating this sub-chart, please remember to tweak the image tag in values.yaml.
+That is because we want to use -ubi images if possible and there is no suffix option, so
+we just override the tag with the version + "-ubi"
+
+## Steps
+
+1. Edit the version in Chart.yaml
+2. Run `helm dependency update .`
+3. Run `./update-helm-dependency.sh`
+4. Tweak `values.yaml` with the new image versions
+5. Run `make test`
+6. Commit to git
diff --git a/common/golang-external-secrets/charts/external-secrets-0.9.12.tgz b/common/golang-external-secrets/charts/external-secrets-0.9.12.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..368cabd9034a0f823297e5036e4191438ebf7800
GIT binary patch
literal 93006
zcmV)IK)k;niwFP!000001ML0lavMjsCsX}hDlk?){Q|@
z(&^Z-!%6@}vdlsic2$8C?e=5M3(S*!YpuLg);$v_fRvpX=j@h=th}tuT=$FrCG(Sr
zg~QaJC4QEUzv;g{hW5kv-{b%8|M2}|{eNZqX8+mu&-b4_d%pi1`t3j4fByIz@B1yR
zrP5Yu#uCr_W)aLT{m^gh*Y*2X+WhkQA6&8|8{M)+*xMXfBOCR
zPk;CU&j0!T;~&299{09_mi8Avf3^%h`biqZ(F^bT$pf}r=KuM4^nZ=^pFEiR=`0DB
z8U694xGx^RFW!Ph?80C8QRbb;iT8Rng`fS*Paj;y3;%_8nPtoL#dv%XWS6VyXcjNV
zl`B{N!-HUkQ^`*gb~ECiuTnpOpE8)uY^3pm1<{zLDgBtn!!W*p|HG0Cf3%D)9$fji
zH*qpgUpyFkS8D0Q%7TWT{1^685PJCk{OIkE_W!TZDgBimUcyKKz7pJq~`Bxy8
z`HN-901Ew40(<=beE<9B4*&0>wVeOE_h&CpN7c2E|MyRxmGJ)$a2Fi@-$g5*
zzjw(}Z|eJzH}{ueeCyA>bTym#X?ng2!`uJk!Goj71C;3}UKT^giy-n=X%Jl~%5cgD
z+VgJXm3I?_p%?iu3iJjn?5&m_^LUrjEKd9fKr{|QI0Qe@{^})wtXIj*_Z}ft@xL&*
zl>VMKz4f3EAVQdrc@MAID$E}0!+ke;@Zc>D5Jd3df>5Rh|1Z1ry?Jnc?k7m85UZwM
ze6D|)dMuiIm+=j*6o|}uaIu1M;KRj(8H-@6K;Tx;1vU&z^Ij1}dg1;4yEo!I(?5XV
zd4K%zU{jR5)7J9-e}VhH1Zp$NmYeZ{?eBko|MAo3#rOZ@@%N9vckh1}?f2iu|L233
zUzT|J@sS`Of-eLAJ)>*=;Qt)MrT+c*Az*}a;BLG@`Ob{+qk%WfyWzW(&*B}9IQq!K
zmCt*=2Nos?=DzoDFH2U@%zOTn{uwMzSLf%!7jG~ecw#IcmW5A>mU)0r--m}UXCRd1
z{m+Vp!Fd2U2KXe}!05pzpN~!bGn@f8gP3i`fQ$4}e5*OL%C`+;gmb_H@a)525o8=|
zWtTzfJp#;wAUS+@ihU9Pq)UGW=;r7gkX7ijRKO1TQ(yv94n6+2AmfM-a0pBe>x4;Y
z@nW^z5XR0$CuqXW@^Q
zi(4kZb)S2Kzoo;!rGwH~d{Xm!R{zh!?bD7G-;7|#@kr@t#XaG@LN@h7ljEiC`Pr8D
zq=TFPTZkaAp9IJ>1423Y{|&ssF9Z8W6)zSr5G^$|SSs(x*9r)6F-kATw0t6kK{N|j
z^Oh}+_##?5rf`5WpvZ5;L?i7siQP|>l#0K>s~_dU+bD>~N=C|wv-Ziq0b|6OUxG$;
z6fWA%htpd=ki1!^esUem)b}^Q2&h*uBWUuai=Afizt|$ow^4h)ngKq%@u!z@e6@AJ
ztBKqJ)WT)B@ZmYliZ5V&fYxl6kN?6Ohz|!l+1|`gvLT}BBo0GA+5SB%Pw4JjD}3zl
zoe_ZB?q6wu9`7L#oKvwHbCOeD|FCxXT!YqnML7r6uzg@sG8WDJ;@RtbingOpY`ujw
z%dxk%_O5%=#~w;^Y_^x2D=UjGsd%z%HUptOLp~nZGo!mkA5Qpp@0wWdVz<$Zuc9OX
zJ!*zX&d-m&YZ^4tx2$oza`=Jx$G~WScCNNB_-Xgw2#)pL8{pYE+n{Sadr+DTv?GvK
zpQRCK@w0=buZ$sFCIyDW+XLKUyau=*0tQJI2tx#SumD;vfV?Mg6N>EsA+E0k2|Q%i
z@hUM9&}g(GnCVX=F@_|{x#adj3U9n0h-q;e05ZMx9*I9?>;hO5B~ugslyfcLm1n3x
zvpCr%Gc^MCuC-IHZM(JTf6II*Hwd7q*U~+pOaHt7xMcr%^6cqTXaCM%UoK>hJc701yQ!?@+HVysxcbvPlyAR&0iw?VaUe8M;$H`NQ2z*0q#18e
z@!dW`wLc$8iVJ-C3zp3;-)KO#1#?wu&4KWo&nR(~m{Ge=a=Ny|SjO|pDPXIdM9XRX
zJ^xHQ5&nm;u=zf2nKqlz_Hy~$^UOC=*^0biCbJU5aSTsYn1S0f1IXklo^6VI%sk3qNcG!OJ+!-uc;0
zoLseSNb8n=D~vki=p>gg6ga$!mrXBh+O+M~t+#A{|4H@g7hs^SWe4dI-2bC_Fay!0
z#6AydG>!xDMcX6!PS|H^mV>Y4I0i(0+jcaZw_mEh#erCIk?K$y(#g~w%+MPSXUTk+
z`2Vx=SN;&B3b@8B{IQJ+P}d{Tvp6-5CmzBOFU5~GbX_XBnujA|{$o9fuUQAq(%&-F
zXjZEbs?{Nz&yyctG=o*CdV#q55#WL(s}6SQvoO2-jStp#L<|I8KTVI5cxePf=8_J;~7>~j~9*L_oF-lc>>jE7dL(@x{%{e$A|LO4=cDJ
zVbO)}{jV$kcF+4?*SH-tayNhmCP~-?9#s(_f~=p*0V?|ih~W(I1D;h~FR?@+*EfA#
z{h*A=YX68wm=>|+Fvs7=anj1{b>2ssB8jtj7KbmqvxDQ(Z^Fry52O87ylBPsq2KQ-
zzl7rXPTCpMj=M{PuK9}>
zu*JHK*Q_m4XXc$WVWvkO>G5p`M
z;HLd?%abqTFuu4wT_PTVA5u8gpm32JyKKC$<~zCsQel~
zFNM$qudgy*l02I$jgPC7^s7A{F6Yzxph1?JwhlyJ1M@`?O|DrGB0%}_^uM&Cd!x|`
zv}ldxu`eGYR$5@;Z`Mn!8L_a)U8|$NPJ(EG|C1@3trZyDf&ZUA`Jo*D@pS+3zQg~!
zXq)1Hx)jzX!eI&F(|6z8*%WXtG{#4PDulVa_Izk{N62xTfpSWD&&IU(1VS>
zO5)X0Ofd8Y@E068!p)~*rvv)2B#cAf8EiXV$iI_=N*Tj05>^)Y+*2
zxIy%%{%f@a==thDm-5HjwaynoiVIErivXnFb{t+n{7l~${m4o7RSjvh)m9(*rv{Pl|WFxmf>)?0jBr~
z{s6b|t3VjA+!OyC$CR*d*)8;{qQyS;maeA10-qyLSNC{!hE+QTTkf%FgZ)Ywt$W%m
z_&;&7_AFqR{Qu*N6|mt-?_B9ChidnYD}b5o
z$VKnYb}~|i-`wSLG5QH17QZUM~jPPgg-W=c~EaGQI9U8$DL6Rq@fMBls2_tKy{I
z*H#Lq7hGxC(OYm_`~qNtu8TMcawocBX?9HyI5{|(&<|(o2h?r9uKDo|{GH3R;VNNa
zWlQv1dI^HGo8WQo5wd1PuGUoBUK1=
z=wJJxi1;4iMIBN9;&4Y143>(aJQ$kCfzKOu#6iwCG~gf2Yr`!QSWl7#W#3#4Ef@aYep%?kz#S7YDtwPA0ZiOk
zO!i6lkLN4^Kma7+@ZbzYCQU@DUQ8x}QDtxx*Gi3qvF8GWm4UZsh-ZVp}=TAlPC2!mN379uvhjZ6&4
zH=Cac{t3Z&^78a7&zMT*#*ZlPS_2_@S_oMH6ee`VFOe|OsM1Wz^Vx*~(%W=3T?83k
zi~j+rXLyj+fe(m&6FMQ2-n`lc9^3(-#qa%P&vcC@TK#WhX99mlZfCIbmlO8f@
zdfY{1ehMR~H!9N73d+h8TyXqZgV3@0ql=OJ3hjrueVz;xuGGyX@UmW#lLW;3)niEg
z`uy!iSnS*Q8g&T%O=;LBl!gr#TK#MLH`qkpt|LJ!>N}O-|F-xILLe1XQ~czq-^wr~
z8b*DqKr>r0Hc8r!DtMuB(MRXr0tjue3~N@2h$4miAIY?(zPF6$JPk9FO0H+8zsHf!
zvo{mLZ<)REL6p1HJOSB+F6kJ?m<3DV#Y~RpwDggFJ~AC!{v+^~1O*nRTRJSxzY6<&
zQ|o%*dOOfPVmRN`ymG<6q)3Hlt;6b&py)WtSIvl&{Ky0In!sQR3Yvz#rk?@{t{SOt*%AQM9nF%ObD
zGF@#32qAAI`PhQ^tdusZ1TR=HK83^wh9#j#vz29YgTR)qM=t4$J+^GlJGzMYGd|~k
zM>Y&D0diO~ZUP>1320RMZ+YxxCa1m$iV;xA%AHh0HjXOzC0G;N9B4y0jN>ql
z=P-`LI2&}6x3~GQhB?2uF4v3+SHhUt2{oTeAL(4P{B7G}P0@7RBgcGN9+-q|UihIV
zzh>)Lr1KPNDyjCo0Z0^(KfDV9Ng1krFwkr@2clF7y013d*FF&gE8uVi-)ETcGdx!o
zEQrX}CnT-YB$V4xeKu3NY0SIv!*F;Np)NMJ;?RtwkvKXzop|scuMUo@7qW9|JQ_}r
z6!=tK4YowhaW49X(g4uhg3MnSP;F}*!;*xxZ^nd7b&x3iEo?xlj5lXYqHHhheIreq
zk|jfkb`ue7+C|YOVjYvop_;$`
zV&Ka?kHav&!AbJ9Vv!*gy}IYIFva?4z6YFgo7VLr9%4>8cq5sneN}&A)JcO*I=YCz
zeY5}lliI~;N4DpEIDLgWTl!4=>EJSmnE3ZAKUuJ->3IEUwWzitxr>`!|U&O_vNJub1@!A0aK&BP!3)D9nr<}IG)0d;AQScbc(a2n8wOz6WkuX
z<_->d@@t~*WC)dvf^KQp;|oT+@EDE6@<)=KHN$KDYY@4^fJ?d#<~8P#*e9ib;xIV
zvA7*-|AipiLy*~j*07LN*qRCWb3cQ1Yj$V+Wd#4{3VnD1fyi4pMzo43<8n+89ot^A
z%#T=;^0&yf@Q#OhIYk|l^Iv&gNI=KBNzpqI)RA^pu?67TX^C~0p=IgKGyD;lILszSZRZ4em_j_mlsf4@FVC{>3c_
zh?=XAa2t}dH(ON8F{MlS+sV52g+wu
ztl~k{VH9o%y#y8_%L9E5Fkxho691aU(J@gt?$qcPHgigTj(Hm16aRcvQY7$m<0ZJu
z3`~i6s?H|3^s}I5Na^k=Q$yR0P7Dt|I}U>Mzy9OVe~$3GAAcO2VA9sXcmFMV)yxdP
zw@)X1ai@S|Hg(0Vy+$ZZkb;i8QA!D_{xYFArq04TcTd%vyQg#a{4=?G)*_37izn%i
z{FJ0#L~x`*s{O1C!8V&ykuJP#*jHt_sI`C{JQ9?l{0X!a8+Zmo2Qe&Pu6^
zMhd$PYkhbtmW#IB?Rwgk^M$j0*I-j>wIj8ksWO`AMiKdGGpA(%geu!&v;}-_inbP4
zu=%xYdLWgR9OC29pRY79#2A;7-S|HDzj>|7n@ZP`EB`hnVFQ0Y=E~57CT^Iw!&F&V
zRuLW>Ez)~L>>e&`^nb&D|3BZ=ue#YLW@Zmh{6(Dk9lip#ro|IEXvu)XClfcG_9MX4
zF&_7t7oR;6EYw(R@OC^JeTOrg``PTW-9~8cGM0;;$+Z
zqmDlTOZNPkMb{4}Z;W~y0m$O4jGQM60DtY@N^uNv89Dg~mq>}d_Aqv2j6t
zl24P+rqQy@FQ?IP*pU|0=fOy9ZF!UA(-k2!OnN5F)PRyJQe=_KZjVr@2zPAx-5%-}
z)7295ZMBBnSnuh&UNx}xmL$5b-O;wTVdf5lcLJVrYac^DPyP~KcOex!mg7_oTHv@4
z`2gPG{DWY@gp!rV=yY)&q_RQik9gp7P#BlEw>-SZ063lJ!Bq=A^JAxSP>-QIOiJc@
zxPj>Tk-!b(jtA7KVK(mZQ}eI<)g(3-(2m;Jp3T&gKl8N`24?J8brz
z+^@9pHMW-J6MH;gw9*eXIWZL5Au5hg(>~_{mc9?^U>Bg%1A6FiGD=dxfBm8Oe9_cglPq7~@jcxRk%4YfwFukr3-G_oPv)ljt=+IC)}5
zGgyTe)T<*cmrmNrl8dHr20CVGdJ`w}y>dPZAm1kU!+JZWVID2p(N!6{I0I7xhiypb
zWz@05Jyc()L9i9~P3cDb*h=BLx~)l3KaQjIdXQ2=uF>3dXcATnhzcZXlXUIQm-+#VGFYH|PEf3{TjD!4xjBn>}pCPBFo-t?VGy%j!A?g)!Az-*8j)X$2
z3JTiOgZa3|H9@OJa*^u+@(3azUw*yYc&GN&3?os>&RItcs~r-OGMpGXhX?F-%(>zv
z3lafgO!|;^iwM%0_N5qf9{ZgC6H;F;9E=n_0UlG7RlA(V4{Yk~mb0Ipd>r%_#J|5^+S4IRZb*+F{ZB{4=_G7OuG
z(Yr9=Zk~jq5s2IiuN&kxxex%+cdygVp>C~()Es9$Su)*zUz#DjlQ&6_bv&H)sIM4W
z`YXLA{l|F*+{=ZZNmt@U?*1$KNgk(TmbX&Va3ZG3FOf{kEMt7pW(hI}#k((;VIVUS
zhV0f)q~gVUbRHxr1%rkx&DvF>MtM7Ikb^mDSVu`*J`pl})v!NimXb4WenI%d8KN{N
z4QrIxbCoSul;n(wU5!rsWyog!1UjihGMUN_yGom}S>JZr%PmZ0_{dF-4&uc!z#=t<
zQ(0=hWzn;~cEU$P*h0-JLSG^cE%y92=~{>|TZI&-+%CD*H!3Gx>uwv{#Jt*PAor`_UVyM6%O~6C
z5!Whw<`7Ob!>%@-)N#qV@5vLB%jx&sSn?)aNvgK&0;Kz{q|!F3?C>4KaI^A^7q^hC
zxd;t|(_r)FoaV&__mn%Qz(twGi>at?%}KO0fWX3tXGGs3IY~?pxnp!qlcd=~yVmpx
zlbofsfXe+8%4-V>>X7clnr9`i2U#|RXP8Dc9pWjz+18=%pC8paE>c|+I2WXv=3J1R
z3zBm|>gwxUkW_OnNO$Cd)Pqxy$oL?WIyCzR2~xJ-WUMs3<0r)lrTLmIDB&rdZ?M1=
zq386Zr58B2-XaU=u?tMar}SI;RakZ@xjwQmnDZGh^qh}6h|2f!el
zO}zIvQOhHr`=PJ*KEVWvP5G})uUhd1oN?Ws9t>2Cv+jyl=k1JARSMmD9FhJcS1$UA
z*PbInHj`&p0%Pmd6X?Dj(w3Ex3ezj#C94nx??_fP6Sy)f=-doKp!9e8kkXU%v)}CB_=)FbX)Q&zKhFc?yQo8vj2i*ENcu(^aJ}k`Nc*3;IO;4CFeA6SQ
z6?Ywt$?bzB>w
zZ8|P(E84=bs;CUuI_z0q;RZE<23s#)F`RuQ{TxU7no8_YKQ!1(i(ksaD|GJ?c+8su
zvy8QmvOGnZ$oW!QY#+rg+A^xhww@!vHY!f}(FFj>P=lus6)KU;B(+dY(ALLNdPgF<;4P1ylMK)S<3&3vb{48*OS
zy3WG*+EBEJneVbjD#^Ip(}vAPGb2A%8*k1Orni>NLlW!Aq+<&EsKjD~bW??a1m;nw
zsn61D2D!!x-Pd$WvQ~0g8OGallim0Ao9nbR%+a-I!x}zz8e5*O{w*`~7PPg(8?SoJ
z?(3|GH^Tyjdb9SfQ6Gv^o>;C?!?Yi}?UU7J>ZMz3!}TV^57G5*63y58eog4wc_idZ
z?TRR8Dt^>mA}ep^c7S|*3y9gmYRI3Xac?7iKQ>EnhOmsJSrcD3wlP#vqjIaf
zIqc`KU(1IM`&B=3*st0w!D-{8o5x(WSw~KTw
zST?(Sna2zm*>cMt@f`D54+9mIv?H+_az7%3z-tEzkLLU@WC)+UJ8bqaw8>Q5ss)?0
ztW0Fdm%or&OSyw`M}R%(00@1LmwJqZU$yAmGHv2hl>!wjkDoUc9gxk8;pk*93b47#
zL-siw@DH?6?&K`C&b`O$6Z7lM@Xbjm_38AhAfHz!;AitN<G7v|OdWN?DTDy7N|Q5+3{Bh7AWXAp3N|Iop>_GDrrvb*qKc;yk)
zbI!f_iZ;w-)w*ETF%RizF}|iN@3xU3VN5_xD56>Jr*^(`sE(qsQxE=KL?hnpw)TPP
z^U&7D5x;Ox{~hr=-LOA+o}&>E4XnQu`i0*_6o@uV%T!C80*?(o_IY_qc#b~Le2KG`
zsp+0~a~Y?cSSTyv0RC&R*J?=7AK2h18tkdm`h`cM$Qt>44CuGPI)r%>Udi*m46l^q
z$`GJ56c;Ls7c~!5@m94Lq*xf`GDhg-Um!)C=2~?ng+jI`m-w?
zc4$iBENYLXQL9BdN}Za^;_&*VrS1lWap=2(tv{afN~%59#>uOU)g4yL4e93h;6PF=
z@wy3mtOuUe5be@f=Kx*gYv@%fu@!&D8Cu?02QsREV{25sd0F{HvewPs8mV%K5P_M!
z)Vn!n@Q~;4%+ce|^nw1wnecbk{zQ0?51ofs0@dnVYs($)exzy_xW-lD7y0}1(!ycD
zWfU{`b=#o7cR!p6g;+p^=kUFDsnHT+chQM(J(Nx>lw-Ez5y5e9&C0F{AM*vLtsQx50)rxr>o+
znp6U3gOZ9Wdm6?wp78?}CwOGY^sCgsBF_kqxk~(V;29%SQ3G?h#1wiUIDVGV>Ys%-
ztE=bj>UoRi>UsZp>v?M!rn`!FOZJ}*{iGnroq}(J^`w;Ank$%-1poc=^u4ZZeuNtJ
zs~2!=7iicRiT^zaS(+l@tB-v~9d>CTN~Q!mhw~!s8Q0k7nUJ)uqpTdNZo*J{
z8dw-f<`lP8v&jJePPlzg5ilrsu7plNem?Tv%RpO|H-4c)_-n>C$cD5CqOLBTb
z+Yk=N$oXR;I_FzVIc!rIzMtMkGnEi^h3riJ(MNQg>dKd3;{G7l{OZ$W8GJZ-V;sAt
zmh$QoQa|rQ5|-=#6>FDMHUfhhfJTO1w#qK~>+MKLQ(QBArNf)zS#_H_4vQFVMy@9G
zMC3mEh_}m4b+{6%5FUfTG%bR?Wgnt6wCxH`4*5`2b0d@kd;7)8RyK6%EGkHy-f;v8T6~_{qxfq
zMM@gYbY6VjICk-Qqkjz(kWIDVN$iDL1-aLn(OjSDk$jfc4B_TVGa$*5IRT6Hu<(~D
z>2)BlgC+$ja`BFwO)y@PI6FKfSV#;j0d^?JB)1_AFR3K#XVG4E8kTgVapNju2uY`HbMA`0_
zFOSc=CCFPj?xzLRx*Lp7Ga~sVY+gbAx*Gt~Sf>&hFoT|Oo-9wQWBR??9;wU)glrE|
z939X0M|tX}7JgG&5?RbhZAOH01|0K*>8mE(=(DX*3AUOCsEad>o?7>@PsBu}1Pq^H
z0iWT;vS6XY*UNCFvoxNl;cx(us6e{$!*F;N#W#^=6``3a`{~ii=>*+#UmYBKpW@^y
zjM>~fLYqfWD?Hb?j2s`xD_4S79VlBa{bWbjNe-$=*u>$sz+*9;1nIz
z`^!*0+nn!)ac~yI)O-a)CD2H+FfLUGE8SExS>y-{XXwv<&-)C+e&$A4`Aj(uaJnmp9UXRb*l``~
zc+3*Ewc=zBMT+K7WK(k}(smyXMdst&b%IBuZZr1NDwu^+2muR~
z&%J|*H>EjL9^7k8CxL>0?sUQp-sG8$I2elva;Kn9y}Zb;Jt;Z9c=-PV>1gwhv$Nw<
zM50+di$hY;lM3D!X+Cb@$}D*GW^D=Wmj=U~Dm%i_&fc7&UL*pzchr~UWiVfO)7#dh
z=SEj)#*SOOv1F@Y9p!K()$93j@!Edo2i7vJu}4ie*{bY-Cu|Yn{76Y)JMz)0-606Y
zhW2=fKOG%uo(3G-4w}Pg^Je_2O-Mm22@tNS`QB{(In$lHSizT0RYXx#_x2x!b(7?I{H1d>I@EKKH5QFX|xThDDaMI>VCjw
zrnELtm$$^0>kDe7GA7k*%6^M&U3FifC5CwvGL)bUub(6d-Q!
zjewy+PYo(}Yoya2v_gH}WsF(lhUvi!^b)HUkz%O!wzeAL#xT0{)2W^M0s0Hn
zBT}O5TnEvCK}a{DM{{E!J8HuzAkN2I%aYrQtDw>od^+v23P=S%z}QRYSKPNAPP>
z5f5^`1%w5J?>b202vg^Eh;V)$aX*h|=?JD6u?71rjz+V1F^1WOu$kcnX5!%y_XynK
z#R?8!EYA_nX$U<>m)RoxmK&cj7v4}?Tn}oQUvE0AHyCJT;5zY(RqK4;t7y4-=t{-1
z-{3y(0wzqF3z(3rZ~+sJ{k-?==iLWP+QX;s(7QDw~Q@RY+-
zg|#|7=xth>i0SYYXww0gUmBe^SCvoWW*aiC6>WAvZ9fT=qycF
ztrCXOT5Iytw=P%6ze>T#*D7@wBJt`p_ur*tc6yS#mWjH3|D@kvDnQcFnZjIrOsA`|
zDc852blUtjwsa71T3f08+bZ#|uFVwN*!T5EJLNWhg9VR!Oq;m7{DZjo5j<}h0R-K|
zZNNy=-GfYMtwG9~r1Vm?sM;8#7HDRq#u&Cbl`yhHx0iVI0q49EWi>
z>V_zyE7gX8#fu6jX37bgI6F`*k`zdYRV?{72p|J$xF)_<;JtR(^3vR@lx@eY9Ad;%
zaf<`*k=Xusn5SLTgf607U`Zcke#DRu+*2IkUIF3PXRg;K=EBfiT7L&}Ht~057DkaV
zxQKF-?lPWxk5b?F7@H5_KZb$m**2w_lxBR|*uAuYj^5JTZP4B5WynppGxCL7d35eA
z;9QAhR-lZ+d==1p=n6xnShQLOn9De)JUz+sW)2=Uuk5ruu$K0@#)YAEEJ|)x(i2d@
z%DU8_<)iLHYBP2uu{Mrw+i~O$9Y%U~6ftxVu_kQY4j~&y5BKH(vgH_JgA04#_@PaC
zq1}e?8u3Vywwq@T;2#_o^>$K-!qt#6o>YLgOuQUE(gwo|x!r
zCFK1nhWzr
zqPztXClK^M;#;;f@a@s67~0sXtwdZ4E7^5@eZ379VCujB*)u;cTSHHkxemQ1M2u
z`$#Wl%d}BgDZEl(i%#EK8B1Qp@kQv1JOsxf%h1dIu44l>bp8h@^uS>oyC2$luRGrs
z!>{h0B9w2%`L^`11?SuHXYg(Dyql6{qfb;R!2Cv(2JW4U5#w(mrHL9FcrXN^S%P`X
zd@L>s%AmO@C>I6gqM$rS3rp?pqo8bJ`%9G2{0oD`RGL3uPE7h?4~)7kO-N+n)+?ea
zzeU4KBK2^1QAJM7&9rabQhAVtY-%Q!2iY4owUf*FCm&f7uxaS0cO!$BoEP~LhYO(=
zsBvFpa^X_m>ciG$EG64V!BK8Fd)_JkwngZ6dE1@T?WFF%IH%FS)NN|`Gs)$%brPM+
zU96T|lTVT)*c%)5Ru=X=%>Q1pPL@a)b{Tnx;-cXNl1W`S>8D&SJFxM!P_gK}U1!N~Jk=#jz`DoUeslN&8|~
zgf_#*w?`-fMM)S0^UG+yi~~4c{2MuY=C$Kz@^C~Hr!tdE?XN^5_nS4iT21{V^0Q`K
zUVzbS)!m9wV1*C)qxedAhQiWK^cf5GMf1Ax_S?D!k!e@sI8NwfTMM6@cU!SCu85y#j=O4Vj=QqmhvTmDakiKSNugnf
z9I-j%XtouH9DCS;Lyq?vIa;I;)h*jr+%buNsX&Zn-=Lc4KWly3`bn>yuUB1jzFyAP
z%lUeFj;@s2-TQhOcg;+^Q3}_Biq-PyJNmd16N5~AkA!IIv&2u<^O|vDiRZ);Czfom
z1xG&q3}T64O4vo5`R_~)U1cW{|1h9(@Kg9-nL6OAcL{pZhyK(JzDyALL0ib0A61h`{Z9@hH8
zo8ESIHNLkqCTk>HZR%m6Zk7K9-fG0cp|}U5ARb>o9z7fFkC7gazpd@C&%)&;+aDcF
zM0WU0Cy
zdnnis4hVa_Wy^ud4!ZM@$tFEI4Lhr4^b1CtxD_KwYH0GPjEQiE2Pig0E^`NnQ8(q}
zU-nF-@~w5k-T(sIb{L5hHftu71>n4=u2zmB;*(xecK4MzZhfCqeOK)_;pp7+7t8E+
zFZVyrvnvXx&5`$h$+NqJ&=1EirJKvY=zLTxnvVfp!LSJ>Cy}yPx>_#7K&vN!m=2Ji
z4E0ut2FX{1K3B8u)Sb5E!6j>XVFfLg;)P4%)deQ?2YR0c*M%e^eP#ka2f+IY!oXfJ
z|8xGW^gPpslf->;e1tH*Mtu(OGEObw)DliD;nWi5t|S6^tRp`i^G`+OfS?BnJWxbR
zr@=*pu%}-m@A&0gA<*u*m`I1a^tC$N<#1Q2d+%uyG|tJOca9T#v)*A^(^)4jswPUE
zDcmZC@ZKLB9f~+f;;{3i2EFCu#c{2c`avv&MIv^dud*BzA>1K$oaNRedW3Wh9KapD
z5H_1C@MrO2$&zg0w~GZfDCHm@QeGHd2|$~R4y7IVg%rETtMsx{uVi#sel$yNX(Jsz
zt2SGHsSheQP4`ZbV%dQRv@i}U(A)_mlsw#BSHc_;MMb0%^SLInhS5TWLg=4o-ZW&<
z71tJ!9s?G?!YJ$@Y%gdq$C7i4Ao7JOS=TI#U^#FYvCu|H4n|b9&eQ4x4I}MN>qHgJ
zLffI4)NL3HIyxXKGp*`yB-l06{?e&y(@SJo6H?TJ6fcB-EGP)a5oRRBRcLS(8eD~j
z&aSRPgR9VRuPQVUjyFcbA0FApQ3aEPzK-Vd>eG48J@P))48A1|R-eYDC2K!?*_2Tmz>cf7Rr4AuvyNdo0vl8y
zoXZ~Gl+CWNvI{(s1yZlpT#R`GxY8N!At&*|8;HHMbMw4eX`pb>?s~`A)`1ZMM6Y}^
zmx|&pXc2ds`dMbGzg#UVHeen2k%`%vH_P19>*gaeg<#SD`-)o^l*2c1_5j`tt34DU
zE8b~G8M?gHdCQwajSJ=6eY{BmM(hF1Z=9(r^W1o#DJm@7dKBX%vp6vh0C*`JS#bb3
zD~q$TI4jFtiy5;0xvVV6kr;I2G}fS_ndaVNJJxw{46Yc@jZN~9dY#GI6b73^
zon~8csI!MHIMjKsQKxa&UvGNn*8h1^%dB%;r*Y^?z5CBkXCi%q!i3$Ed^#JyKRP_v
zIzOI=DHTnETTQ|s<4nAoIZJluQsk3vF-D`XD3l$0qRh7$p%TYI14wQBXPW(l5Azuv
zkiY7DMGC~ZrCSgl)Ne|oH);8D=pkhM#WGH?{%sZV#*-Hu0vg=~R3eT!KAX=KzmLzR
zc*ZM~QD7dvGE#&KPlnHAiTjLX5YeSlpPG(;{}LDZUzR~a7y-y+9L;xko${HzEFiW8
zxma9+@y#-xwB_wv+$`hidb@%9k1+%D3PV2)zn6jfL6kjzW_4%a3;&|kpEPf4mG#6{
zm(Dy#!-Q5e2_n-tIi?Yq61F#mjV+4BW{qYmyR>*4*mjm44%jH#xr*pqLR(_PH5^0
z6Mb-%+3%czBlO%aSRV1b^3=rno4Q$Jvq8?pw;bCtT{-A?!+X^qNIeTyVy}#*8{`}M
zsyMMpDf4hyS;KG5iR94IbhYqD@D=@K8lY~E8BO%MN!KGwo~1se9TI7{;xL#6W=gVq
z%CrfO;0jb(3n-9NGj<^^hu+GNLheP3njpqWa9%)Aq?L?U8D|~MGFN{lT=Q9m?T9x_
zGsKZE$(EXeNBrV1Q`pqplHcRBsV&q?TPHv&&!K=DrzB;J(^J-{*Amyh$c6%ew~%qT^GL0^MNVdGVXbGGbTfAa>$G}7E)!Y%~+82@k+Cv*5E
zNDOih(H7wfy*Zp5Z{3s6+9rlD-Hl?VZvQ=5#c&h7D`W1XU%t9~l1o6K8cjwy{-i%v
zx7nxWu#Bd0j{HuFcT&7#X)L99NnE#uYCnkaxhn!8H$sT?w_2=NLWTFrBVh}%)barO
zV%11mJvF7EMbuIFw4FDDCx*X~vYn>v)84gCE_1{Cc+W^pk7Q=|b*XwGEjy}8Wq3XA
zBPr>Fj94c@?5EJ7NdZ2aW5OZ?S}DFMFVrW=$TbUtIg$|F4zDL57tpGuxb&_QWq6LkohC_iP0(i<4
zzofM5X|*+^}SR1e67yC*axs()#~g9oozMFiZuW!m_-)3jl@9hwL2LgqtMD
zd>Oo#7Tn9)^|UYqQXy`z!<&@`izQ$aIOXJ4+7U_D6~^&Wkgpkw;1jqB46YvFK|=Xx
zD+}5q2M7)D^!zZ2AF*lZ&jsd1EZ2TZWii^Do0}0uh{7YlQ^H!a^LUnyWs*e_=f;V@
z425X-Eepf=<`)4s*wruB0sG~W_TG6Frg02VYSyV4;ToG8@J>UTJmapKVwCfnDHo81
zht|2zGD`6(j*H)wmr+I~P>|rElRAN+odMt<`q6ce#1WQ|pv#1o;W)|g`NJ&UTj&!E
zMfHr4(CkL(eg~R)i}`Wc6g{1yd>eR~rdYH9{gOxp-FeWy+SfQAXj5LJOCN8=cA`FA
zxMq>IoJN<2%3=KaqtdCv|6j&IWa8U6;JWU?h{(b96C6hP5BSjq)@Pu%6i0d1=D`?+
z(g9yWx;j4(zO+t(OcPc9#^C-FH8`6qrK5M4_cxe4>$lxEy|fnkR2?y+80e#MZ&}6Tnt-uDKR>PrtN$Yf|0i
zdcUhf8?<`LOW#sQS%z0OG8&!y=8ZqSjN>b##PpwdU#ryg%)9U-Kf#(^K3r=;XJXEV
zDNTgmX*@SC`}7lZ-dt61Cz>MxDW^O7YwC_#j^AmIwC1!&PJ86EN1}1XL+jfIrZnIp
z*0CY3Fa$;6PW*!}WYhg{!?`h*l}yUpE_=J7yqDS(J3686g3xr-)-XlWzov0y5^Pym
z$UosDr4@izu}UJ2v-`R7Mh7Pt!Tt*NalR5&l&BLxE4g4P+c^ioO{uTgQmIcs54zm%
zT#STl(fKM8qFYMjj*~j{@dU;xBD@A4Tm&HHc0FkW7j<(#1T!0X0iwCC54m(vQp*%<
z*evX{7G9F21Y*K!f3BY_{Efm4SlGQtI}bu*|H1X#zwVhsN6(&vmabpsUW2>v8C+xW
z7XE_kU6=dl;L(?-pcTB09m-67-i%@+OiO}TIia)%3}K$kcxBE~obmpfGBT?qv}U2;
z8@viL)>^%}x9W>&n>lfat65jcb<2Qme*x+4Y8rnrQg!`{zIemGuu^#OFFpBbA4M6L
z!AoJnF+M`RiqSzBujZJ&SQ&x5M-Z
z^}tc&r3FN_Cu(kzJ4`Wgk_$0#_}k%chrhM9p*g=}hSs0SrSgh0F0l=suNX
zL!LG?2<9Sz^F3!lSZiDELTd~5<%AE@1O=_W!P6%HiSBv^wBaLk{{gzzG*!nfXn|<+
zQ4X7gy2jDaxoTAbM~xcv6a~vu>RphE;we$-Id{`4Vk7#d{36;83F~u^`KR`PT6EUk
z1TAD~c9yUxrSVa9(fktREkKTBh+2_ml}YzgUZ9|O(K4HnmV%{iPErO#)E~6f5bdVd
z&H=hG&`_+g@#jVpq!Dte(|t93So3Fo$zOO`q6}52YQcjAd=#eYkE^MqQW~%Y@V(#v
zc)+)!n>i7aBnNT0TIkn)m|NGp(ZbIdaz7(+3a^q(XA>c<}k==*s)`0l?){Q6an5Mw
zeKVCy#%HD9+o$6lulJ{L8q)hNon871mLD6Sp=fe^^zrFw>4y?hJ>Wcuh9}YIl{*J>
z_jYnP_rGKq^c-^k519cAZ=U9(yeF!Q2^U4Js1PHsDRPwmOqxNg6~r0iLN?PT^vRaN
z$9f0NE`APSkb6EVY9V|okz5db6tk{2&v%Jw=_2t0t1K0F8B4wNGG6h#cGp<2Gx2Bf
zMMSDCKDvqrA(LW8G@GDFhSNQC;;xL1%zomN&&?{*Mx;+_Qot%t;}^;!_#((gSAS2@
z5qK6a7ON=8ZpV~OG?=d76w>kBzxKm%8e9xnGP?x2Hp3u>F@SqWn~CW9ju!K86*8&C
zTs7GCDu^l=w(GE72Ry$G69WpvvN{u)X5i%I>6wOa95csw<$;MUbU>U{7`614;}>a^MnCT#aM~Lh}K<+VFs-HM)Z>pBZAp
zK0S%(%O}{(zb*ZlG4}?23$GD6lFmS}Zd-J{QZep)i@Y>rrWdi-A
zji#IcrQf53lz@XK6uq+o=rY37CFh6wNA9?)HsX^y%_rHCO&C@z@?5v(wFjy#bXCv`l4>+NuVq@Hrr|{kuC9IG=J2{0BV5~DrkxgM*ku%8v
zkN)f>WCRV=Eb=VVJXalCMAgLUEHn>+gng
zAXG>D_L3;9z1^v<5e7jNBGmA3lnpVv%IkGeG@n@>p4ZD@P&JoyVs~H++Yy|jElD+Z
zH1=1ncPNU9^Q3zi1+%Ft*@QaH2gqiXarJAKAf}5cSIh;wnRqQ8ja@2qNcy-Wsg=*$
z5D%p@Fh0XZ_fR}(D$ua?9uKt&X-_B+w8R8dEn(FOn28&)4etx(zY$ii`OY!;yVK$P
z$#wr96+ZZff+Qk(aXF-BH_J9Ef`79H+M(FIh(q1_i$7#
zv^o(Uhe|5(u~03ktZU;mu9iu!P(0=+SGCYp&1R%n%}672Ft4ZE2&{86^<L!h2(%`P2w@sBkR3$e&R*l(OKy%FJ}zV;%dYAI6)m}La~htT
zPv^g8vbxBceFj3Wd&2Ato6;9;sA9id9YDiz`Lw7#U^otQCP&H?jQ`ynkk|ii4p4qK
z2Sz>Se?4Z{ctv*dPiCa%V@_nqHg~)D_wU5V?uvLo+ayuCbp%qEfi2TrHp==R@^5Or
ztfhx23554SK9nIdJf+*My8>&d1X}@P_&P2L!`XvD9LP4x=!mwC-R9$&J(s}b+BOe;
z-!>0L*5oA~^Q$VZvsPzo9F_g}ORO{?L6fUaS4Kb9+Y%yd-Kwj2xy7sIsqWW`1WDN-
z>8JFf7lkL9Ho>dJ5*iV+o+2wJF3IzBnxuXfn0$wAJ_l6PfgtDh&
zKIb;W5Xv;2$HD43rE}%`YA(;dxo5mM?yu~0-a|3LH%205QJ;#g3Kp$tHPCzZqxlUq<85aMiI(YT**U`
zbq7Sn=Ek~381{hOvTTXVIru4^+>9n#9W>vWFnHcPewH7EvCf{Q!ezq-y3J3PTtj`m5ky_QYEU9=w)6}HLN
z$8BlLAyg}{3~IN4Plja?u}8`nTRdGQ`yR8~J6Cxc(kbDlQRUWBw2}H9&y=t00*!0l
zQjpQ)U>D3a`ACKA1q@;dW@WnKo;hI7-5Sb{3}%{w-CT%F8ZwH}XY;~kipHm76k!?p
z#%DB~rM8s_qVN~Vj8kWOCCrEa2&MW}aIzfS%I7vwC8LG`T8zu03TPzPGEIY
zG-CY6aV>(|aPC}(-r)As^^FP9OE(K;lq-9H)kRSs@wE?=JiUR3tt(*m#H)6R*-Q1!
z)?tS>*~5}Vl9+3=g;SMtM#Fh6XXioI@@h!YEAASvQkd078fs1RWxiw8@;uR$mXyRp
zCK^O^#^XuEPkMX)51DVFKBuPNWI6u#gSkU1J_hJF27akB$z_1ep-I3sb6F
z^aYxfO~I1>d?F98CWl{CI?mHfhI4}T^mfMBGsMw=9rW@7QIVT_WLQt+oJK8We<~@E
z!ZD!-Y?zVp8~Q@h09r0a}_yXy6t!!?BL?D1aMleYm|Zr(l&nj0rI8aO?hap1(S4|1_uI1T+&)4)Li0-VSU%hC1Tgit%wg!#o0!VQr{1hd6wI2^
z`t1R|GY()0HE-bEEcXt{$K>AhY%x@j4^m#IF?ZejAp!cJZQq#UR6KpSoz^<&98Z|1
zPUO4pqgC4-(JH8ZnzYY2z2@qSs`_spOhwRps^)99YXWx=QdFSl2Wc7-NbTP#uA1^6
z_n6_yi!RSSmY;rgZNq_(V-Q#_{E5sRC4XsFL4#>f6BS)rjbm(mRr
zW|1+*zi(9wf(40>hTVt~ZT=vvI7BGXfnBh>eOX6^+OtEP2l*(o#=8}>=F4S%-ksxq
z4(bI&5+*_kd+?G5z#L5SG`CWKZ%VK={Eq3c#`SF4c%NbPC^7{OXoC6i)CKd?I{8&~
z#l$A?KMMK^|L3sal!~u1y^E?05PHoPW6}ah^@zr7gC1l*Nzvy`#3rk-L}&52{7GUB
zRs`hqy<-_%+#jw@NL
zcw!^`80kR)|MHlJDdX#35e0Ir|VVP!K)c2zjgwsw9&|xvzhxNx!RcM&+fT
zY4bPVJTYgwM9XeGqozV{@dy4sT4l~ljp+Yh{hVR@U&q|U?7mu5&BL7q78uj`?finY
zkGhsHg$%z0Oq#riCf%oJcT`|lDv;3Ak-I3Mb~-F99dQax4OMPGXj!hq7U^uI7g{@e
zqjz1-*r8li!IwB;G{=8zj$`*t1-CQ5Aqn;7r#J4@QgwbMH~N%yTF@SVk($!tZ{m_T
zx`S%{u(eJZ(IxuaxO8d=(M!2v8hDno5~ApLEw;}+zVq#3S7tAr@Fg{Jh!VMxe7IU*
zh{!)a!dO?;mU{OFle8$$mDFVgzTMj~F(#P|9tudYjT6JcC-zP`6+x6?&@?&9n@ytS
zo|9B1%AP|Owj4|?ZNrO1?c>L7xhdewPzOtc@J$!3vy6%gT8S}L*UqVRruY#E2jsLD
zqe0f9)?50GDl_sLA#>`>od&MV%fsPlo{`<{A5M`k(u1o?ovYT<{0%^t0otrHRpWf@
z)aCsovO~T{Bm@40NNBT3aHZhd^@srCly7lC!cOq!_fHn*Wbv`QHO7gc<>bM5^eGxrOj(q2^sofk
zHGGP4Uy%664Qw2|4r{*?;>0(Z>R$a1hlAbCSl^cEkD^jBkJc}-$%Cc>d+0`-BnvPi
zMe+>UZb;IiGCiV|qxf+nDsfS@eB3O-aH@Bo
z?Vc`me~}`8Vnm5Ohs&8(n&H=$nToXexE}?j-8d~+|4+s9^7UvVkDC9yTnR0+LLvO>
za$dR2NtyDZcuPE4Vn(xxOZT!kf6&5vCA6y2iSz(vinNicgh$XFfUn$V0Q8Q-o+ho=
zMGbPdaoSe?p#p@i*fZOUaP9zVu6{qBL);|5#_TMm`I5vCi+o7BPZ}^2<}^KNGuXw+
z&}SpF7TiS(bLbQ9vaMG0z&qTn1pdG!I;#RL%y36mrf$9G@97mNVrMKl7A2HtGcGn
zO|uP1cr?8=D5;pF(+hIf_soAUD)gOJo@|TYP_6suH8|sWxB@vB$l89C*phP(a~W
zS9=f$T1vw}PY1t=hoVF@Q-wliWT5jerU8zgH3%njjvgQ(kIJm%md%{9KbTQo+*hmH04Sa1=8*HT%)4I{(WvEEq=$h*oU
z1r{l)zekW~+z(jC1bHj8drzIxi@3`NNn`4;tJI3ERevgQb`POyM;GF5M>~t$gNTi{
zY!RMZW=ijajv)OWbO%c`9H#Az#W52K0Nr3Y?+yOi`8&Q#v+)@31|q1d83NyJ+06EZ
z46n&ly1{{){So_u!FBEnf<%n?6NiE;U^?0#-PUeA7El;2lX06ny4Qp|sY}V?G<#@lT0P^A{tKjh|iN{5{5tP0=O8k6+mG&Of&Y7}%MwjkkYd$Y62hrW)EI&GA
zmCmY=+pp~IZ0Gg9EP?t!8AB%^1sHj@9wV4SNRdPoO|526#)*~QglKDlU=I~-;CpBo
z%~M&fI(;-sJ6l07NWQ6x8cnr&o`k01C;`3U&0ZwTJFUI)`OulEFx<;4Uj*=gt-pMp
zJ%V)5!{jA@5#>DzirMfAJHdV}eE?6@CWs}Ft!0-h?%3z;fJ)msGuQ7a3w^`eIx|cw
zmmhSj+sUN$v~ETvbAG7>9WnpJEVNO~
z{5IER^{$c{)(FwK`p`XnM0m{d@BkM*2oy9Pm+lfB$(x}cZn{Jr{zz0zyNNaB&vP4j
z)m)`S$Rosd5L}a^3qlgDzlXmmq9fm0P2VL7SDh#jeCRK&83Y#~?ben&m6eIuf_DPY>6iyp=0%VLZ=tBLO{c{^*|AFQeB4
zdF3AU36Y-tp5%8|`c_DM@TU;s^snP`g9K4~P{4Q~C=yuicVq2%G*u0Vf_`=3`gB<`
z-3e-&a_zUk)b{xdSh^aZ*>H=-m)TBwI7&_9y=1(H`^|-@l8f~k=DID}9<;%inv${+
z#Mn%-)McQ^^talcAqQ}ZdVCFNR+q-?;Zo%
zfu8g)j`X&NDI_DIqgV0i@scTqb}t&8@KVKg(~xAhmL-R1j<@-SH(jh!6|2X0Khe&i
zJzcjQXee3hUu6+I;)bXj`n|b%2&>_&eOB0oq`xcbMH?B3u~buIBQm@y2b)b^R*TZ5
zoz_rPkd}xJJwzr-^OpNzV@vE}4@fXT{}|DjZ-Bo+_iH6ICO<&g
zyNYFHhRBxf{flNi4H7)5XIiuKvAt0@R#jZM2c)cDg6N-x5L{EsZ4LWfU&7z7vGsp*
zEd+@j9PvW^2G*bc@)&%u7Tw6vm{@20V~oz63*=^KgQfW|N(}c^LWwlEC=)~AMok|m
zyE3kiM6$~~DlX3m1pJm~C2!tWLC=}8S7`nZn%;FOkIe-Z`N@__T1Q1CNk(Eq=I>?w
zvR8QMl|{2mDsQ$*C~5`A@q1_>9Qi#o#Qh!`V$fL8wn&yExRx*eCtiKl%KRGOmK
z-8(dfH>yRVT@v;0#}rUCBdF^mD%UMr(nl7bQq-Bn%A?TsnW=8r2gqY{weTkeF#t_V
z;WR*-N)$1;Wx%MjH)^0XiY{_Ksn;JFdB)74O);!X|6a`>c2O?BuQ1cP$QFI1^l=Xp
zhZEo!`aOQJ$2W3@Kn)YB=ZOz%$6`5v9scDM4112)vXzw|`EQP5M5h7g`Wudq)sr%7
zGcblycLnvWUO(Ty5U@9CE}tUmRs`rGX9%kLTj!azsp^5JrAlB;eOYK3{DEYy@eg$8
zQdn?3{oHSE;!3ROjL3&!;mColCD&zxo%`6Y1p&0HdQf76ww)k+uNit%f5Y255HDh@
zs_C{0ioi6r3#DhEIT^@p^-{|l7HUs_L!E)!$^`VT788BwAX^&xK)-`4ee$22PWNBn
z^BqLV0O`52%~ZWb>H4=+9>ozaOYc?>s||d*m`@VPg-mwytbBmHmv~?5cTLazs|vas
zdL830YPm%tt
zf5D8XDXky)#(0g-?>1V)o;|0m^1d>x|J<4r#7p3oo(5hvh0DD+C;0Xk^N01nrTyz!
z7bKuE(S67ErJL}T)XPv!H%e5GkqwUzB30&fC~ZXI$B@9dfpkOro1nC1yiwa4^F#JRAa<82^KwRJFBSw
z7qP}VGg8%ml6o6v(w7kInPss9k`MkU<-{siF#-O9tm=&)kzjBQDopQm_H)>Dhxp{-@B)BX%gWV(+}g%Xc3VI8Z=LVY7mh^?+V@H4(
z_!|qHA4*oG^Meh~#P~>7tSfQUDb!AJ42sn%=ZzG|SIzbP*QU*BTjY|JM_)uBYHKv=!gqD^ZsERnUAa8GYvaWsdgHA-@NCw@b|Z
zxlBo-7+yHV{xOn9we6`il`G2ajAMO>etsyxmnGWnjF3W7HlBu-LlTszLwwp`yu-y!
z9Me8C=pcITR#Y4jCHIk@#!fk+?4+?EZ+HkU;nYE3+1N;9WjJYC?R1B}VbP!@|AAD6
zC_Qz46IOi2NIaD_Ca8ptG*yKZs2QeR3b=!@*9=c6>UYz4gJMZqPL0SpXOwdP-r}8M
zv@GwW!9KK2EUg!-(Yxg-{gVIIq077Q61}LuVLw}3*F9Mp=MrwC|74Fy5f=&O*t*|9
z-FXCujH!pMQ;ehj;g+mwbHZ+xTDAf}vtjg>g@bR7$*U?}THs21zGB)Ox+9v9rtkAH
z#e&g^^q1;JAxdK`5V4%Bj8Q<^`>3)%5!9X{TsXvq6@4g=xg66)4BA-dKzF;1qMItkfH1diQwbOa^$U$N-O$SEVNR@SGu!}8
zr{4%|Yml_D-9~g*8kXwIf5VOCK0@|hq2{Ky
zb#*I!4(>>r#d8dMz5fk2juOA*Rg}DYK>NCk`07aF1i<%u_o43Cr*c!S=>V?ccaHfbDR$
zAxiD3Cegc>5$MAb9A=a4S+V1rV0*XJL0n|3BR?06uO6K
z6wb*{)=81~Ddd6cIxwUD3hlIoL&1%k&eAE4t{pnP=B^99cHM
zEce1M%RO-Rf>ol4YGj(7zB@&{POW0DZ9cPme0%PxoZY*YsVKXETD<+3Qtz-!v+p6N
zt=n2++Oj+JnI?R_ea(FV=v+~_ptqM#+cD0oF1z&PFekpQVHK&^09V;$_16Yrn#z1&O6|Xu|6y0B2i6BXo5IXB*BMnzle$D~OVtI%<0}S6vhu6`Hwd
zbtp7v^uIhB$W6*r3?)^cW+o#v$4v&5^ft8XCibhT{q?8Jl%7rA%&w^h{Hy4S1BdJR
zU>&y#tU`j-A5k$Qf}7nbqPMJ|;zLMmF$mWCmmoMGxy|`s%5!Mg&i+*(sB}Xr_C!r1
z^signEtj*T8Ek=KzWOSs2)<thiORbi2$Ux8_Hp(GzL`(OsmYjJUc?
z+Q8!g&r?Rix*l_YCDdV9-Iq`)*;pC3aO`&7$o_A@8E%C`>%>m`M2)M@*&NDLPd5o}
zgOoH~(Rv@YE^kEL<4lgMqPjo)0)~`RGYOHwgmLUY#AttBbcAZN0O)}`x1=b6d-Q6+
z7n*3&&!O493r;>5C$=H&vTB9ib!^}p-MUfFE|L!*2F#JL%&$u;pXbYB8;8Meuon?n
z6b6fTErvJpkuWjyF^t9yu!F@)4*usQ&PioptYm?3cZ?9m5Wt
zJv2*m9)k@y$5T=&ROXRm>wz}Gtx2kE_k|HI5HrJ`zGe-UmuZ@>2?#PDge@WhUc}_YO9p-(6
zk`@~L@)t%P->EE;3&S#xHXAA-xFm3$-X^qZ022*)dmLuJsH-ojhTL0yUc=!5>p=}Q
zZ08k{=3k^&ThY!u8N`X>5rHwHr&?fp)2S*m7(04+d{BmY-*L#{XDw^@yo?Nxrk|>y
zRzrX0TgvQ+={|Cls}un*^DZ@?u3IT5l$dDBhscg+(e2vf&}X5
z(>tq2R?+dBSH}M?RYh-;&KJL938o^b*cLa+`5Z4!yXB8>GETDG-z}M8yw!e{`X?(U
zRd{%Np)@hof{<4ck^)=4VS7;@iRb-}h+u*5tXnAyJoIEL5fA84iN3+VT_k}^CC*;BJ($6u=JF4y38j;A4m(&V`2c(`dwm7uG44X
z0h#rDVx|7{D#F&y{Nvoe;}wQ49cy@Xyy({d#dWd!+xkipG%8aj5LC^juu+a^*U5r0
zUn^XQR?~w|@zg?_jWFANpTF?LwP#caMDM$i5=<>M85-}c#F+|E0KvRrEl4nG7_%CT
zciuDE=!@fz!MeB*FAYBjq_ZaXQVI4l?rm`4wDQw
z@?DK5y`{WLu5%8N5K2W7;~ncCAI@1n7I9MMm}wPr68E!FZq%?ODRhultn=344ZN-+
z{*Fc?5PemlQKiW3b=`}t3cxwI19;b2q$~fmMs!BuwLY2J4d1gZv?`&{K2lL_TT-_9
zx=^w?Dm=hgHK4+I;P-0uKXgqMz^*o96UYQXvG(NZ$F@19qv*62w5jM=m2q*b*F4e4
zzI54vpk*cNNH%6OtdrIkAZs^|yyRB=KhEey^BeZ_UuQI+@r*$u1fEnU1#C6kYesM%
z4!o+X)uJS6bmxn_+lKWjoC6a!j9<|zu(H(W<>f01q$b+TzTF@bC5<|eUGPifmr$_h
zpY5tdYC(b*<{T`zbU%n)$bSS49O*-4oCP;UEq(-OI!GhI0_s=fKo6|S{{jp2S!k{6
zQ}~T)&mNnb&Qs==a_S7_gSTD<2%_VQHj){+_&iq8MS`($eI}BMqapwU71VhgusL#J
zBv&lmrqqI%zq2;?Bdj1UpZq9QdyFS&AFZR*q;74gqEFzrp$#JYS$g^=qaUk%7HD63nny
zY=;QD3?X?SCNP%A>V*y0NIY8IyveN)^i0PO3FzqqMsEO?TlE^9j=$*?5133f$DJ4O
z`+H$UD9c8oReftx)Mo%G3natQmYk#ah|A-L+?UHX3Dq!NOMQ<@=4?8Qm!D0yhcT}iS3YRgDnrihc|dTCfA;q^`5x#SK<
zH`uGKQ9BZgI7Zd&p>|i8NUQKr=JAOM8i_yDJOnSUpsjL;6pX
zo$u?^QNp1?UBsa9#sRk-CQ6kf)H94D@I@=w>_KjJJ5q!30fTN_H4WutgOAu9LKgBW!cw-$K
zMaC9~JT_~0$ts$N{pzy5W~lX#!0K{o=B#gHq+);1`MbERz=~*|6XJf+{Ow}72{R34
zCmUZc`YCMn3%E|`n7!SX5xs)rRMVfvN6Cqnt@p1y*RGwVJXj5|lC7#i{Di^9d&dgu
zb$1?{&og5jU6v_b_FLJ`8l+G10Q55@5mY8ax~^yw37%#o3hQP}{@BIV=zrnClq
z31ldi=##3#vf9hse-Fnd+gx?;&|(}NKe`LoI}V>br<}`8pN_;*twht%PbL8fXw=6B
zT%Ma5QsAOE^^L7
z_!U%aL-n!3&Z~PpX6EdoQPfNsAAv2qC4VWf4|@89KS8lc{-`=bl;P~t
z*C*rHxLJB29|q|%pt|APc%@%|t^NN8*EZKlmdG$+AmUfNy^W8f;nWrGhe|YXMJM7
z5@3u1W?X(K=`oF{$`NFs_CA{$pm#wGsvF*?0is{-pnZ{_TT(p@{-v~|p^N1~M8X3?
zIMx~Ud|{I9uhtrBF_b$QO(~|`exxzem60?WdUx1juh4JwZ@q3Q&HOu;5336B?$U@`y=fTeDn_+aId^rvTkEyV5
zgYGP1K%tUHvR}fG_IQH$MVUUG2m9C}J}-wFOl9$XTF$hEf{X7s$xi9|wddN1)L*tD
zyr@s2LBS^aiK-?-377S|B2K4kReL~9R~%
zpI!2UY2_lWO7mOJUD(DZtRfPs=V3nyJ){ozaEe}*vzuPX@8^N?*B^KX;&R(;b?c7(
z94G&m*1p@3yM#PgTVnn;AiU_@)2$@l9hJ?NTWFvD4HN>jA{%vL5rudmws3ur=XX*u$xxR-WE(e@jsrr}om4w%tU>&4HJnTO*2A)sv
zh3_%75dY#(H)%6xd1X&>D!|LACVyj}^dbN13k45J=F=W;qKR~+HXwN*!
zSX=H_FdQD7K$xv?6JUof8@`tI-~}pNsFC@%>?+R0YOI(agBe$Up&hRA2C71H*c?mG
zj%8XLNTXa0;yods@wkb-`^oS2E-q95ic{08pq(Quu4oN&_;BvkjL24m=f0Z=nqBC^
z(@++8v+eKLXTC8ju_e3YMu0s`NNG=kieO{Ms|(z0@O`L3bTl7ZdpnkVASZrg2)yO@
zWbOepon(^W9|b_})&3n7#W7T~TD001pWpten;v+?-=Ql%l0c*bxXw?L&M?D>wS@26
z?QcyULI`MWFtodU#U{;ZO_5#JX94mL=dGo<
z)2_>*0mpYhvGN*e`k_Zk;WKAq^L5J_&kNhFE;&%}v>~ef+Tl#C!pzMwZWr)Qo3!fA4F7Gv_mRD*gr$A
z926jYr?!uvt#vUnGF1pAu|k4)|9JL{3VS65RzoK_l27k?B1hqpc
z1SDiQ{e-(hz7k)(Ce?(6#=JUKLuBI-InRP8A0~V!-W2S1@6P=tH&g03Gv_y-0Eo*qKV60KybS
zGf+reE7*iHIeguvk(}Vp3dw=RVwT8mxv1r)_`vQ)vJ;*edm`MTc1Dk^TZnV0*B`MJ
zznfT5j|Wd6b5~XEHfDRcs~4s2g&02+8(WQ3NJ2@IR08j5dahH9UY^1!
zAt6PP$hVe=lbU2Y3nLC?-Q-Y2;GN{O%TMz8`d@W+Nri|vo!y;j6b{T8trytPO-A2U9tA=^u{f(
z6)}x3KZ*fX5x37co@o#7X@Npg9gaH!5W~`kk7f^x6_}iMKd<-jaQ<1&mCgG;r`fH1
zx<@jDlND-*h)Ea#g5C)mh|Yp|7{X{XR??~O#vacyyqIsUvZ4-Q;lB=t7LvV&@0fsS
zsGA5JSVj%I1k=X=E}Hxtpe>*89%6j$cdUI17
zzbW%P`}`m**pwVwE0b)ZBZv}CI=kl{PhnKscEuQ56uH*N28ta5Jdz-V}0%1?)=QSK2=n$;5%RFBKrc)TU9u88d{P$
zfHaNt|EiBM|ElUE(^J?_BxQ>t@{~=BDGhM%eVv@DA!m+0`Y3BGEM%v*I5WhDBJ<|j
zYj;A)|CU@X`-GhL$?$jr!u6A8KuWhi068{1ZIhty2dyDK#5x+(m(e~pfEy1M*yNfJgqskES8It3xe8#3RTS_W?bSnz
z9t~;1Lm@*%N2?j<$OgosMqWU0h9)LJ_9yMtYWjfEfkAPmVJ?IDff)35?v(ud4iQ|Q
zbM*nN!fBVMKi}eK=O+_xcN!M$(e#}U`jam;aGhbt0UW4&0>iWCO}tVbDh^4{+{iRI
zTA0cAk32X9{WD!vbd7Lymh?RmZuX8ASS>fMjl~qiY{GGwc=MUSqcc?Gx^nX?QXEi0
z#udqmGWYxs)QM|vHgIDPYYEBH%znf3&`Ag#=Z;n8im0G+*}HXBi=Ru<+^N=11RW!o
zmZD?L20*a@;@#5ZlY=-b^u`caHWuQ30G&;d&awtXsDnyp2(4F|g*!Qqh^d
z;fOuPB$j!$K=6%i{xEk}G27Qa`XV5OdZ@V*iUdGwpj-%7EFN0}Jtd3FwzWSex7z-x+lFGAo*=+61u$DZHMU30mtk)a>SO1kzz%=s<;Cvs3}w5#w@n!giW
z4az&q@O@Y=Sk8-ezng`zKw#-s(X9t`KDg9C;@wRjm`9n^W7@HIU5aF21B+=o)~f^9
z4(cm9)tAo)O*AkVZLcCu+d(lqTlT@X?v6UsRVA7NqF|*$K9MkVorWxEdotQ&Hw$9)K%95Y7j{7LgA+u-!`eftG%%b5ER#+sBot(v!cAexc5
zgGXWb(y2`U6Z=&77(?}CoM!9XsyC76X0F}|VUqk!TjlJHp1SP8WnPu6Uvb8pMgU?B
zVO~$9h(w8etUyNfWuF`nU$j{Z#9lp3k-Y3(*0{1ywfCg9uyHspQbGTRr|ddI3sfmF
z_zdlKK?_W*(JUzgE$
zV;ARa4T~dohUwr;oCG~LeI5u3w-i_cq?OZ{d^LQalA
zk{8n0DF>keWrHxh;qHSYOv!JO-O?uh#*`g*q8r=o!CsR3jknl9v81FV#A)HtA|ww2-azd_L!13W
z>VR?xDz_D$uI_Yl8!)5K{}f%wIayw6{gz-|;DB+>5xoO+Lwnlm(=ASlzn9@u_Vy9f
z19115JE>4xi#s!Jkayg>;Mvbq{i7$f7wFymUfsJf%_sd6o=>)YtOOTQ$8aZozBNoG
z!3c^@?vFiy`esdQunO^VsCOV6jPsaMfv}*d7i!`&yb9YFbtkXv&lxGVQ6%hH6kSml
zK%G0ADIb9}Wq!p!k`}7L3N%SvQlm{O4~;f4tLs-;_{QJl$)iu)CeJBOR1rbKPJuZ7
zPr`JeX*_|kzL23xd{WmZ%7z@u}Y
z`*ITwIsv=M`A1TjW6$lw-5|2MLkcE;Czn*g=GB07z;=7+_P{e%eQS$Zy!mt|m-9wL
z?2O`tl(d-6E82Eq+hbYxXh|z2ORoxf0{4b!@UaaS-LZ^0B1`I-PwPj=D=w}!jDs4M
z;Z|%*fIXSj$|2(^9C@->1S!mUH95h)cjdn87zc*hSnoakLG(FiTd+s->(%x$vV3Rvx
z_dom!>r%V0v%f`13|wR1Gwk^VZ!yPgPfpCa8G9T~6P=)8`xlayc#U#MPeoM#g8=M(
z3TQReReWa~a*9If*Hd{McL^AA7(T)vq=w?8#Cs?C2p}A@kM{FnPFzHPS0ECZL-dYH
z&&LABRM?iZcE8%i1CC3doyoH?6yAM0N!z}-fo*pCX9`zViC!L&zG61&_ivw7BqqrW
zB0dd9m9PVdvkR;xl=uxMj4?**9|=32kjtoV@v;LurO&u$k6P5fmN8jP-v3SIG*sQ!
z!MHQ}EY_lIWjrw
zVs{}s8%x~AVPoRn)_xUY^QPFFPlcOXsnRix)WV=1cpY8zZaY99`#TTV9F$$EVFRi3
zh$WVC!W8z!1zLAe4&t}WgR4xDawibO20y)aL%CD@0m^j1UR2puSM@;dTH6rmT&JRW
zDbdsND+a_S`K^OUP2qDiE6(VX!InHXI_z|PRXRj^cLzr6{Hv*Sma<9X^y9Cz-ko&S
zXbfRPBs7D9CRA?JG};i#@oZy1tDFt(y1lz?4-Uxwsot3ehUnWWL{zkogC^vK@oqN?
zY2Xn?Lu|PYKefHooFbd*lO)*ISC`*e{EuL}MfHb^O1-E-8Oi>~d-V}!Ni0nB6%0GH
z4WF0tqV-=R?6npT_sO|_6X?nLo)!;QhU3#~MY~69;}ETNouuaFJX)&8k9i~qQE7H%
z(n|};&ebNoch^aHZ2n?K=}?jQw`GyIPp;qKd={v4hkKGFTg$~P79H!2v5Q4$FbB45
zbV~DB+1*gJr&Z%PYppA;BvqwWfMx3LI*1Q#pR=|mai6nhzj5lWhV2V!AIrZLn;yw%
z=J!v(34t?0j9}L+#9yFLhhfO4g)0Aw#T<
zzr%z;YN0rD%lERer49azY;qMQ0;5#+qNT;fF!OrXgrAND9Mc&-$K)Y>BH*ah7u17W
zWHE+TSNisS-kKc>QXJF1$kz@6{3d??S3}{ZE95)@F@pQ!i#@HiyqMMw9E9^)u
zS`0@gIXG}HWZ~Wmcerwd8)w=vI1p;~mr&%?-zKwcmEFz-%94DS{de`V^Pknv3j+P{
zf31E7FW+YxbB7*Me!|tsXEj#*xsPsuK%-6k*tZj`xf#v}jvopj?0vJO$|r_%27*ta
zcA-iy?0(afs#k+HzF!SHX`iV9iDu$CJ8ugR>&F+A5vJcP{`B2e;Ad3SVNT|2tgZ(;b5WL7dj{oWhVOq?~&!3_<;#z_N@AT1pAKs?u1b-
zJvo#-Lb+j-sds(*6U?vp?*v2QP2ig%dq}JWbvFK_jW(_)uiQofB1o;JQ#`zFV9Jd5
zoly}}9peiH*GXhs{x}%JFxp(VO}w5y2r7Z!R&mz>VEL6C2(5S%Ehd=0AKN7}qBX
ztcvjw7_1cJb87;*Lc%gxB}PR6s!Sk>!OPPN@D|pRZ!1*5py-$G3>_A5+=3wM>XBbuIG~
zEwhG?VU9i5Ug@v(2rBbdfp+yw@yNdQ{)3L>%WZ_{`s+@{P(m`eEfEVm6?$Ug7aheX
z|FJ?~G``BXFIWtWd>ALv%kfxjetlR{sbps8x$$Agx&cKouPZYMOCsX!g>$J^#P#co
z<5OM2eu)V)0NIYZ-|ZKgbY;p^z8qnygo%6(@|^p!eP;%^;x|Mq)zDB$Zcl&dY9<(&
zqB|kS7_#yfI!ZjEc_WL~^gWlJkRJ@*O10>4#bw_xsn{nGk;B2Y;m(nB2AUesqGN+}
zRd5l;U^z#h^o97in%}gq?q_*l#C2{3K7_0pzL0-=Utd9-?pSK#W327k{q|D
zKMV7*0NCxK0;Z)+Fd^O&qu=J#E6+^uTKcKkfT|$zga8GXAW?)uvg;%ZjpE;{tn0NT
z)(WEC#}U2r>2bC=`HER$&!jrpP$VIb1r2AR{X7eV7OVw9KnOf-fiV?zc%kMKyJY;W
z9=1@N{^INM;sf~psV66WpZ>bP&iphbpIr6H_$)T~j*a$SPQvG`Ho50q`cBaFJjkYY
z!E1aFfsxgZ+i1%Fe66bFa&Tn{#AKneh1x6znJO}s7j)>eQYp%PvyOK=z3M3?hNQ~pE0)yl!GsWqtB4rcqdmjcN$M!{kh?MojVQn=l7NtM1+TWbI2
zH(bpXDK~ATVq4UM?(dts#3f^m3fxvN`+G@?;_+C|O}p?(gh+&H+uN3&KZ1Wkw}z@%fJuU07#6
zj~Db)5coa2MvAyt-FXf!vWE;)fn;M`hIp8cn!!2&A+l;66|qY#`ZhJpl=vz6tc8>R
zy6`IhSF70A`|pKU>mMq`%p}L%g&W;;s@4n{G1g)f2d-ScL>V!CjZ!qC!g}3mj4{vk
zlYq27Q%ehtTZ-j4t+2nDo`j=Zdd>n*BI?`dH0g_Fd=@HiN%Ck6*F?Va3FjiR234~o
zs*-cLYWDTv;i;aQd-T7pYN*591h)0`+4`JXYJd^n%bY3t%i2G?u*;1{2@5*100i`j
zZ8a;E#W*CIYH0^B_9}MW?7$*5vuI5BnK0QJ8bf9{RwvyQ=CC_i6(NC}Guj6G`*89&!V942h`JyQ
ze=py&@mN3-M(lpvO9Oj7SJ|15Lt9Fr87So#hi`ZU%dD2PF>Sdg#g
zHVduE(-_TiJ63dF%RiPPi~N#N^<`t^7XiP%MipqF*$#vwYANF$7*aDGvd&>>k~J#g
zSAFi5pUOJVp3E9?J%6KX(w$oBjxGOSR_skt90NgFPE)n;z40Cmy1a3TMs~YXVsaub
z*>fCA6l7@_VMJk9g<>(3g`DpA@TaIKAiUDDc~ztX%8dd>C~uJny+tS4n}M1A7CkrH@TDB$X!%8V!H9bIy2W;BC?
zFh@=dpDg+c(BBLJKB#g#!N>SU;(dVt{!u&$Ok&jvJ36&+`V>7$wcj0B@HiI<9dx{p
zF<;WphF|odb)%vSE{T^y95B@K-MQ`~FVSH;)tVRsW*+M)ghd
z+U`Jd3msKB@yyrYHuRc6Fuw>=T$IUIlpn_fU-)ksCq)DqRl>lKIlAv*Y;x3|*$I8p
zs`Vu|@YoGLR}6cGf|TeW@hiO+L`cxi(UzBRy6K#nYJ5&-mHaDLgY%>Xj8SsBa3kh8
zGRe^+??kvm3K|7gS^oKng;q?z6}`~-7AC5@X7NBeH*S%?Ftg*pDjBZ|R$(f9X2*MBEOTeZ?m{UJt*a|ElDDDDOc%`MyRf%B(5HE&_cl
z>s8nD7OJ`p1>~Fcm|GpnQ~65?m;D!9r@|vkoV+~^uA?#J)cG#l7*D-sQVVSvdwEei
z(B)4rv$cNU&pQOSIQJ(!?N6RZj^gjkFk)e0OGjhVdLtNVJvwt6ie%#Xv!eZtbkwoS
zumLNjGN{Qqcv2!UUImudEX5CkXc8lHamGlr*Z{}w%xMOy!dcwCWrtlZ8^!mNev?Ym
z^A>C8gk}xF)^m-9)5Qz1siVRuSW8mZWpOrA(?3h1SGjBsvd*#XVS~*Jf;Oc=8}1$8
ztTAxs_^#GD%na8k_TFb;Yd-wkorZ
zFfN+$R9WC?+;F`HN{TCX$z+XzxJx+*I7@bF`_x0&WRKpHmY-=G3i;UGY>$5;E#9$l
z6Br$rz5Ld~46Aen8fnu?j998K=~wdRsNa^-w~Br#>vdSKO0S$@wGP%UX%VUuvm_>*=avFQ^fp=r`q
z;4+hSnP%mLKuT>5p8V*tjWOMEGq`G^lZ$zM6!Zvwa^18vmj~+PxeFxed!>cI%Y)lnfw;$v0s@KKhQKlEWyCJpd
z;{SF(Ip7S3I#vxy24O3$+11Pk%RF}&6#mf`rP43$eP`;T1RpyUHWSLI-7v)`i`25_
zFfVCYZE*@<7+AJ=X!_$euQ?`66{=O6rF=}HR4P^W6bO!X8x;H9y=4VU+filj8N{^;
zE!V$T8kxMq47%rtH9fe~pCc(K+umH;@IzZTNKLh}Yc1&55|`<7&cP)Xk>!W3jr9xk
z)N9c^+NcLa=AzoqWm`;EYQyGUzN~P%$<lO?>=$ZJ-txzjh@}p~B)SU@ciIZ!z3lpAlRnF<8*6eHk@~?esf7Gu#_)`Ky-)dm~<-Mmt!s=!mh^WtuIwp=&8(R
zXia~&ha|I_@k+I2g+0Q5*(+UdDe|nZ#7wd
z-Q=dk0PtoSV%Q=iMQ*OA0;zSZ-SvnEVVg23u6pFmyf){>Nlku#)VbcUVTJdF90^+p(PYz}oDqVi9mSnazSuk}JlUqnMKRhE+X*$^
zKO70hH*_8!a7cahe6oAJJ!y0J#O5n(W-$N`=~t#*0K2;uF?{ehstkXo$LmhV4mts@
zF89Z`pHsu@@apOKe_d1!EJJ}iw2P~
z-j`vVX$4kLV0Q802DX+~(~mPX+!_!+SRj6O&!0jE>?k@vc$zI@;f!>+1ym`g7iwd(
z9pBNqDk(mL(9O5($vC@9UP1lguT&Z$;cmKUURhFpMzcCuZl(HdrpU%B+0VKA=aT!^
zAUlM9N80n=%IdDZTC>3^{wV8%R6Ra%U5HQoVO&rrhaj2w8n&{Ic7YAB9zgmh)f*{z
zdvM@1SIk2If5aS`PbMz;me#Sl_~dJ%jVzjGXD%Kj`Oyz=02vlp3cQ?LwP88
z>IK_P@q1(ECy-!37PivWl2|d1SKISC7J{A#&H^-`gSD|#PHBLQuh9#wkR4LKC!(Hvu>Gc(~`a-orV=(Nl
zOnXjyZ~f^R*c_(*(ONf?KxAnwTfJ;JhdZd62yV@Br2oO
zw+?K$pHT1ND1lJ?pymk^b+%zy7&7oqT6xySQ5NBj`+wuHwI%u47@`3I)HbpDL?PdJSg!LCVG^0~`>%FTxguOk{n+{`8L?@JHG!%tte^Qv!
zb{Pa+MU$Z80-m!4NToqO|N0Ltv`due|Dc~fRI-UG3G*A_6FW>{RiY;n(X3gF%bKR;
z)aln#Vl|FEHblY85ROKH$A+X(MA3@Dh4X&aPWl0U>p0-|-Qf2*BI@PYg^;vM*_24n
znf{mdB=3P4Zuu=BLF9Ilcz*Z))+c3}p3>(2Z|aj8s0wa3JQLo!hF82GzZqeLrkf}y
z3t0B&hpr0YoF0;nYyz%C=ml-sGGpmC#sI7rKVzQQ|9yRu&R$Ikc?&;#;QyEYq}dKR
zuu_DV0=^JX5HA7p_RqD=<)+R0K
zdKUbw{~oVR^{TC=Pk_BMh{Y;9`ruxRZR;uf!|SH4j@qy=68=mPg>g)RlgB*vIgN=C;h9J$xg&S
z>S!fV!to-^P<4Sr7p6V#X|M{?IyllcS`IWUjsfy0dDaUH!mi_m-Qj7%a(Mb+_b-Kv
zF10dCkNoAbcC`E+X#6|T+r%4K02kwg8T}&s$a(*=*%^*#
zzG}!mV`g-$`7W-ITNBB3p3nE1QXT1A_5dwKDojWV^38MjvUjWrotN{v$-r0(iEkFi
zb#Vd5)wmsDSo;t_)`E)@=!fSta2%PnqK4;ussRhZxn^}}5F;$pY^rQAME+Lr!yND8
zXPxy~dTV%$*ZHMe-FD>FnNgj5|9FR^P1)G~{K#~bmg<&|S_OtyemLv;U{6X0p8A3Z
zHM?LMnq6_sx<7zC9ZD)gv#JLZ<@JFzfZU%@6V7qJor=>oCPVV2Z-K^{OZk9&$r>s{
z8S{r(5FJDJV_-!~(P4bHg29Ub&?D%N?o24ms?AoJ?ckZWHui%+`9dPcR!n-E&W7{T
z8ys=Lml{D0K!t&{8?NE5cno6iyu7gRvNe@j5^Gvp=oi?e`VS97&>lgApS#PO+kFM{`~OpWD321GfkZbL{g1!fSs0?b
zm11WBNj*TK?*UZ?&vlo))40QAbFou~>dubO6!V+vK+*dW(J;e^Ih~Eqk0!Y4TiD4G
zmcYM{cOp3VJAt)E6`XmVS3Vi0s;DyQ#Mhs3rmj%A_))qN?8!c}uO(_M_#kcm`X~}I
zM8>sFjatEeDzZNqaS8aa&MJ}N;;JG4RGDm+{<0nokX*X!gVvxKgh?JKhmkr%c;JPp
zf)N*bn*jBK_kBhQ^y@VEI{wf+buWS=4}Cv)fz&d}&9CH?m?p)C%2rzYyn
zb(9@80>qDi?3VL40ni|Y)R*lR*#ZzN1adYV8DrsHPc5BW#&@d}p10;n%rfHnuW`tq
zOgj4;it1&wl%x~56kC*GcRTqwxKQom2K0}`;XKBN|4(Ga1-Gs-Cxt+Z;04EcrT7En
zDK>}iFAqX1Hszh^y}>xr)jnGc7%RCke%^u(h7M$3HVj;eUpFw6F43x|#>G0LCp8~t
zNgHz_TwW^HO&<#QN;YW#N!(x^2$GpTj>^Bt3ip=wsz6SXurM*wBS&c?0I>o(XohdP
z!+a!QLs7a8ybe#dy$?3O}iN@ltSdC@TXQAS9QSwL~SwRT|r4Hn`njB&j@r2ml@|hhf
zmKJEZOl|ETaMj#h9c42D9NUOsK8diLmx2&;zj6gBo?=`$s3n(}_kw!#OXu*-DinuG
zxpKdhNStjmvBl}vE^9YEPi
zesd)AbWo;i>lm;bwi9&&up92TD&|dY6RVz_x+64Oh*x7yLSQ9fu-U}
zA)wMRAI0S4UVfj%9)gWA*81)i4;rxolCsUAsC?9V`&&^-t>uwoNZiEU0O!Yz0!Q86
z4?7ifQ)7FTq;_;>qjyJj^%!EtlOgj8z`|s*bI_&Yeg{*FG3#gXm_t@4XW@2Wz3Z8@
z?dUsX_0Q?;oOl20Qyp&ZMfmy~v}!t#(a*kVx_!Fe+tn4^5bj-s4mr!e<#I%?_(n=t
zwh*p=j$)>OqgctkPQJ{3Rrl7zROR=5QIh&`i_HE^qQ`S~_LR^YXb@C8`KuF~w~v!s
zjk7m5f+AHKXmeU1|4^zMt7Fo|RN)Bura^>=zC6XRC!thZW&zTu0PvW#7l6VQ04QYU
zBJf#!4MY9wDA`QFU9z1ZVmo(tI3qan5^kKK2T*PvM3U2UfU>l~lnCKCda-3wd*f$I
z-$tlXSB0pn|668r1^0MN7f05GNb<+d{G62Vna->~faUC*ys{giv
zaj0zp-A^~>HXmPaZc!hae{=zP#y12%LJRiB_NE7U450A*nmC*4!T1s`=QH10qB;+)
zq|0=V3WlmQLl!EscaS1xAhoiE{;7!?K8Zc4;$nWwR&zYc>3iIlH0+32zb*kLEhC^c>#8rpOHR@INzYEacUJ&S4sIDmoByk+$x?_l#XUGFNH
zJ&X*RTAB84HqcbTvC+E9=tNp`y(+*?hd{>oT&d|f$hh^JYCWs-cZBH`oe*jIW#-HR
z3fbEZRsA+6i0DbHZVmUI`(bJ5J-c)LkV!yBs4Zucaht=;b?|c;UyKKO#efc$H2Vnq
zgR;Fryu#6LU0v{Y8p>#H;i<)L^bYmgILq%%JZ1o9IKE0WiCglI#4nOLyFq;hn27|V
z=LB;Uyk4}DI&?XLVv`F2E%?qv!k~?COjiDg_?-^l95HIFYNjc!G~=o-sg|R9*mWe^
z6qCQix^c9uV9_swfXkNBR?7KMe8QBSrVwuhIrLxNbN{u-j(lFgK8k>w{iKt|hqLg;
zoxfguLR1QMj25$T;$BBtEzM5GxK&yZAn|0$|LrAI3QDY)MUEl%Fae_9LJRJhpGB9QN31wfXA^F2cN^wKq^3@`>?l7f|cEk&MAS
zz{{iUL-TOt;M{5>&u)~4)o=SbD7E?JSr@iT=Ak;YA=>Tb{7jPU`Qr^2RQ
zc2XZpdLQQ%u)i*F8=g~>xQ%PEfm!rM+;>z36|f0JJhg39s||#{P897!Hz+*>dKfH=
zZSb&=;14g!zcK%13oZ-mLu~v@dbPOIGE?g)`1v*~HVtod;_6)CQ9#2*DRJxuaM)3p
zgTShl@n4b66^sY=VjB(=hHOyqQq(Dtjf=HJ2X`bPY{S)&{mlT7z^~a>&V#n9K!O8q
z^MZ>Hawlv}O^1vBPSm-fhY%@W#gW=^V9Dm<{?!EX7dR&wM-+m=mW2xe76rXYp!60G
zKfiadJ*Cyk0TX+)C%hDTM>7zIbFyQZu+EiUV*0C(B$y9lqS8_0@2F(XM(KGdO0Pij
zpZ)Z}s4v5&wVyz_SI*BuhRa-v-K6;BEbS+`zcX%;WMGi4Dqb@cH#zpR{P+F=?UPv+
za(Xj=rGRu=^+!J-DPXu|d+I+6AHQ3<8j@f2Q=qwnjNRQaen6)qta8>tv5UvNGP%*y
zwj?*6o9jas2yEzuuv5dKOZJf2jg3||5XV(fN9-38=rW`ClG~wRY5~a9fp@bi=nbOq
zh}b))ullHb2JJ$t2z
z9(+4V4{TFYu3H82G){Wic~2#^kD0Sjp~&3F%ErEv8O-87q|G`=am=nrM*xFR>6mSt
zu&0cJ(M?UCA0d%L&?`vp3@rEyq=yfHt0V{|rXl1{hNA7akK4TuQSWG;A;%0m!y?XI
zjhy?0XSqke->;+e;1YgiyJ3>9H3NDhjfHNz&-v3>ZO=PX)FG10yA^gR^ry{klE|7f
zk`5uU|K(A%P%-4YJ0f6YpxM61c{2bhHsG9`BjN`CCLg9^`nN4*)Lq4703&oENLQef
zvp1fXc_wfwS=mSAv@ZQerfHZ;As&OSz^cN=ASH1;)dCP|RK>=Qo!`%7;@<@?^f5iW
zgd$dlQW~1q@WaT!qW%1@>YaQebGsym|vPDTUfdlNoWuCC5OJo^~D#88Khna3Q^2
z;+4RN_gILYe&=vH87ij=9lYPzdff&jymr{k?aE1MX8{|q)Rxi8={AN9mg>S^xiOOi
z`4hre(YIqA5-Kx;K<15c=?BV2|IV2C(gwpZe0=U+ZuJ&amN!n8!bI1*aP=WNyyxZ%
zzbF`jd_x5!8()NoKkYa_ez3}J?Rfa?IRC8#ybr#W2U?D_S6SZXuS*4OMhPdZx5c4SyYffZz2AC?zz8*#el@ysX9qcfgH%%g?&!$Ovt5v0s}gewdMwe%sw
z`Z8Z2bVrKbcY_@bX{b{Znzf%?1l{QyDP5fY+2fv;RO4A|w)_U6;e
z@>9G0{FRXU)`2xBl9B{k>mXo`fZ4~LdOMa;$g%&)Zfv9?ZZ;{<%kG9|Qqkz)XuF6p
z&*r#?s9IkYcb){a`2{?;`Oj{(;Oq_K&uy@ee&NlYuE*UxZx|C@KyqK8p;w?6T7Ce7
z&LpE}EfcVNG)3JtAuLzgZZ$D{hWvpkeHM@B+fBQ)ZQ36=@g5m|5+VX;M}Y&7OG(Ce
zyH(0$Z9Sf*3eXF@6zdF98UO8%SIch~>E=aA-JiiJKngxe=S8j&XIe_<%{y93nc)_m
z?`7Ibt~si7av_%#$@VReH^t)mTinIXBe!&&NO-_S9x0w^iCdbkXMXu3k}!WQWEVC_
zTvH78NH7d`ZkqeI;e9Nv1s%PIVJrFKB@Zd9Pqf1App*9lX8ur(26J1RAx;PbAA3&u
z*DN({1TCRoj6>lUK|yQ$2`HBSLfL$THR~5W`2umOG%t(NoB^Ia%Ew`Ya*oW;bzCe1
zj>2X=p@dE