diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml index ce1761d3c..0645fb8b2 100644 --- a/.github/workflows/image_build_push_squid.yaml +++ b/.github/workflows/image_build_push_squid.yaml @@ -1,6 +1,7 @@ name: Build Squid images on: + workflow_dispatch: push: paths: - .github/workflows/image_build_push_squid.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 82034495d..c3a384baa 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,11 @@ repos: - repo: git@github.com:Yelp/detect-secrets - rev: v1.4.0 + rev: v1.5.0 hooks: - id: detect-secrets args: ['--baseline', '.secrets.baseline'] - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v2.5.0 + rev: v4.6.0 hooks: - id: no-commit-to-branch args: [--branch, develop, --branch, master, --pattern, release/.*] diff --git a/.secrets.baseline b/.secrets.baseline index 0c4eba0a8..ededd2dff 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -1,5 +1,5 @@ { - "version": "1.4.0", + "version": "1.5.0", "plugins_used": [ { "name": "ArtifactoryDetector" @@ -26,6 +26,9 @@ { "name": "GitHubTokenDetector" }, + { + "name": "GitLabTokenDetector" + }, { "name": "HexHighEntropyString", "limit": 3.0 @@ -36,6 +39,9 @@ { "name": "IbmCosHmacDetector" }, + { + "name": "IPPublicDetector" + }, { "name": "JwtTokenDetector" }, @@ -49,9 +55,15 @@ { "name": "NpmDetector" }, + { + "name": "OpenAIDetector" + }, { "name": "PrivateKeyDetector" }, + { + "name": "PypiTokenDetector" + }, { "name": "SendGridDetector" }, @@ -67,6 +79,9 @@ { "name": "StripeDetector" }, + { + "name": "TelegramBotTokenDetector" + }, { "name": "TwilioKeyDetector" } @@ -246,6 +261,15 @@ "line_number": 154 } ], + "files/lambda/test-security_alerts.py": [ + { + "type": "AWS Access Key", + "filename": "files/lambda/test-security_alerts.py", + "hashed_secret": "4e041fbfd5dd5918d3d5e968f5f739f815ae92da", + "is_verified": false, + "line_number": 5 + } + ], "files/scripts/psql-fips-fix.sh": [ { "type": "Secret Keyword", @@ -640,78 +664,6 @@ "line_number": 25 } ], - "gen3/test/terraformTest.sh": [ - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", - "is_verified": false, - "line_number": 156 - }, - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "d869db7fe62fb07c25a0403ecaea55031744b5fb", - "is_verified": false, - "line_number": 163 - }, - { - "type": "Base64 High Entropy String", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", - "is_verified": false, - "line_number": 172 - }, - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", - "is_verified": false, - "line_number": 172 - }, - { - "type": "Base64 High Entropy String", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", - "is_verified": false, - "line_number": 175 - }, - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", - "is_verified": false, - "line_number": 175 - }, - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "212e1d3823c8c9af9e4c0c172164ee292b9a6768", - "is_verified": false, - "line_number": 311 - }, - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", 
- "hashed_secret": "cb80dbb67a1a5bdf4957eea1473789f1c65357c6", - "is_verified": false, - "line_number": 312 - }, - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "5f35c25f4bf588b5fad46e249fcd9221f5257ce4", - "is_verified": false, - "line_number": 313 - }, - { - "type": "Secret Keyword", - "filename": "gen3/test/terraformTest.sh", - "hashed_secret": "5308421b43dde5775f1993bd25a8163070d65598", - "is_verified": false, - "line_number": 314 - } - ], "kube/services/access-backend/access-backend-deploy.yaml": [ { "type": "Secret Keyword", @@ -745,49 +697,49 @@ "filename": "kube/services/arborist/arborist-deploy.yaml", "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 64 + "line_number": 65 }, { "type": "Secret Keyword", "filename": "kube/services/arborist/arborist-deploy.yaml", "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 67 + "line_number": 68 }, { "type": "Secret Keyword", "filename": "kube/services/arborist/arborist-deploy.yaml", "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 70 + "line_number": 71 }, { "type": "Secret Keyword", "filename": "kube/services/arborist/arborist-deploy.yaml", "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 77 + "line_number": 78 }, { "type": "Secret Keyword", "filename": "kube/services/arborist/arborist-deploy.yaml", "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 80 + "line_number": 81 }, { "type": "Secret Keyword", "filename": "kube/services/arborist/arborist-deploy.yaml", "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 83 + "line_number": 84 }, { "type": "Secret Keyword", "filename": "kube/services/arborist/arborist-deploy.yaml", "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 86 + "line_number": 87 } ], "kube/services/argo/workflows/fence-usersync-wf.yaml": [ @@ -858,7 +810,7 @@ "filename": "kube/services/audit-service/audit-service-deploy.yaml", "hashed_secret": "42cde1c58c36d8bb5804a076e55ac6ec07ef99fc", "is_verified": false, - "line_number": 64 + "line_number": 65 } ], "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml": [ @@ -867,7 +819,7 @@ "filename": "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml", "hashed_secret": "7f834ccb442433fc12ec9532f75c3a4b6a748d4c", "is_verified": false, - "line_number": 46 + "line_number": 47 } ], "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml": [ @@ -965,7 +917,7 @@ "filename": "kube/services/dicom-server/dicom-server-deploy.yaml", "hashed_secret": "706168ac2565a93cceffe2202ac45d3d31c075fb", "is_verified": false, - "line_number": 40 + "line_number": 41 } ], "kube/services/fence/fence-canary-deploy.yaml": [ @@ -1039,63 +991,63 @@ "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 71 + "line_number": 72 }, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 74 + "line_number": 75 }, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 77 + "line_number": 78 
}, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 87 + "line_number": 88 }, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 90 + "line_number": 91 }, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 93 + "line_number": 94 }, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 96 + "line_number": 97 }, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", "is_verified": false, - "line_number": 99 + "line_number": 100 }, { "type": "Secret Keyword", "filename": "kube/services/fence/fence-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 102 + "line_number": 103 } ], "kube/services/fenceshib/fenceshib-canary-deploy.yaml": [ @@ -1241,28 +1193,28 @@ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 54 + "line_number": 55 }, { "type": "Secret Keyword", "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", "is_verified": false, - "line_number": 57 + "line_number": 58 }, { "type": "Secret Keyword", "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", "is_verified": false, - "line_number": 60 + "line_number": 61 }, { "type": "Secret Keyword", "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 66 + "line_number": 67 } ], "kube/services/frontend-framework/frontend-framework-root-deploy.yaml": [ @@ -1271,28 +1223,28 @@ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 54 + "line_number": 55 }, { "type": "Secret Keyword", "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", "is_verified": false, - "line_number": 57 + "line_number": 58 }, { "type": "Secret Keyword", "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", "is_verified": false, - "line_number": 60 + "line_number": 61 }, { "type": "Secret Keyword", "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 66 + "line_number": 67 } ], "kube/services/gdcapi/gdcapi-deploy.yaml": [ @@ -1398,14 +1350,14 @@ "filename": "kube/services/guppy/guppy-deploy.yaml", "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", "is_verified": false, - "line_number": 65 + "line_number": 66 }, { "type": "Secret Keyword", "filename": 
"kube/services/guppy/guppy-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 68 + "line_number": 69 } ], "kube/services/indexd/indexd-canary-deploy.yaml": [ @@ -1444,28 +1396,28 @@ "filename": "kube/services/indexd/indexd-deploy.yaml", "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", "is_verified": false, - "line_number": 63 + "line_number": 64 }, { "type": "Secret Keyword", "filename": "kube/services/indexd/indexd-deploy.yaml", "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", "is_verified": false, - "line_number": 66 + "line_number": 67 }, { "type": "Secret Keyword", "filename": "kube/services/indexd/indexd-deploy.yaml", "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", "is_verified": false, - "line_number": 72 + "line_number": 73 }, { "type": "Secret Keyword", "filename": "kube/services/indexd/indexd-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 75 + "line_number": 76 } ], "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml": [ @@ -1506,14 +1458,14 @@ "filename": "kube/services/jenkins/jenkins-deploy.yaml", "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", "is_verified": false, - "line_number": 157 + "line_number": 144 }, { "type": "Secret Keyword", "filename": "kube/services/jenkins/jenkins-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 160 + "line_number": 147 } ], "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml": [ @@ -1554,14 +1506,14 @@ "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", "is_verified": false, - "line_number": 153 + "line_number": 140 }, { "type": "Secret Keyword", "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 156 + "line_number": 143 } ], "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml": [ @@ -2870,21 +2822,21 @@ "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", "hashed_secret": "3da2c49c267b6c58401bbf05e379b38d20434f78", "is_verified": false, - "line_number": 61 + "line_number": 62 }, { "type": "Secret Keyword", "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", "hashed_secret": "469e0c2b1a67aa94955bae023ddc727be31581a7", "is_verified": false, - "line_number": 64 + "line_number": 65 }, { "type": "Secret Keyword", "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 67 + "line_number": 68 } ], "kube/services/metadata/metadata-deploy.yaml": [ @@ -2893,14 +2845,14 @@ "filename": "kube/services/metadata/metadata-deploy.yaml", "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9", "is_verified": false, - "line_number": 61 + "line_number": 62 }, { "type": "Secret Keyword", "filename": "kube/services/metadata/metadata-deploy.yaml", "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d", "is_verified": false, - "line_number": 66 + "line_number": 67 } ], "kube/services/monitoring/grafana-values.yaml": [ @@ -2982,28 +2934,28 @@ "filename": "kube/services/peregrine/peregrine-deploy.yaml", "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", "is_verified": false, - "line_number": 67 + "line_number": 68 }, { "type": "Secret Keyword", 
"filename": "kube/services/peregrine/peregrine-deploy.yaml", "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 70 + "line_number": 71 }, { "type": "Secret Keyword", "filename": "kube/services/peregrine/peregrine-deploy.yaml", "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", "is_verified": false, - "line_number": 76 + "line_number": 77 }, { "type": "Secret Keyword", "filename": "kube/services/peregrine/peregrine-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 79 + "line_number": 80 } ], "kube/services/pidgin/pidgin-deploy.yaml": [ @@ -3028,28 +2980,28 @@ "filename": "kube/services/portal/portal-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 55 + "line_number": 56 }, { "type": "Secret Keyword", "filename": "kube/services/portal/portal-deploy.yaml", "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 58 + "line_number": 59 }, { "type": "Secret Keyword", "filename": "kube/services/portal/portal-deploy.yaml", "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 61 + "line_number": 62 }, { "type": "Secret Keyword", "filename": "kube/services/portal/portal-deploy.yaml", "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 67 + "line_number": 68 } ], "kube/services/portal/portal-root-deploy.yaml": [ @@ -3058,28 +3010,28 @@ "filename": "kube/services/portal/portal-root-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 55 + "line_number": 56 }, { "type": "Secret Keyword", "filename": "kube/services/portal/portal-root-deploy.yaml", "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 58 + "line_number": 59 }, { "type": "Secret Keyword", "filename": "kube/services/portal/portal-root-deploy.yaml", "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 61 + "line_number": 62 }, { "type": "Secret Keyword", "filename": "kube/services/portal/portal-root-deploy.yaml", "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 67 + "line_number": 68 } ], "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml": [ @@ -3171,7 +3123,7 @@ "filename": "kube/services/requestor/requestor-deploy.yaml", "hashed_secret": "15debe4170aa5b89858d939f4c0644307ae7789b", "is_verified": false, - "line_number": 61 + "line_number": 62 } ], "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf": [ @@ -3223,21 +3175,21 @@ "filename": "kube/services/revproxy/revproxy-deploy.yaml", "hashed_secret": "c7a87a61893a647e29289845cb51e61afb06800b", "is_verified": false, - "line_number": 74 + "line_number": 75 }, { "type": "Secret Keyword", "filename": "kube/services/revproxy/revproxy-deploy.yaml", "hashed_secret": "b3a4e2dea4c1fae8c58a07a84065b73b3a2d831c", "is_verified": false, - "line_number": 77 + "line_number": 78 }, { "type": "Secret Keyword", "filename": "kube/services/revproxy/revproxy-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 80 + "line_number": 81 } ], "kube/services/sftp/sftp-deploy.yaml": [ @@ -3285,28 +3237,28 @@ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", "hashed_secret": 
"ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 63 + "line_number": 64 }, { "type": "Secret Keyword", "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 66 + "line_number": 67 }, { "type": "Secret Keyword", "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 72 + "line_number": 73 }, { "type": "Secret Keyword", "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 75 + "line_number": 76 } ], "kube/services/shiny/shiny-deploy.yaml": [ @@ -3324,7 +3276,7 @@ "filename": "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml", "hashed_secret": "7f932449df74fc78573fea502df8a484aef3f69d", "is_verified": false, - "line_number": 61 + "line_number": 62 } ], "kube/services/superset/superset-deploy.yaml": [ @@ -3415,7 +3367,7 @@ "filename": "kube/services/wts/wts-deploy.yaml", "hashed_secret": "5de687ae886f19c3cb68d4980e3f2e77cca3db9e", "is_verified": false, - "line_number": 65 + "line_number": 66 } ], "packer/buildAll.sh": [ @@ -3737,5 +3689,5 @@ } ] }, - "generated_at": "2024-03-07T21:26:14Z" + "generated_at": "2024-08-27T21:36:15Z" } diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 6eeb8f4fd..9401e6a4b 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -83,21 +83,21 @@ RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor apt-get install -y postgresql-client-13 # Copy sh script responsible for installing Python -COPY install-python3.8.sh /root/tmp/install-python3.8.sh +COPY install-python3.9.sh /root/tmp/install-python3.9.sh -# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python -RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ - bash /root/tmp/install-python3.8.sh && \ - rm -rf /root/tmp/install-python3.8.sh && \ +# Run the script responsible for installing Python 3.9.19 and link it to /usr/bin/python +RUN chmod +x /root/tmp/install-python3.9.sh; sync && \ + bash /root/tmp/install-python3.9.sh && \ + rm -rf /root/tmp/install-python3.9.sh && \ unlink /usr/bin/python3 && \ - ln -s /usr/local/bin/python3.8 /usr/bin/python3 + ln -s /usr/local/bin/python3.9 /usr/bin/python3 # Fix shebang for lsb_release -RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ - sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository +RUN sed -i 's/python3/python3.9/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.9/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3.8 -m pip install --upgrade pip setuptools && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade +RUN set -xe && python3.9 -m pip install --upgrade pip setuptools && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade && python3.9 -m pip install datadog --upgrade # install terraform RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ diff --git a/Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh deleted file mode 100755 index a01d59420..000000000 --- a/Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -wget https://www.python.org/ftp/python/3.8.0/Python-3.8.0.tar.xz -tar xf Python-3.8.0.tar.xz -rm Python-3.8.0.tar.xz -cd Python-3.8.0 -./configure -make -make altinstall diff --git a/Docker/jenkins/Jenkins-CI-Worker/install-python3.9.sh b/Docker/jenkins/Jenkins-CI-Worker/install-python3.9.sh new file mode 100755 index 000000000..88b7596ae --- /dev/null +++ b/Docker/jenkins/Jenkins-CI-Worker/install-python3.9.sh @@ -0,0 +1,8 @@ +#!/bin/bash +wget https://www.python.org/ftp/python/3.9.19/Python-3.9.19.tar.xz +tar xf Python-3.9.19.tar.xz +rm Python-3.9.19.tar.xz +cd Python-3.9.19 +./configure +make +make altinstall diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index 04ebe5864..aae48e7b7 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.426.3-lts-jdk21 +FROM jenkins/jenkins:2.462.1-lts-jdk21 USER root @@ -68,21 +68,21 @@ RUN DISTRO="$(lsb_release -c -s)" \ && rm -rf /var/lib/apt/lists/* # Copy sh script responsible for installing Python -COPY install-python3.8.sh /root/tmp/install-python3.8.sh +COPY install-python3.9.sh /root/tmp/install-python3.9.sh -# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python -RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ - ./root/tmp/install-python3.8.sh && \ - rm -rf /root/tmp/install-python3.8.sh && \ +# Run the script responsible for installing Python 3.9.19 and link it to /usr/bin/python +RUN chmod +x /root/tmp/install-python3.9.sh; sync && \ + ./root/tmp/install-python3.9.sh && \ + rm -rf /root/tmp/install-python3.9.sh && \ unlink /usr/bin/python3 && \ - ln -s /Python-3.8.0/python /usr/bin/python3 + ln -s /Python-3.9.19/python /usr/bin/python3 # Fix shebang for lsb_release -RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ - sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository +RUN sed -i 's/python3/python3.9/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.9/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc.
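+# NOTE: the pip installs below call python3.9 explicitly so the CLI tools land in the interpreter built by install-python3.9.sh (make altinstall is assumed to place it at /usr/local/bin/python3.9) rather than whichever python3 is first on PATH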
-RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade +RUN set -xe && python3.9 -m pip install --upgrade pip && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade # install chrome (supports headless mode) RUN set -xe \ diff --git a/Docker/jenkins/Jenkins/install-python3.8.sh b/Docker/jenkins/Jenkins/install-python3.8.sh deleted file mode 100755 index df21c66e5..000000000 --- a/Docker/jenkins/Jenkins/install-python3.8.sh +++ /dev/null @@ -1,7 +0,0 @@ -wget https://www.python.org/ftp/python/3.8.0/Python-3.8.0.tar.xz -tar xf Python-3.8.0.tar.xz -rm Python-3.8.0.tar.xz -cd Python-3.8.0 -./configure -make -make altinstall diff --git a/Docker/jenkins/Jenkins/install-python3.9.sh b/Docker/jenkins/Jenkins/install-python3.9.sh new file mode 100755 index 000000000..83d7f17cd --- /dev/null +++ b/Docker/jenkins/Jenkins/install-python3.9.sh @@ -0,0 +1,7 @@ +wget https://www.python.org/ftp/python/3.9.19/Python-3.9.19.tar.xz +tar xf Python-3.9.19.tar.xz +rm Python-3.9.19.tar.xz +cd Python-3.9.19 +./configure +make +make altinstall diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index e6b73bc76..c4bf93dfa 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ b/Docker/jenkins/Jenkins2/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.426.3-lts-jdk21 +FROM jenkins/jenkins:2.462.1-lts-jdk21 USER root @@ -69,21 +69,21 @@ RUN DISTRO="$(lsb_release -c -s)" \ && rm -rf /var/lib/apt/lists/* # Copy sh script responsible for installing Python -COPY install-python3.8.sh /root/tmp/install-python3.8.sh +COPY install-python3.9.sh /root/tmp/install-python3.9.sh -# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python -RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ - ./root/tmp/install-python3.8.sh && \ - rm -rf /root/tmp/install-python3.8.sh && \ +# Run the script responsible for installing Python 3.9.19 and link it to /usr/bin/python +RUN chmod +x /root/tmp/install-python3.9.sh; sync && \ + ./root/tmp/install-python3.9.sh && \ + rm -rf /root/tmp/install-python3.9.sh && \ unlink /usr/bin/python3 && \ - ln -s /Python-3.8.0/python /usr/bin/python3 + ln -s /Python-3.9.19/python /usr/bin/python3 # Fix shebang for lsb_release -RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ - sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository +RUN sed -i 's/python3/python3.9/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.9/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade +RUN set -xe && python3.9 -m pip install --upgrade pip && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade # install chrome (supports headless mode) RUN set -xe \ diff --git a/Docker/jenkins/Jenkins2/install-python3.8.sh b/Docker/jenkins/Jenkins2/install-python3.8.sh deleted file mode 100755 index df21c66e5..000000000 --- a/Docker/jenkins/Jenkins2/install-python3.8.sh +++ /dev/null @@ -1,7 +0,0 @@ -wget https://www.python.org/ftp/python/3.8.0/Python-3.8.0.tar.xz -tar xf Python-3.8.0.tar.xz -rm Python-3.8.0.tar.xz -cd Python-3.8.0 -./configure -make -make altinstall diff --git a/Docker/jenkins/Jenkins2/install-python3.9.sh b/Docker/jenkins/Jenkins2/install-python3.9.sh new file mode 100755 index 000000000..83d7f17cd --- /dev/null +++ b/Docker/jenkins/Jenkins2/install-python3.9.sh @@ -0,0 +1,7 @@ +wget https://www.python.org/ftp/python/3.9.19/Python-3.9.19.tar.xz +tar xf Python-3.9.19.tar.xz +rm Python-3.9.19.tar.xz +cd Python-3.9.19 +./configure +make +make altinstall diff --git a/Docker/nginx-prometheus-exporter-wrapper/Dockerfile b/Docker/nginx-prometheus-exporter-wrapper/Dockerfile index 5134ce440..9b883b0ab 100644 --- a/Docker/nginx-prometheus-exporter-wrapper/Dockerfile +++ b/Docker/nginx-prometheus-exporter-wrapper/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14-alpine as build-deps +FROM golang:1.21.8-alpine as build-deps RUN apk update && apk add --no-cache git gcc curl bash diff --git a/Docker/python-nginx/python2.7-alpine3.7/Dockerfile b/Docker/python-nginx/python2.7-alpine3.7/Dockerfile index 651bc1e7e..c4a934df5 100644 --- a/Docker/python-nginx/python2.7-alpine3.7/Dockerfile +++ b/Docker/python-nginx/python2.7-alpine3.7/Dockerfile @@ -1,6 +1,6 @@ # python2.7 microservice base image -FROM alpine:3.7 +FROM alpine:3.16.9 ENV DEBIAN_FRONTEND=noninteractive diff --git a/Docker/sidecar/Dockerfile b/Docker/sidecar/Dockerfile index ad784ba55..5e07ceaf4 100644 --- a/Docker/sidecar/Dockerfile +++ b/Docker/sidecar/Dockerfile @@ -1,4 +1,4 @@ -FROM nginx:1.15.6-alpine +FROM nginx:1-alpine COPY nginx.conf /etc/nginx/nginx.conf COPY uwsgi.conf.template /etc/nginx/gen3.conf.d/uwsgi.conf.template diff --git a/doc/README.md b/doc/README.md index c3c6602b3..4fc893935 100644 --- a/doc/README.md +++ b/doc/README.md @@ -85,7 +85,7 @@ For example - `gen3 help aws` opens `aws.md` * [utility vm](../tf_files/aws/modules/utility-vm/README.md) * [explorer infrastructure](https://github.com/uc-cdis/cdis-wiki/blob/master/dev/gen3/data_explorer/README.md) * [automation for gcp](../tf_files/gcp/commons/README.md) -* [gcp bucket access flows for DCF](https://github.com/uc-cdis/fence/blob/master/docs/google_architecture.md) +* [gcp bucket access flows for DCF](https://github.com/uc-cdis/fence/blob/master/docs/additional_documentation/google_architecture.md) * [authn and authz with fence](https://github.com/uc-cdis/fence/blob/master/README.md) * [jenkins](../kube/services/jenkins/README.md) * [jupyterhub configuration](../kube/services/jupyterhub/README.md) diff --git a/doc/dbbackup.md b/doc/dbbackup.md new file mode 100644 
index 000000000..9e21f2bde --- /dev/null +++ b/doc/dbbackup.md @@ -0,0 +1,52 @@ +# TL;DR + +This script facilitates the management of database backup and restore within the Gen3 environment. It can establish policies, service accounts, roles, and S3 buckets. Depending on the command provided, it can initiate a database dump, perform a restore, migrate databases to a new RDS Aurora instance, or clone databases to an RDS Aurora instance. + +## Usage + +```sh +gen3 dbbackup [dump|restore|va-dump|create-sa|migrate-to-aurora|copy-to-aurora] +``` + +### Commands + +#### dump + +Initiates a database dump and pushes it to an S3 bucket, creating the essential AWS resources if they are absent. The dump operation is intended to be executed from the namespace/commons that requires the backup. + +```sh +gen3 dbbackup dump +``` + +#### restore + +Initiates a database restore from an S3 bucket, creating the essential AWS resources if they are absent. The restore operation is meant to be executed in the target namespace where the backup needs to be restored. + +```sh +gen3 dbbackup restore +``` + +#### create-sa + +Creates the necessary service account and roles for DB copy. + +```sh +gen3 dbbackup create-sa +``` + +#### migrate-to-aurora + +Triggers a service account creation and a job to migrate a Gen3 commons to an AWS RDS Aurora instance. + +```sh +gen3 dbbackup migrate-to-aurora +``` + +#### copy-to-aurora + +Triggers a service account creation and a job to copy the Indexd, Sheepdog, and Metadata databases from another namespace in the same RDS Aurora cluster into new databases for the current namespace. + +```sh +gen3 dbbackup copy-to-aurora +``` + diff --git a/doc/gitops.md b/doc/gitops.md index 64c30597b..e860fb390 100644 --- a/doc/gitops.md +++ b/doc/gitops.md @@ -176,3 +176,10 @@ It takes a module as argument, like: vpc, eks. ``` gen3 gitops tfapply eks ``` + +### update-fence-cronjobs +Checks the image used by the fence-related cronjobs `fence-delete-expired-clients` and `fence-cleanup-expired-ga4gh-info` and updates each one if it does not match the image specified in the `manifest-versions` ConfigMap.
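+The check compares each cronjob's container image with the fence image recorded in the `manifest-versions` ConfigMap. A rough manual equivalent of that comparison (a sketch only, assuming `kubectl` access to the commons namespace, that the ConfigMap stores the fence image under a `fence` data key, and that each cronjob runs a single container) is:
+
+```
+desired="$(kubectl get configmap manifest-versions -o jsonpath='{.data.fence}')"
+for cj in fence-delete-expired-clients fence-cleanup-expired-ga4gh-info; do
+  current="$(kubectl get cronjob "$cj" -o jsonpath='{.spec.jobTemplate.spec.template.spec.containers[0].image}')"
+  # only reports drift; the gen3 command below also patches the cronjob image
+  [ "$current" = "$desired" ] || echo "$cj is running $current, expected $desired"
+done
+```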
+ +``` +gen3 gitops update-fence-cronjobs +``` diff --git a/files/authorized_keys/squid_authorized_keys_user b/files/authorized_keys/squid_authorized_keys_user index 4b35fecd9..fa891e516 100644 --- a/files/authorized_keys/squid_authorized_keys_user +++ b/files/authorized_keys/squid_authorized_keys_user @@ -17,6 +17,7 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDT5VxB1A2JOc3MurPSVH9U6x49PCZfaHgJD1FbKXgP ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3vyd6a7tsANi149ylPQYS8Gsp/SxJyhdK/j6arv77KbM0EIzzUiclFLnMKcqUQ263FrPyx3a3UP80R77ayCnwcEHrxlJrYfyFUva8vtmI9mu8VE7oXvuR/jcOyXM9NosxyYacL/p6W5X4r8tqo/gJFjmls1YRfu3JPlTgTT0VzGJu+B6rLEsw53c37VVzSaCtu/jBOjyxI1/UaNg1cd+hcfoQxJ9zSDqqE7ZUNOc3zHP+1AGYCQ/CJsNrDl2OkppIdC9He5jgjLhyD7yvyarI+oF05oHknol/K1hXK+yxIkF2Ou5krfjw7TMBvD+JbQVb35vL9acXFF20+lHLRLbobPU/6ZZTup3q7IRm5OWaL2CJtYZbJvicKW0Ep+vTzaiQjK71L6UxcIvnzvbP9Dnatv1GBMMDaQxAa4Lood8NG2ty1yfLN972akGqBlwJASXMRd/ogzxv2KSH9w6HHYoc2WpDhUtNHmjwX1FSLYPW3qx5ICMW6j9gR2u1tG4Ohzp1CmYVElnRHbnBrTkLde65Vqedk2tQy8fcopH59ZASIuR4GbhCb2SiNkr1PHEvfhLMzg/UCSnnhX9vUNhkPjJRy/bdL3pOt/77lpIQUqQBArOiZmfG8OD0q4+3Nr+c9v5bSSvynjirlKk+wb8sKyOoSAXdFeovL/A0BUKUjCtsXQ== dev@test.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQChK/8JjpUeWcF/1Ea2M4mSbLz1tOfpq74xD2USxE54kx7VoN1G7ylV76yqSIeRq1e7PPBEg5ZD1aXUJnlI32RwLJ5kaHnoB82Ta+Fv1B/vVoHCObcALfiHPpwPf1kM2liWEB0EhYcz1OUv3YQriPqjiRoWfnbw60GIyzhpWZhKRq0zlISOaTYdV9kafX+N7M6/gSU0632TgUwwsStYrffEleyrC/Lh+4UaESozWoPFiZLl2eMCKfZNFBB99HTFifImW2yC6Ag1QhCd1i3NpfiYuaSDH7WR3slPRSd8DiUAwGC2DkIuWPp3bhaAv2V4mtLIBAaTZsINIACB2+w7yf9yvCGtdobCmp4AA7ik9rEkRLk/Jff0YBHd6Z4qyIuRht3ZeWXIYSK1zOlPfs4lPUgvbjlPgMVFV2CrvOTnS+YZdW+8AklwRC3HDPD8wv3H/eGxl3K0vHWTBbTb774nVNfRDw81wcezCXFNUn4p2he7fgKcxs/rnMsYUcY8JJNR7Iz+NNIGUCom6HFwCMQdangFMHUW5TxxrlJcwVRaAns1M6g3ilYO+uvN/XsgCpZWYWnv5rBk8qz6dBM7gpc8tSr6Hvr7/vlghF3jpL+mQiW+7vUL+UZrUFNyoacUcQ+NuxKacHtHQKuRDyWofp+CB2b2a744F3mpkxx74HIkiZ72mQ== dev@test.com +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC/lrIPuGPfQzkm2FRMFn/+8MAY5q9godnJxbLJCQ1aKwenXlENHqHDmq+lrP+4S5KGARpTxnIC+i4jR995pDFJmeHsgS6O2GdBBTNi7DznIlqEGd7K4GwsNsLTi06ueuLLIy4tdbRtCYTIlSte5VbyQ1/KfUjTBvj5qXChY/wOG1O766GR681UFN0qk5BPLtEUWIfJCgKuHicxd6eWVoLrYbSj9e1Wug6aJVjngm+ufqAH+yH5PImHo+r0jaj9TiGXzDACAVrW8WipKZ6YlTRs+RCkVmUWgf0+aWfSEcFtrSCM+UzeID5E3T7dTSeWXMDYJSF9rZqCKqh8AIbtt2lH6Ukz19u+nr3zhznOA5AhdgrXSAYQqtss1lptQRn4It0wTq/dmRytbIXOnu4osNmyCs1xAv+b2YHbS0R8SiSPzqkUd1Z8/qNBWrXiBmITh86xFHJy4Nj70n9ZBkSQvPEgEevGtO7BTgH4ziyMYKeunF8IoA8mR9s3iHSSzSsNaWP6ICkTj4CRJsLfx6R600s1Fukwo2CieBs0gV1x4wvsesRtpZY2aTDHTLjrSXD8ZnOCqOtUHCsto+S9gGKgWONcrb7ofe7u1R/F6er67nVnjnfeSCaRYU49GHB94A9UmxBJssExIjpaKaO5ZSPKzG4OrvXaagMyYxWxKHCedj9otw== dev@test.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDDTX+pQvGrQVXmHGDhBP+632tgbb1j+BQWkrsUkDJGzwFiGs4dgqDs2eC+aDVq2LFz4xj0SgussFAKciB45OgmSZKX5yUE3Oo/lqov0Bb5f85iBHGv/X/JiuIYaq8GJklVyyo1sfKLUK1SOal6bE1WofezyTyDsdrHjIU50quzW7nB1CmL6rekIv/+df/seut4b3De1d2uX5WGGtcvQ5yTSgBW5aabMAJ2V9WlP/6Dw040Kq0MyKV01cIJ1HAjFhP58gbf3Eytz3AqqJVT6u0QroxhesCgKTyGcAyYy3airI/N0FHdC5oABVEJ6dKyy1rYvOchuxYeVMVVWn0vS7mZ+vP7dqaDmgEUU2qmTPBQZV2xBWCdpfyUYYARW2JzlEaySbmA+yoxFBsquunVbIgUGNEUbxefsFdM3k5pS6I1uuEM0ATYH5iNz84nKKCcksGlib0i/pEtra6N/mFF7yjHYBRb/E/VCZig0gKezDJWu/DO0emJA+kdQpqp48U+qFrSWkuiO0dCQYl3VCVo8vedgMGPjr8MbUjU7o8W1+DYyjFM8HYMknRNdVAqAoK+cedw9mAWVGpKFrl61caGTFck0634nAVFUmfGTh9XRaZeFdDnivxnqP837gcsdKnEGYnkrxWap97XeXzK0P0Svy1zBfUQyzU5vrHfHt2H7ILDMw== prodv1-usersync-sftp ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTpJ2l8nfOvhJ4Y3pjadFU69nfJBRuE0BaHE22LK9qflFWdhGW+T/x8Qy9406DFXCh6KED+q9lC+N4nR92AfgFNaBmkXZkzWLoXcqO1IWRexHwTqAUcrtLjpx5wNdCr3+vv9hWhXtvYg8ewnrZc+WxYde4EUmXbhzPXbg0SkBXTr6bpYhs6inyttfBeJNxbeydrW7cmhFiAdOkm03o3AXdH86PNlWVfVHy8OHHzf4fbvlJlOx7OeB+wOyQUr3DW+IWBLQFJk4uyagn/ECV9OIQpxoJFTQjcSrJ6v/GqlY5PImM6YxL8NlZu46CDIxagaZkum+iJ8dtPYr6tJuLiP5Ny0Gsl1X5DoKlstgyqqPNYTnZVS4GSS5Hyxm6HmodZ78OR5+vAoyWKZ3unXU5Dbkz0Qxq9VtrGo2xd0M+dDi/7YazRpLL0tc39w48Wl7KD3jFzoesZp1JHeEGLdGXlGCw8AM1FT0WDf28ShTRds6uWPGvMtM3XkVDPMLFwroKv1RCErmqLYod4HOMuwlmdRvtDGYb3NYsliOnHPiT9nhu2J6KmT1jj8uFOLyTaJCArtBqIsXscP3R4o0wBlQl3FniMdiK7ESkv8DUaOr1Co+/3wX9n/p/BW5bxuq1R9HpNyKsrALyNJUkquVT+5aPcNKXvmAeHAw/D0TYzy6ZKBpnDw== kyuleburton@Kyules-MacBook-Pro.local diff --git a/files/openvpn_management_scripts/create_ovpn.sh b/files/openvpn_management_scripts/create_ovpn.sh index 4e6ba7bf5..4d351464b 100755 --- a/files/openvpn_management_scripts/create_ovpn.sh +++ b/files/openvpn_management_scripts/create_ovpn.sh @@ -29,8 +29,8 @@ set -e set -u -USER_CERT_PATH="$KEY_PATH/$1.crt" -USER_KEY_PATH="$KEY_PATH/$1.key" +USER_CERT_PATH="$KEY_PATH/issued/$1.crt" +USER_KEY_PATH="$KEY_PATH/private/$1.key" #HEADER diff --git a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh index 1794a3b69..c7ac6ce3a 100755 --- a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh +++ b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh @@ -30,8 +30,8 @@ username=${username// /_} # now, clean out anything that's not alphanumeric or an underscore username=${username//[^a-zA-Z0-9_-.]/} -USER_CERT_PATH="$KEY_PATH/$1.crt" -USER_KEY_PATH="$KEY_PATH/$1.key" +USER_CERT_PATH="$KEY_PATH/issued/$1.crt" +USER_KEY_PATH="$KEY_PATH/private/$1.key" #make a temp dir TEMP_NAME="$username-$CLOUD_NAME-seperated" @@ -47,6 +47,7 @@ cp $USER_KEY_PATH $TEMP_DIR/client.key #This is because EXTHOST is a defined variable in the template while read r; do eval echo $r; done < 
$TEMPLATE_DIR/client_ovpn_seperate.settings >> $TEMP_DIR/${username}-${CLOUD_NAME}.ovpn +mkdir -p $KEY_DIR/ovpn_files_seperated tar -C $TEMP_DIR/../ -zcvf $KEY_DIR/ovpn_files_seperated/${username}-${CLOUD_NAME}-seperated.tgz $TEMP_NAME echo -e "Exiting ${BOLD}$_${CLEAR}" diff --git a/files/openvpn_management_scripts/create_vpn_user.sh b/files/openvpn_management_scripts/create_vpn_user.sh index 2f3ef406b..39be17fcb 100755 --- a/files/openvpn_management_scripts/create_vpn_user.sh +++ b/files/openvpn_management_scripts/create_vpn_user.sh @@ -49,13 +49,16 @@ export KEY_EMAIL=$email export KEY_ALTNAMES="DNS:${KEY_CN}" #This create the key's for the road warrior -echo -e "running ${YELLOW} build-batch-key" -build-key-batch $username &>/dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR +echo -e "running ${YELLOW} easyrsa build-client-full" +( + cd $EASYRSA_PATH + easyrsa build-client-full $username nopass &>/dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR +) #&& echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR -echo "Backup certs so we can revoke them if ever needed" -[ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/ -cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR +# echo "Backup certs so we can revoke them if ever needed" +# [ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/ +# cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR echo "Create the OVPN file for $username" $VPN_BIN_ROOT/create_ovpn.sh $KEY_CN $KEY_EMAIL > $KEY_DIR/ovpn_files/${username}-${CLOUD_NAME}.ovpn 2> /dev/null && echo -e "${GREEN}success!" 
|| echo -e "${RED}failure";echo -e $CLEAR diff --git a/files/openvpn_management_scripts/install_ovpn.sh b/files/openvpn_management_scripts/install_ovpn.sh index 795ac17f2..180d0274c 100644 --- a/files/openvpn_management_scripts/install_ovpn.sh +++ b/files/openvpn_management_scripts/install_ovpn.sh @@ -12,13 +12,13 @@ VARS_PATH="$EASYRSA_PATH/vars" #EASY-RSA Vars - KEY_SIZE=4096 - COUNTRY="US" - STATE="IL" - CITY="Chicago" - ORG="CDIS" - EMAIL='support\@datacommons.io' - KEY_EXPIRE=365 +KEY_SIZE=4096 +COUNTRY="US" +STATE="IL" +CITY="Chicago" +ORG="CDIS" +EMAIL='support\@gen3.org' +KEY_EXPIRE=365 #OpenVPN diff --git a/files/openvpn_management_scripts/reset_totp_token.sh b/files/openvpn_management_scripts/reset_totp_token.sh index b844af8f2..e937876a2 100755 --- a/files/openvpn_management_scripts/reset_totp_token.sh +++ b/files/openvpn_management_scripts/reset_totp_token.sh @@ -40,11 +40,15 @@ update_password_file() { } generate_qr_code() { - uuid=$(uuidgen) - qrcode_out=/var/www/qrcode/${uuid}.svg + mkdir -p /etc/openvpn/pki/qrcodes + qrcode_out=/etc/openvpn/pki/qrcodes/${vpn_username}.png string=$( python -c "import pyotp; print( pyotp.totp.TOTP('$totp_secret').provisioning_uri('$vpn_username', issuer_name='$CLOUD_NAME') )" ) - $( python -c "import pyqrcode; pyqrcode.create('$string').svg('${qrcode_out}', scale=8)" ) - vpn_creds_url="https://${FQDN}/$uuid.svg" + $( python -c "import qrcode; qrcode.make('$string').save('${qrcode_out}')" ) + # vpn_creds_url="https://${FQDN}/$uuid.svg" + s3Path="s3://${S3BUCKET}/qrcodes/${vpn_username}.png" + aws s3 cp ${qrcode_out} ${s3Path} + signedUrl="$(aws s3 presign "$s3Path" --expires-in "$((60*60*48))")" + vpn_creds_url=${signedUrl} } print_info() { diff --git a/files/openvpn_management_scripts/revoke_user.sh b/files/openvpn_management_scripts/revoke_user.sh index 0ffe5c364..89d102f38 100755 --- a/files/openvpn_management_scripts/revoke_user.sh +++ b/files/openvpn_management_scripts/revoke_user.sh @@ -25,18 +25,15 @@ set -e username=${1} -#Source the settings for EASY RSA -source $EASYRSA_PATH/vars #Override exports export KEY_CN=$username -set +e -#revoke-full $username || echo -e "${RED}${BOLD}${BLINK}FAILED TO REVOKE ${username}${CLEAR}" -revoke-full $username -#Apparently it doesn't exist like I expected, and says failed even when it succeeded. 
- -set -e +( + cd $EASYRSA_PATH + ./easyrsa revoke $username + ./easyrsa gen-crl +) sed -i "/${username},/d" $USER_PW_FILE || echo -e "${RED}${BOLD}${BLINK}Failed to remove $username from file ${USER_PW_FILE}${CLEAR}" /etc/openvpn/bin/push_to_s3.sh diff --git a/files/openvpn_management_scripts/send_email.sh b/files/openvpn_management_scripts/send_email.sh index 38ec6651a..0686af206 100755 --- a/files/openvpn_management_scripts/send_email.sh +++ b/files/openvpn_management_scripts/send_email.sh @@ -14,7 +14,7 @@ RED="\033[31m" echo -e "Entering ${BOLD}$_${CLEAR}" -S3BUCKET=WHICHVPN +export S3BUCKET=WHICHVPN if [ "${1}" == "" ] then diff --git a/files/openvpn_management_scripts/templates/network_tweaks.sh.template b/files/openvpn_management_scripts/templates/network_tweaks.sh.template index a137a8c6f..1caa8c36a 100644 --- a/files/openvpn_management_scripts/templates/network_tweaks.sh.template +++ b/files/openvpn_management_scripts/templates/network_tweaks.sh.template @@ -14,3 +14,5 @@ iptables -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT # Masquerade iptables -t nat -A POSTROUTING -s #VPN_SUBNET# -d #VM_SUBNET# -o $vpnserver_int -j MASQUERADE echo 1 > /proc/sys/net/ipv4/ip_forward + +service iptables save diff --git a/files/openvpn_management_scripts/templates/openvpn.conf.template b/files/openvpn_management_scripts/templates/openvpn.conf.template index d539015fe..7e692113e 100644 --- a/files/openvpn_management_scripts/templates/openvpn.conf.template +++ b/files/openvpn_management_scripts/templates/openvpn.conf.template @@ -10,16 +10,16 @@ persist-key persist-tun #certificates -ca easy-rsa/keys/ca.crt -cert easy-rsa/keys/#FQDN#.crt -key easy-rsa/keys/#FQDN#.key # This file should be kept secret -dh easy-rsa/keys/dh4096.pem -tls-auth easy-rsa/keys/ta.key 0 # This file is secret -crl-verify easy-rsa/keys/crl.pem # Revocation files +ca /etc/openvpn/easy-rsa/pki/ca.crt +cert /etc/openvpn/easy-rsa/pki/issued/#FQDN#.crt +key /etc/openvpn/easy-rsa/pki/private/#FQDN#.key # This file should be kept secret +dh /etc/openvpn/easy-rsa/pki/dh.pem +tls-auth /etc/openvpn/easy-rsa/pki/ta.key 0 # This file is secret +crl-verify /etc/openvpn/easy-rsa/pki/crl.pem # Revocation files #Password script -auth-user-pass-verify bin/auth-user-pass-verify.sh via-env -script-security 3 execve +auth-user-pass-verify /etc/openvpn/bin/auth-user-pass-verify.sh via-env +script-security 3 # execve #Cipher suite cipher AES-256-CBC diff --git a/files/openvpn_management_scripts/templates/settings.sh.template b/files/openvpn_management_scripts/templates/settings.sh.template index 2d5f46ef6..c58e8b98c 100644 --- a/files/openvpn_management_scripts/templates/settings.sh.template +++ b/files/openvpn_management_scripts/templates/settings.sh.template @@ -1,6 +1,7 @@ export VPN_SETTINGS_LOADED="1" export CLOUD_NAME='#CLOUD_NAME#' export FQDN="#FQDN#" +export EXTHOST='#CLOUD_NAME#.planx-pla.net' ## EXTHOST is set in the easy-rsa/vars env settings. 
I think these values have to maych so removing from here #sendemail vars @@ -28,7 +29,7 @@ export OPENVPN_MY_BIN="/etc/openvpn/bin" #CDIS OpenVPN scripts contants export TEMPLATE_DIR="/etc/openvpn/bin/templates" -export KEY_PATH="/etc/openvpn/easy-rsa/keys/" +export KEY_PATH="/etc/openvpn/easy-rsa/pki/" export CA_PATH="$KEY_PATH/ca.crt" export TA_KEY_PATH="$KEY_PATH/ta.key" export ARCHIVE_CERT_DIR="$KEY_DIR/user_certs/" @@ -37,6 +38,6 @@ export USER_PW_FILE="/etc/openvpn/user_passwd.csv" export VPN_BIN_ROOT="/etc/openvpn/bin" export VPN_USER_CSV="/etc/openvpn/user_passwd.csv" export VPN_FILE_ATTACHMENTS="-a$VPN_BIN_ROOT/OpenVPN_for_PLANX_Installation_Guide.pdf" - +export KEY_DIR="$EASYRSA_PATH/pki" export PATH=$PATH:$EASYRSA_PATH:$OPENVPN_MY_BIN source /etc/openvpn/bin/.venv/bin/activate diff --git a/files/openvpn_management_scripts/templates/vars.template b/files/openvpn_management_scripts/templates/vars.template index 0afa0c554..311f05605 100644 --- a/files/openvpn_management_scripts/templates/vars.template +++ b/files/openvpn_management_scripts/templates/vars.template @@ -1,81 +1,25 @@ -# easy-rsa parameter settings -export EXTHOST="#EXTHOST#" +# EasyRSA 3 vars file -# NOTE: If you installed from an RPM, -# don't edit this file in place in -# /usr/share/openvpn/easy-rsa -- -# instead, you should copy the whole -# easy-rsa directory to another location -# (such as /etc/openvpn) so that your -# edits will not be wiped out by a future -# OpenVPN package upgrade. +# This is a user-customized vars file for EasyRSA 3. +# Adjust these values to suit your needs. -# This variable should point to -# the top level of the easy-rsa -# tree. -export EASY_RSA="#EASY_RSA_DIR#" +# Key Size - Increase to 2048 if you are paranoid. This affects performance. +set_var EASYRSA_KEY_SIZE #KEY_SIZE# -# -# This variable should point to -# the requested executables -# -export OPENSSL="openssl" -export PKCS11TOOL="pkcs11-tool" -export GREP="grep" +# CA and Certificate Expiry - Set these to your desired expiry in days +set_var EASYRSA_CA_EXPIRE 3650 +set_var EASYRSA_CERT_EXPIRE #KEY_EXPIRE# +# Fields for the request Distinguished Name (DN) +# Adjust these to match your organization's information +set_var EASYRSA_REQ_COUNTRY "#COUNTRY#" +set_var EASYRSA_REQ_PROVINCE "#STATE#" +set_var EASYRSA_REQ_CITY "#CITY#" +set_var EASYRSA_REQ_ORG "#ORG#" +set_var EASYRSA_REQ_EMAIL "#EMAIL#" +set_var EASYRSA_REQ_OU "#OU#" -# This variable should point to -# the openssl.cnf file included -# with easy-rsa. -export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA` -# Edit this variable to point to -# your soon-to-be-created key -# directory. -# -# WARNING: clean-all will do -# a rm -rf on this directory -# so make sure you define -# it correctly! -export KEY_DIR="$EASY_RSA/keys" +set_var EASYRSA_BATCH "1" -# Issue rm -rf warning -echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR - -# PKCS11 fixes -export PKCS11_MODULE_PATH="dummy" -export PKCS11_PIN="dummy" - -# Increase this to 2048 if you -# are paranoid. This will slow -# down TLS negotiation performance -# as well as the one-time DH parms -# generation process. -export KEY_SIZE=#KEY_SIZE# - -# In how many days should the root CA key expire? -export CA_EXPIRE=3650 - -# In how many days should certificates expire? -export KEY_EXPIRE=#KEY_EXPIRE# - -# These are the default values for fields -# which will be placed in the certificate. -# Don't leave any of these fields blank. 
-export KEY_COUNTRY="#COUNTRY#" -export KEY_PROVINCE="#STATE#" -export KEY_CITY="#CITY#" -export KEY_ORG="#ORG#" -export KEY_EMAIL="#EMAIL#" -export KEY_OU="#OU#" - -# X509 Subject Field -export KEY_NAME="#KEY_NAME#" - -# PKCS11 Smart Card -# export PKCS11_MODULE_PATH="/usr/lib/changeme.so" -# export PKCS11_PIN=1234 - -# If you'd like to sign all keys with the same Common Name, uncomment the KEY_CN export below -# You will also need to make sure your OpenVPN server config has the duplicate-cn option set -# export KEY_CN="CommonName" +# Note: Do not leave any of the fields blank as it may cause the script to fail. diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index 362cfbfd5..5c0936867 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -35,7 +35,6 @@ jenkins-niaid EOF cat - > jenkins-envs-releases.txt < /dev/null; then + echo "jq could not be found. Please install jq to run this script." + exit 1 +fi + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +# Backup the $HOME/Gen3Secrets directory +backup_dir="$HOME/Gen3Secrets-$(date +%Y%m%d%H%M%S)" +cp -r "$HOME/Gen3Secrets" "$backup_dir" +echo "Backup of Gen3Secrets created at $backup_dir" + +# Function to update JSON file +update_json_config() { + local file_path=$1 + local service=$2 + local db_host=$3 + local db_username=$4 + local db_database=$5 + + echo "Updating JSON config for service: $service" + echo "File path: $file_path" + echo "db_host: $db_host" + echo "db_username: $db_username" + echo "db_database: $db_database" + + if [[ -f $file_path ]]; then + local tmp_file + tmp_file=$(mktemp) + + if [[ $service == "fence" || $service == "userapi" ]]; then + jq --arg db_host "$db_host" --arg db_username "$db_username" --arg db_database "$db_database" \ + '(.fence.db_host = $db_host) | (.fence.db_username = $db_username) | (.fence.db_database = $db_database) | + (.fence.fence_database = $db_database) | + (.userapi.db_host = $db_host) | (.userapi.db_username = $db_username) | (.userapi.db_database = $db_database) | + (.userapi.fence_database = $db_database) | + (.sheepdog.fence_host = $db_host) | (.sheepdog.fence_username = $db_username) | (.sheepdog.fence_database = $db_database) | + (.gdcapi.fence_host = $db_host) | (.gdcapi.fence_username = $db_username) | (.gdcapi.fence_database = $db_database) | + (.peregrine.fence_host = $db_host) | (.peregrine.fence_username = $db_username) | (.peregrine.fence_database = $db_database)' \ + "$file_path" > "$tmp_file" && mv "$tmp_file" "$file_path" + + # Verify the update + local updated_host updated_username updated_database + updated_host=$(jq -r '.fence.db_host' "$file_path") + updated_username=$(jq -r '.fence.db_username' "$file_path") + updated_database=$(jq -r '.fence.db_database' "$file_path") + if [[ "$updated_host" == "$db_host" && "$updated_username" == "$db_username" && "$updated_database" == "$db_database" ]]; then + gen3_log_info "Updated JSON config for service: $service successfully." + else + gen3_log_err "Failed to update JSON config for service: $service." 
+ fi + + elif [[ $service == "sheepdog" || $service == "gdcapi" ]]; then + jq --arg db_host "$db_host" --arg db_username "$db_username" --arg db_database "$db_database" \ + '(.sheepdog.db_host = $db_host) | (.sheepdog.db_username = $db_username) | (.sheepdog.db_database = $db_database) | + (.gdcapi.db_host = $db_host) | (.gdcapi.db_username = $db_username) | (.gdcapi.db_database = $db_database)' \ + "$file_path" > "$tmp_file" && mv "$tmp_file" "$file_path" + + # Verify the update + local updated_host updated_username updated_database + updated_host=$(jq -r '.sheepdog.db_host' "$file_path") + updated_username=$(jq -r '.sheepdog.db_username' "$file_path") + updated_database=$(jq -r '.sheepdog.db_database' "$file_path") + if [[ "$updated_host" == "$db_host" && "$updated_username" == "$db_username" && "$updated_database" == "$db_database" ]]; then + gen3_log_info "Updated JSON config for service: $service successfully." + else + gen3_log_err "Failed to update JSON config for service: $service." + fi + + elif [[ $service == "indexd" ]]; then + jq --arg db_host "$db_host" --arg db_username "$db_username" --arg db_database "$db_database" \ + '(.indexd.db_host = $db_host) | (.indexd.db_username = $db_username) | (.indexd.db_database = $db_database)' \ + "$file_path" > "$tmp_file" && mv "$tmp_file" "$file_path" + + # Verify the update + local updated_host updated_username updated_database + updated_host=$(jq -r '.indexd.db_host' "$file_path") + updated_username=$(jq -r '.indexd.db_username' "$file_path") + updated_database=$(jq -r '.indexd.db_database' "$file_path") + if [[ "$updated_host" == "$db_host" && "$updated_username" == "$db_username" && "$updated_database" == "$db_database" ]]; then + gen3_log_info "Updated JSON config for service: $service successfully." + else + gen3_log_err "Failed to update JSON config for service: $service." + fi + + elif [[ $service == "peregrine" ]]; then + jq --arg db_host "$db_host" --arg db_username "$db_username" --arg db_database "$db_database" \ + '(.peregrine.db_host = $db_host) | (.peregrine.db_username = $db_username) | (.peregrine.db_database = $db_database)' \ + "$file_path" > "$tmp_file" && mv "$tmp_file" "$file_path" + + # Verify the update + local updated_host updated_username updated_database + updated_host=$(jq -r '.peregrine.db_host' "$file_path") + updated_username=$(jq -r '.peregrine.db_username' "$file_path") + updated_database=$(jq -r '.peregrine.db_database' "$file_path") + if [[ "$updated_host" == "$db_host" && "$updated_username" == "$db_username" && "$updated_database" == "$db_database" ]]; then + gen3_log_info "Updated JSON config for service: $service successfully." + else + gen3_log_err "Failed to update JSON config for service: $service." + fi + + else + jq --arg db_host "$db_host" --arg db_username "$db_username" --arg db_database "$db_database" \ + '(.db_host = $db_host) | (.db_username = $db_username) | (.db_database = $db_database)' \ + "$file_path" > "$tmp_file" && mv "$tmp_file" "$file_path" + + # Verify the update + local updated_host updated_username updated_database + updated_host=$(jq -r '.db_host' "$file_path") + updated_username=$(jq -r '.db_username' "$file_path") + updated_database=$(jq -r '.db_database' "$file_path") + if [[ "$updated_host" == "$db_host" && "$updated_username" == "$db_username" && "$updated_database" == "$db_database" ]]; then + gen3_log_info "Updated JSON config for service: $service successfully." + else + gen3_log_err "Failed to update JSON config for service: $service." 
+ fi + fi + else + echo "File $file_path does not exist." + fi +} + +# Function to update other files +update_other_files() { + local file_path=$1 + local db_host=$2 + local db_username=$3 + local db_database=$4 + + echo "Updating other files at $file_path" + echo "db_host: $db_host" + echo "db_username: $db_username" + echo "db_database: $db_database" + + if [[ -f $file_path ]]; then + if [[ "$file_path" == *".env" ]]; then + sed -i "s|DB_HOST=.*|DB_HOST=$db_host|" "$file_path" + sed -i "s|DB_USER=.*|DB_USER=$db_username|" "$file_path" + sed -i "s|DB_DATABASE=.*|DB_DATABASE=$db_database|" "$file_path" + + # Verify the update + local updated_host updated_username updated_database + updated_host=$(grep 'DB_HOST=' "$file_path" | cut -d'=' -f2) + updated_username=$(grep 'DB_USER=' "$file_path" | cut -d'=' -f2) + updated_database=$(grep 'DB_DATABASE=' "$file_path" | cut -d'=' -f2) + else + sed -i "s|DB_HOST:.*|DB_HOST: $db_host|" "$file_path" + sed -i "s|DB_USER:.*|DB_USER: $db_username|" "$file_path" + sed -i "s|DB_DATABASE:.*|DB_DATABASE: $db_database|" "$file_path" + + # Verify the update + local updated_host updated_username updated_database + updated_host=$(grep 'DB_HOST:' "$file_path" | cut -d':' -f2 | xargs) + updated_username=$(grep 'DB_USER:' "$file_path" | cut -d':' -f2 | xargs) + updated_database=$(grep 'DB_DATABASE:' "$file_path" | cut -d':' -f2 | xargs) + fi + + if [[ "$updated_host" == "$db_host" && "$updated_username" == "$db_username" && "$updated_database" == "$db_database" ]]; then + gen3_log_info "Updated file at $file_path successfully." + else + gen3_log_err "Failed to update file at $file_path." + fi + else + echo "File $file_path does not exist." + fi +} + +# Function to update fence-config.yaml +update_fence_config() { + local creds_json_path="$HOME/Gen3Secrets/creds.json" + local file_path=$1 + local db_host=$2 + local db_username=$3 + local db_database=$4 + + echo "Updating fence-config.yaml at $file_path" + echo "db_host: $db_host" + echo "db_username: $db_username" + echo "db_database: $db_database" + + if [[ -f $file_path ]]; then + local current_password + current_password=$(jq -r '.fence.db_password' "$creds_json_path") + + sed -i "s|DB: postgresql://.*:.*@.*:5432/.*|DB: postgresql://$db_username:$current_password@$db_host:5432/$db_database|" "$file_path" + + # Verify the update + local updated_entry + updated_entry=$(grep 'DB: postgresql://' "$file_path") + if [[ "$updated_entry" == *"$db_host"* && "$updated_entry" == *"$db_username"* && "$updated_entry" == *"$db_database"* ]]; then + gen3_log_info "Updated fence-config.yaml at $file_path successfully." + else + gen3_log_err "Failed to update fence-config.yaml at $file_path." + fi + else + echo "File $file_path does not exist." 
+ fi +} + +# Function to parse the migration file and apply updates +parse_and_update() { + local migration_file=$1 + local creds_json_path="$HOME/Gen3Secrets/creds.json" + local namespace + namespace=$(gen3 db namespace) + local new_db_host + new_db_host=$(grep "INFO" "$migration_file" | awk '{print $8}') + + gen3_log_info "New db_host identified: $new_db_host" + while read -r line; do + if [[ $line == Source_Database* || $line == User* ]]; then + echo "Processing line: $line" + + IFS=' ' read -r -a parts <<< "$line" + local db_host="$new_db_host" + local db_username + local db_database + + if [[ $line == Source_Database* ]]; then + db_username="${parts[9]}" + echo "db_username='${parts[9]}'" + db_database="${parts[7]}" + echo "db_database='${parts[7]}'" + elif [[ $line == User* ]]; then + db_username="${parts[1]}" + echo "db_username='${parts[1]}'" + db_database="${parts[7]}" + echo "db_database='${parts[7]}'" + else + continue + fi + + # Extract the service name from db_username + if [[ $db_username =~ ^([a-zA-Z]+)_user_ ]]; then + local service="${BASH_REMATCH[1]}" + else + echo "Skipping line: $line due to improper db_username format" + continue + fi + + gen3_log_info "Updating service: $service with db_username: $db_username and db_database: $db_database" + + # Update specific config files for each service + case $service in + arborist) + update_json_config "$HOME/Gen3Secrets/g3auto/arborist/dbcreds.json" "$service" "$db_host" "$db_username" "$db_database" + ;; + audit) + update_json_config "$HOME/Gen3Secrets/g3auto/audit/dbcreds.json" "$service" "$db_host" "$db_username" "$db_database" + update_other_files "$HOME/Gen3Secrets/g3auto/audit/audit-service-config.yaml" "$db_host" "$db_username" "$db_database" + ;; + metadata) + update_json_config "$HOME/Gen3Secrets/g3auto/metadata/dbcreds.json" "$service" "$db_host" "$db_username" "$db_database" + update_other_files "$HOME/Gen3Secrets/g3auto/metadata/metadata.env" "$db_host" "$db_username" "$db_database" + ;; + ohdsi) + update_json_config "$HOME/Gen3Secrets/g3auto/ohdsi/dbcreds.json" "$service" "$db_host" "$db_username" "$db_database" + ;; + orthanc) + update_json_config "$HOME/Gen3Secrets/g3auto/orthanc/dbcreds.json" "$service" "$db_host" "$db_username" "$db_database" + ;; + requestor) + update_json_config "$HOME/Gen3Secrets/g3auto/requestor/dbcreds.json" "$service" "$db_host" "$db_username" "$db_database" + update_other_files "$HOME/Gen3Secrets/g3auto/requestor/requestor-config.yaml" "$db_host" "$db_username" "$db_database" + ;; + wts) + update_json_config "$HOME/Gen3Secrets/g3auto/wts/dbcreds.json" "$service" "$db_host" "$db_username" "$db_database" + ;; + fence) + update_fence_config "$HOME/Gen3Secrets/apis_configs/fence-config.yaml" "$db_host" "$db_username" "$db_database" + update_json_config "$creds_json_path" "$service" "$db_host" "$db_username" "$db_database" + ;; + sheepdog | peregrine | indexd) + update_json_config "$creds_json_path" "$service" "$db_host" "$db_username" "$db_database" + ;; + esac + fi + done < "$migration_file" +} + +# Run the script +parse_and_update "migration.txt" diff --git a/files/scripts/ecr-access-job.md b/files/scripts/ecr-access-job.md index 9659b186b..5f8dff767 100644 --- a/files/scripts/ecr-access-job.md +++ b/files/scripts/ecr-access-job.md @@ -59,7 +59,7 @@ Trust policy (allows Acct2): } ``` -- Policy in the account (Acct2) that contains the DynamoDB table (created automatically by `kube-setup-ecr-access-job.sh`): +- Policy in the account (Acct2) that contains the DynamoDB table (created 
automatically by `kube-setup-ecr-access-cronjob.sh`): ``` { "Version": "2012-10-17", diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index e0c4b3c46..0e7cf8ef3 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,6 +1,5 @@ import argparse import copy -import json import sys import requests import pydash @@ -50,6 +49,50 @@ "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy//", } +CLINICAL_TRIALS_GOV_FIELDS = [ + "NCTId", + "OfficialTitle", + "BriefTitle", + "Acronym", + "StudyType", + "OverallStatus", + "StartDate", + "StartDateType", + "CompletionDate", + "CompletionDateType", + "IsFDARegulatedDrug", + "IsFDARegulatedDevice", + "IsPPSD", + "BriefSummary", + "DetailedDescription", + "Condition", + "DesignAllocation", + "DesignPrimaryPurpose", + "Phase", + "DesignInterventionModel", + "EnrollmentCount", + "EnrollmentType", + "DesignObservationalModel", + "InterventionType", + "PrimaryOutcomeMeasure", + "SecondaryOutcomeMeasure", + "OtherOutcomeMeasure", + "Gender", + "GenderBased", + "MaximumAge", + "MinimumAge", + "IPDSharing", + "IPDSharingTimeFrame", + "IPDSharingAccessCriteria", + "IPDSharingURL", + "SeeAlsoLinkURL", + "AvailIPDURL", + "AvailIPDId", + "AvailIPDComment", + "PatientRegistry", + "DesignTimePerspective", +] + def is_valid_uuid(uuid_to_test, version=4): """ @@ -74,7 +117,13 @@ def is_valid_uuid(uuid_to_test, version=4): def update_filter_metadata(metadata_to_update): - filter_metadata = [] + # Retain these from existing filters + save_filters = ["Common Data Elements"] + filter_metadata = [ + filter + for filter in metadata_to_update["advSearchFilters"] + if filter["key"] in save_filters + ] for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items(): filter_field_values = pydash.get(metadata_to_update, metadata_field_key) if filter_field_values: @@ -97,7 +146,12 @@ def update_filter_metadata(metadata_to_update): filter_metadata = pydash.uniq(filter_metadata) metadata_to_update["advSearchFilters"] = filter_metadata # Retain these from existing tags - save_tags = ["Data Repository"] + save_tags = [ + "Data Repository", + "Common Data Elements", + "RequiredIDP", + "Additional Acknowledgement", + ] tags = [tag for tag in metadata_to_update["tags"] if tag["category"] in save_tags] # Add any new tags from advSearchFilters for f in metadata_to_update["advSearchFilters"]: @@ -112,7 +166,7 @@ def update_filter_metadata(metadata_to_update): def get_client_token(client_id: str, client_secret: str): try: - token_url = f"http://revproxy-service/user/oauth2/token" + token_url = "http://fence-service/oauth2/token" headers = {"Content-Type": "application/x-www-form-urlencoded"} params = {"grant_type": "client_credentials"} data = "scope=openid user data" @@ -135,7 +189,7 @@ def get_related_studies(serial_num, guid, hostname): if serial_num: mds = requests.get( - f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000" + f"http://metadata-service/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000" ) if mds.status_code == 200: related_study_metadata = mds.json() @@ -164,6 +218,21 @@ def get_related_studies(serial_num, guid, hostname): return related_study_result +def get_clinical_trials_gov_metadata(nct_id): + if not nct_id: + return None + ct_metadata = {} + try: + ct_metadata_result = 
requests.get(f"https://clinicaltrials.gov/api/v2/studies/{nct_id}?fields={'|'.join(CLINICAL_TRIALS_GOV_FIELDS)}") + if ct_metadata_result.status_code != 200: + raise Exception(f"Could not get clinicaltrials.gov metadata, error code {ct_metadata_result.status_code}") + else: + ct_metadata = ct_metadata_result.json() + except Exception as exc: + raise Exception(f"Could not get clinicaltrials.gov metadata: {exc}") from exc + return ct_metadata + + parser = argparse.ArgumentParser() parser.add_argument("--directory", help="CEDAR Directory ID for registering ") @@ -210,7 +279,7 @@ def get_related_studies(serial_num, guid, hostname): # Get the metadata from cedar to register print("Querying CEDAR...") cedar = requests.get( - f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", + f"http://cedar-wrapper-service/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", headers=token_header, ) @@ -227,24 +296,27 @@ def get_related_studies(serial_num, guid, hostname): returned_records = len(metadata_return["metadata"]["records"]) print(f"Successfully got {returned_records} record(s) from CEDAR directory") for cedar_record in metadata_return["metadata"]["records"]: - # get the appl id from cedar for querying in our MDS - cedar_appl_id = pydash.get( - cedar_record, "metadata_location.nih_application_id" + # get the CEDAR instance id from cedar for querying in our MDS + cedar_instance_id = pydash.get( + cedar_record, + "metadata_location.cedar_study_level_metadata_template_instance_ID", ) - if cedar_appl_id is None: - print("This record doesn't have appl_id, skipping...") + if cedar_instance_id is None: + print("This record doesn't have CEDAR instance id, skipping...") continue - # Get the metadata record for the nih_application_id + # Get the metadata record for the CEDAR instance id mds = requests.get( - f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.nih_application_id={cedar_appl_id}&data=true" + f"http://metadata-service/metadata?gen3_discovery.study_metadata.metadata_location.cedar_study_level_metadata_template_instance_ID={cedar_instance_id}&data=true" ) if mds.status_code == 200: mds_res = mds.json() # the query result key is the record of the metadata. If it doesn't return anything then our query failed. if len(list(mds_res.keys())) == 0 or len(list(mds_res.keys())) > 1: - print("Query returned nothing for", cedar_appl_id, "appl id") + print( + f"Query returned nothing for template_instance_ID={cedar_instance_id}&data=true" + ) continue # get the key for our mds record @@ -253,7 +325,6 @@ def get_related_studies(serial_num, guid, hostname): mds_res = mds_res[mds_record_guid] mds_cedar_register_data_body = {**mds_res} mds_discovery_data_body = {} - mds_clinical_trials = {} if mds_res["_guid_type"] == "discovery_metadata": print("Metadata is already registered. 
Updating MDS record") elif mds_res["_guid_type"] == "unregistered_discovery_metadata": @@ -266,15 +337,17 @@ def get_related_studies(serial_num, guid, hostname): ) continue - if "clinicaltrials_gov" in cedar_record: - mds_clinical_trials = cedar_record["clinicaltrials_gov"] - del cedar_record["clinicaltrials_gov"] - # some special handing for this field, because its parent will be deleted before we merging the CEDAR and MDS SLMD to avoid duplicated values cedar_record_other_study_websites = cedar_record.get( "metadata_location", {} ).get("other_study_websites", []) + # this ensures the nih_application_id, cedar_study_level_metadata_template_instance_ID and study_name are not alterable from CEDAR side del cedar_record["metadata_location"] + cedar_record["minimal_info"]["study_name"] = ( + mds_res["gen3_discovery"]["study_metadata"] + .get("minimal_info", {}) + .get("study_name", "") + ) mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][ @@ -282,7 +355,7 @@ def get_related_studies(serial_num, guid, hostname): ] = cedar_record_other_study_websites # setup citations - doi_citation = mds_res["gen3_discovery"]["study_metadata"].get( + doi_citation = mds_res["gen3_discovery"].get( "doi_citation", "" ) mds_res["gen3_discovery"]["study_metadata"]["citation"][ @@ -311,11 +384,9 @@ def get_related_studies(serial_num, guid, hostname): repository.update( {"repository_study_link": repository_study_link} ) - if ( - repository_citation_additional_text - not in repository_citation - ): - repository_citation += repository_citation_additional_text + if (repository.get("repository_study_link", None) and repository_citation_additional_text + not in repository_citation): + repository_citation += repository_citation_additional_text if len(data_repositories): data_repositories[0] = { **data_repositories[0], @@ -343,7 +414,9 @@ def get_related_studies(serial_num, guid, hostname): related_study_result = get_related_studies( serial_num, mds_record_guid, hostname ) - mds_res["gen3_discovery"]["related_studies"] = copy.deepcopy(related_study_result) + mds_res["gen3_discovery"]["related_studies"] = copy.deepcopy( + related_study_result + ) # merge data from cedar that is not study level metadata into a level higher deleted_keys = [] @@ -358,18 +431,34 @@ def get_related_studies(serial_num, guid, hostname): mds_res["gen3_discovery"] ) + clinical_trials_id = None + try: + clinical_trials_id = ( + mds_res["gen3_discovery"]["study_metadata"] + .get("metadata_location", {}) + .get("clinical_trials_study_ID", "") + ) + except Exception: + print("Unable to get clinical_trials_study_ID for study") + if clinical_trials_id: + try: + ct_gov_metadata = get_clinical_trials_gov_metadata(clinical_trials_id) + if ct_gov_metadata: + print(f"Got clinicaltrials.gov metadata for {mds_record_guid} with NCT ID {clinical_trials_id}") + mds_cedar_register_data_body["clinicaltrials_gov"] = copy.deepcopy(ct_gov_metadata) + except Exception as ex: + print(f'{ex}') + # This means the old clinicaltrials_gov section is actually from CEDAR not clinicaltrials.gov, so remove it + elif "clinicaltrials_gov" in mds_cedar_register_data_body: + del mds_cedar_register_data_body["clinicaltrials_gov"] + mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body - if mds_clinical_trials: - mds_cedar_register_data_body["clinicaltrials_gov"] = { - **mds_cedar_register_data_body.get("clinicaltrials_gov", {}), - **mds_clinical_trials, - } 
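
For reference, a minimal shell sketch of the clinicaltrials.gov v2 request that the new `get_clinical_trials_gov_metadata()` helper issues (same endpoint and pipe-separated `fields` parameter as in the patch). This is an illustration only, not part of the patch; the NCT id and the three-field subset below are placeholders.

```
# Hypothetical illustration, not part of the patch: mirrors the request made by
# get_clinical_trials_gov_metadata(), using a small subset of CLINICAL_TRIALS_GOV_FIELDS.
NCT_ID="NCT00000000"   # placeholder id
curl -s -G "https://clinicaltrials.gov/api/v2/studies/${NCT_ID}" \
  --data-urlencode "fields=NCTId|BriefTitle|OverallStatus" | jq .
```
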
mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" print(f"Metadata {mds_record_guid} is now being registered.") mds_put = requests.put( - f"http://revproxy-service/mds/metadata/{mds_record_guid}", + f"http://metadata-service/metadata/{mds_record_guid}", headers=token_header, json=mds_cedar_register_data_body, ) @@ -394,3 +483,6 @@ def get_related_studies(serial_num, guid, hostname): offset = offset + limit if (offset + limit) > total: limit = total - offset + + if limit < 0: + break diff --git a/files/scripts/psql-fips-fix.sh b/files/scripts/psql-fips-fix.sh index fcbb6e20c..8cb0ed049 100644 --- a/files/scripts/psql-fips-fix.sh +++ b/files/scripts/psql-fips-fix.sh @@ -16,7 +16,7 @@ for name in indexd fence sheepdog peregrine; do update_pass $name $username $password done -for name in wts metadata gearbox audit arborist access-backend argo_db atlas argo thor; do +for name in wts metadata gearbox audit arborist access-backend argo_db requestor atlas ohdsi argo thor; do if [[ ! -z $(gen3 secrets decode $name-g3auto dbcreds.json) ]]; then username=$(gen3 secrets decode $name-g3auto dbcreds.json | jq -r .db_username) password=$(gen3 secrets decode $name-g3auto dbcreds.json | jq -r .db_password) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 6896314ab..42095986a 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -7,6 +7,7 @@ achecker.ca apache.github.io api.epigraphdb.org api.monqcle.com +app.getambassador.io awslabs.github.io biodata-integration-tests.net marketing.biorender.com @@ -14,6 +15,7 @@ clinicaltrials.gov charts.bitnami.com ctds-planx.atlassian.net data.cityofchicago.org +data.stage.qdr.org dataguids.org api.login.yahoo.com apt.kubernetes.io @@ -31,7 +33,7 @@ centos.mirrors.hoobly.com centos.mirrors.tds.net centos.mirrors.wvstateu.edu cernvm.cern.ch -charts.bitnami.com +charts.authelia.com charts.helm.sh cloud.r-project.org coredns.github.io @@ -123,6 +125,7 @@ neuro.debian.net neurodeb.pirsquared.org nginx.org nvidia.github.io +ohsu-comp-bio.github.io opportunityinsights.org orcid.org pgp.mit.edu @@ -137,6 +140,7 @@ registry.terraform.io releases.rancher.com rendersnake.googlecode.com repec.org +repo.broadcom.com repo-prod.prod.sagebase.org repo-staging.prod.sagebase.org repo.continuum.io @@ -156,6 +160,7 @@ sa-update.space-pro.be security.debian.org services.mathworks.com streaming.stat.iastate.edu +uc-cdis.github.io us-east4-docker.pkg.dev us-central1-docker.pkg.dev www.google.com @@ -167,4 +172,5 @@ www.uniprot.org vpodc.org yahoo.com idp.stage.qdr.org -stage.qdr.org \ No newline at end of file +stage.qdr.org +data.qdr.syr.edu diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 1374c5d67..7c8d7f233 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -40,6 +40,7 @@ .dockerproject.org .dph.illinois.gov .elasticsearch.org +.eramba.org .erlang-solutions.com .external-secrets.io .extjs.com @@ -58,6 +59,7 @@ .immport.org .jenkins.io .jenkins-ci.org +.jetstack.io .k8s.io .kegg.jp .kidsfirstdrc.org diff --git a/flavors/vpn_nlb_central/vpnvm.sh b/flavors/vpn_nlb_central/vpnvm.sh index 879488eab..548962231 100644 --- a/flavors/vpn_nlb_central/vpnvm.sh +++ b/flavors/vpn_nlb_central/vpnvm.sh @@ -102,7 +102,7 @@ export FQDN="$SERVERNAME.planx-pla.net"; export cloud="$CLOUDNAME"; export SERVE #export FQDN="raryatestvpnv1.planx-pla.net"; export cloud="planxvpn1"; export 
SERVER_PEM="/root/server.pem"; bash /root/openvpn_management_scripts/install_ovpn.sh -#export FQDN="raryatestvpnv1.planx-pla.net"; export cloud="planxvpn"; export EMAIL="support@datacommons.io"; export SERVER_PEM="/root/server.pem"; export VPN_SUBNET="192.168.192.0/20"; export VM_SUBNET="10.128.0.0/20"; bash install_ovpn.sh +#export FQDN="raryatestvpnv1.planx-pla.net"; export cloud="planxvpn"; export EMAIL="support@gen3.org"; export SERVER_PEM="/root/server.pem"; export VPN_SUBNET="192.168.192.0/20"; export VM_SUBNET="10.128.0.0/20"; bash install_ovpn.sh ### need to install lighttpd @@ -174,4 +174,4 @@ sudo chmod 755 /etc/init.d/awslogs sudo systemctl enable awslogs sudo systemctl restart awslogs -echo "Install is completed" \ No newline at end of file +echo "Install is completed" diff --git a/flavors/vpn_nlb_central/vpnvm_new.sh b/flavors/vpn_nlb_central/vpnvm_new.sh new file mode 100644 index 000000000..627672694 --- /dev/null +++ b/flavors/vpn_nlb_central/vpnvm_new.sh @@ -0,0 +1,533 @@ +#!/bin/bash + +############################################################### +# variables +############################################################### + +MAGIC_URL="http://169.254.169.254/latest/meta-data/" +AVAILABILITY_ZONE=$(curl -s ${MAGIC_URL}placement/availability-zone) +PRIVATE_IPV4=$(curl -s ${MAGIC_URL}local-ipv4) +PUBLIC_IPV4=$(curl -s ${MAGIC_URL}public-ipv4) +REGION=$(echo ${AVAILABILITY_ZONE::-1}) +#DOCKER_DOWNLOAD_URL="https://download.docker.com/linux/ubuntu" +AWSLOGS_DOWNLOAD_URL="https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb" +#TERRAFORM_DOWNLOAD_URL="https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip" +DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release) +if [[ $DISTRO == "Ubuntu" ]]; then + WORK_USER="ubuntu" +else + WORK_USER="ec2-user" +fi +HOME_FOLDER="/home/${WORK_USER}" +SUB_FOLDER="${HOME_FOLDER}/cloud-automation" + +OPENVPN_PATH='/etc/openvpn' +BIN_PATH="${OPENVPN_PATH}/bin" +EASYRSA_PATH="${OPENVPN_PATH}/easy-rsa" +VARS_PATH="${EASYRSA_PATH}/vars" + +#EASY-RSA Vars +KEY_SIZE=4096 +COUNTRY="US" +STATE="IL" +CITY="Chicago" +ORG="CTDS" +EMAIL='support\@gen3.org' +KEY_EXPIRE=365 + +#OpenVPN +PROTO=tcp + + +############################################################### +# get any variables we want coming from terraform variables +############################################################### +if [ $# -eq 0 ]; +then + echo "No arguments supplied, something is wrong" + exit 1 +else + #OIFS=$IFS + echo $1 + IFS=';' read -ra ADDR <<< "$1" + echo ${ADDR[@]} + for i in "${ADDR[@]}"; do + echo $i + if [[ $i = *"cwl_group"* ]]; + then + CWL_GROUP="${CWL_GROUP:-$(echo ${i} | cut -d= -f2)}" + elif [[ ${i} = *"vpn_nlb_name"* ]]; + then + VPN_NLB_NAME="$(echo ${i} | cut -d= -f2)" + elif [[ ${i} = *"cloud_name"* ]]; + then + CLOUD_NAME="$(echo ${i} | cut -d= -f2)" + elif [[ ${i} = *"csoc_vpn_subnet"* ]]; + then + CSOC_VPN_SUBNET="$(echo ${i} | cut -d= -f2)" + elif [[ ${i} = *"csoc_vm_subnet"* ]]; + then + CSOC_VM_SUBNET="$(echo ${i} | cut -d= -f2)" + elif [[ $i = *"account_id"* ]]; + then + ACCOUNT_ID="$(echo ${i} | cut -d= -f2)" + elif [[ $i = *"alternate_cwlg"* ]]; + then + CWL_GROUP="$(echo ${i} | cut -d= -f2)" + fi + done + echo $1 +fi + +S3_BUCKET="vpn-certs-and-files-${VPN_NLB_NAME}" + +function logs_helper(){ + echo -e "****************** ${1} ******************" +} + +function install_basics() { + + logs_helper "Installing Basics" + if [[ $DISTRO == "Ubuntu" ]]; then + apt -y install 
python3-pip build-essential sipcalc wget curl jq apt-transport-https ca-certificates software-properties-common fail2ban libyaml-dev + apt -y install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools + apt -y install openvpn bridge-utils libssl-dev openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv + # For openVPN + debconf-set-selections <<< "postfix postfix/mailname string planx-pla.net" + debconf-set-selections <<< "postfix postfix/main_mailer_type string 'Internet Site'" + else + amazon-linux-extras install epel + yum -y -q install epel-release iptables-services + yum -y -q install python3-pip python3-devel gcc sipcalc wget curl jq ca-certificates software-properties-common fail2ban libyaml-dev + yum -y -q install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools + yum -y -q install openvpn bridge-utils openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv + fi + pip3 install awscli + useradd --shell /bin/nologin --system openvpn + + logs_helper "Basics installed" +} + + +function configure_basics() { + + logs_helper "Configuring Basics" + + local dest_path="/root/openvpn_management_scripts" + local src_path="${SUB_FOLDER}/files/openvpn_management_scripts" + cp -r ${src_path} /root + + # Different buckets for different CSOC vpn environments + sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/push_to_s3.sh + sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/recover_from_s3.sh + sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/send_email.sh + + # Replace the User variable for hostname, VPN subnet and VM subnet + #sed -i "s/SERVERNAME/${VPN_NLB_NAME}/" ${dest_path}/csoc_vpn_user_variable + #sed -i "s/CLOUDNAME/${CLOUD_NAME}/" ${dest_path}/csoc_vpn_user_variable + + #VPN_SUBNET=${CSOC_VPN_SUBNET} + #VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + #VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + #sed -i "s/VPN_SUBNET/$VPN_SUBNET_BASE\/$VPN_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable + + #VM_SUBNET=${CSOC_VM_SUBNET} + #VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + #VM_SUBNET_MASK_BITS=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + #sed -i "s/VM_SUBNET/$VM_SUBNET_BASE\/$VM_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable + + echo "aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh" + aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh + + logs_helper "Copying modified scripts to /etc/openvpn" + cp -vr /root/openvpn_management_scripts /etc/openvpn/ + + logs_helper "Basics configured" + +} + + +function configure_awscli() { + + logs_helper "Configuring AWS" + mkdir -p ${HOME_FOLDER}/.aws + cat < ${HOME_FOLDER}/.aws/config +[default] +output = json +region = us-east-1 + +[profile csoc] +output = json +region = us-east-1 +EOT + + mkdir -p /root/.aws + cat > /root/.aws/config <> ${config_json} < /root/server.pem + fi + + export FQDN=${CLOUD_NAME} + export cloud=${VPN_NLB_NAME} + export SERVER_PEM="/root/server.pem" + export VM_SUBNET=${CSOC_VM_SUBNET} + export VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + export VM_SUBNET_MASK=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' ) + export VM_SUBNET_MASK_BITS=$( sipcalc 
$VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + export VPN_SUBNET=${CSOC_VPN_SUBNET} + export VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + export VPN_SUBNET_MASK=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' ) + export VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + export server_pem="/root/server.pem" + echo "*******" + echo "${FQDN} -- ${cloud} -- ${SERVER_PEM} -- ${VPN_SUBNET} -- ${VPN_SUBNET_BASE} -- ${VPN_SUBNET_MASK_BITS} --/ ${VM_SUBNET} -- ${VM_SUBNET_BASE} -- ${VM_SUBNET_MASK_BITS}" + echo "*******" + #export FQDN="$SERVERNAME.planx-pla.net"; export cloud="$CLOUDNAME"; export SERVER_PEM="/root/server.pem"; + + #cp /etc/openvpn/bin/templates/lighttpd.conf.template /etc/lighttpd/lighttpd.conf + #mkdir -p --mode=750 /var/www/qrcode + #chown openvpn:www-data /var/www/qrcode + #mkdir -p /etc/lighttpd/certs + #cp /root/server.pem /etc/lighttpd/certs/server.pem + #service lighttpd restart + + #systemctl restart openvpn + + logs_helper "openVPN init complete" + +} + +function install_easyrsa() { + + logs_helper "Installing easyRSA" + if [[ -f $EASYRSA_PATH/easyrsa ]]; + then + logs_helper "easyRSA already installed" + return + fi + easyRsaVer="3.1.7" + wget https://github.com/OpenVPN/easy-rsa/releases/download/v3.1.7/EasyRSA-${easyRsaVer}.tgz + # extract to a folder called easyrsa + tar xvf EasyRSA-${easyRsaVer}.tgz + mv EasyRSA-${easyRsaVer}/ $EASYRSA_PATH + rm EasyRSA-${easyRsaVer}.tgz + cp "$OPENVPN_PATH/bin/templates/vars.template" $VARS_PATH + +# local easy_rsa_dir="$EASYRSA_PATH" +# local exthost="$FQDN" +# local ou="$cloud" +# local key_name="$ou-OpenVPN" + + perl -p -i -e "s|#EASY_RSA_DIR#|${EASYRSA_PATH}|" $VARS_PATH + perl -p -i -e "s|#EXTHOST#|${FQDN}|" $VARS_PATH + perl -p -i -e "s|#KEY_SIZE#|${KEY_SIZE}|" $VARS_PATH + perl -p -i -e "s|#COUNTRY#|${COUNTRY}|" $VARS_PATH + perl -p -i -e "s|#STATE#|${STATE}|" $VARS_PATH + perl -p -i -e "s|#CITY#|${CITY}|" $VARS_PATH + perl -p -i -e "s|#ORG#|${ORG}|" $VARS_PATH + perl -p -i -e "s|#EMAIL#|${EMAIL}|" $VARS_PATH + perl -p -i -e "s|#OU#|${cloud}|" $VARS_PATH + perl -p -i -e "s|#KEY_NAME#|${cloud}-OpenVPN|" $VARS_PATH + perl -p -i -e "s|#KEY_EXPIRE#|${KEY_EXPIRE}|" $VARS_PATH + + sed -i 's/^subjectAltName/#subjectAltName/' $EASYRSA_PATH/openssl-*.cnf + logs_helper "easyRSA complete" +} + +function install_custom_scripts() { + + logs_helper "installing custom scripts" + cd $OPENVPN_PATH + + #pull our openvpn scripts + #cp -r /root/openvpn_management_scripts /etc/openvpn/ + ln -sfn openvpn_management_scripts bin + cd $BIN_PATH + python3 -m venv .venv + #virtualenv .venv + #This is needed or else you get : .venv/bin/activate: line 57: PS1: unbound variable + set +u + # ( source .venv/bin/activate; pip install pyotp pyqrcode bcrypt ) + ( source .venv/bin/activate; pip3 install pyotp qrcode bcrypt ) + set -u + + logs_helper "custom scripts done" +} + +install_settings() { + + logs_helper "installing settings" + SETTINGS_PATH="$BIN_PATH/settings.sh" + cp "$OPENVPN_PATH/bin/templates/settings.sh.template" "$SETTINGS_PATH" + perl -p -i -e "s|#FQDN#|$FQDN|" $SETTINGS_PATH + perl -p -i -e "s|#EMAIL#|$EMAIL|" $SETTINGS_PATH + perl -p -i -e "s|#CLOUD_NAME#|${cloud}|" $SETTINGS_PATH + + logs_helper "settings installed" +} + +build_PKI() { + + logs_helper "building pki" + cd $EASYRSA_PATH + # ln -s openssl-1.0.0.cnf openssl.cnf + echo "This is long" + # ./easyrsa clean-all 
nopass + ./easyrsa init-pki + ./easyrsa build-ca nopass + ./easyrsa gen-dh + ./easyrsa gen-crl + ./easyrsa build-server-full $CLOUD_NAME nopass + # ./easyrsa gen-req $VPN_NLB_NAME.planx-pla.net nopass + openvpn --genkey --secret ta.key + mv ta.key $EASYRSA_PATH/pki/ta.key + + #This will error but thats fine, the crl.pem was created (without it openvpn server crashes) + set +e + ./revoke-full client &>/dev/null || true + set -e + logs_helper "pki done" + +} + +configure_ovpn() { + + logs_helper "configuring openvpn" + if [[ $DISTRO == "Ubuntu" ]]; then + OVPNCONF_PATH="/etc/openvpn/openvpn.conf" + else + OVPNCONF_PATH="/etc/openvpn/server/server.conf" + fi + cp "$OPENVPN_PATH/bin/templates/openvpn.conf.template" "$OVPNCONF_PATH" + + perl -p -i -e "s|#FQDN#|$FQDN|" $OVPNCONF_PATH + + perl -p -i -e "s|#VPN_SUBNET_BASE#|$VPN_SUBNET_BASE|" $OVPNCONF_PATH + perl -p -i -e "s|#VPN_SUBNET_MASK#|$VPN_SUBNET_MASK|" $OVPNCONF_PATH + + perl -p -i -e "s|#VM_SUBNET_BASE#|$VM_SUBNET_BASE|" $OVPNCONF_PATH + perl -p -i -e "s|#VM_SUBNET_MASK#|$VM_SUBNET_MASK|" $OVPNCONF_PATH + + perl -p -i -e "s|#PROTO#|$PROTO|" $OVPNCONF_PATH + + if [[ $DISTRO == "Ubuntu" ]]; then + systemctl restart openvpn + else + systemctl enable openvpn-server@server + systemctl start openvpn-server@server + fi + + logs_helper "openvpn configured" +} + +tweak_network() { + + logs_helper "tweaking network" + local nettweaks_path="$OPENVPN_PATH/bin/network_tweaks.sh" + cp "$OPENVPN_PATH/bin/templates/network_tweaks.sh.template" "${nettweaks_path}" + perl -p -i -e "s|#VPN_SUBNET#|$VPN_SUBNET|" ${nettweaks_path} + perl -p -i -e "s|#VM_SUBNET#|$VM_SUBNET|" ${nettweaks_path} + perl -p -i -e "s|#PROTO#|$PROTO|" ${nettweaks_path} + + chmod +x ${nettweaks_path} + ${nettweaks_path} + + # Disable firewall in amazonlinux + systemctl stop firewalld + systemctl disable firewalld + + #cp /etc/rc.local /etc/rc.local.bak + #sed -i 's/^exit/#exit/' /etc/rc.local + #echo /etc/openvpn/bin/network_tweaks.sh >> /etc/rc.local + #echo exit 0 >> /etc/rc.local + + + logs_helper "network tweaked" + +} + +install_webserver() { + + + logs_helper "installing webserver" + #Webserver used for QRCodes + if [[ $DISTRO == "Ubuntu" ]]; then + apt -y install lighttpd + else + yum -y install lighttpd + fi + cp "$OPENVPN_PATH/bin/templates/lighttpd.conf.template" /etc/lighttpd/lighttpd.conf + + mkdir -p --mode=750 /var/www/qrcode + chown openvpn:www-data /var/www/qrcode + + if [ -f $SERVER_PEM ] + then + mkdir --mode=700 /etc/lighttpd/certs + cp $SERVER_PEM /etc/lighttpd/certs/server.pem + service lighttpd restart + fi + + logs_helper "webserver installed" +} + + +install_cron() { + cp "$OPENVPN_PATH/bin/templates/cron.template" /etc/cron.d/openvpn +} + +misc() { + + logs_helper "installing misc" + cd $OPENVPN_PATH + mkdir -p easy-rsa/pki/ovpn_files + ln -sfn easy-rsa/pki/ovpn_files + + #If openvpn fails to start its cause perms. Init needs root rw to start, but service needs openvpn rw to work + mkdir --mode 775 -p clients.d/ + mkdir --mode 775 -p clients.d/tmp/ + chown root:openvpn clients.d/tmp/ + + mkdir -p easy-rsa/pki/ovpn_files_seperated/ + mkdir -p easy-rsa/pki/ovpn_files_systemd/ + mkdir -p easy-rsa/pki/ovpn_files_resolvconf/ + + touch user_passwd.csv + + mkdir -p environments + mkdir -p client-restrictions + + chown -R openvpn:openvpn easy-rsa/ user_passwd.csv clients.d/tmp/ + #ahhem. 
+ chown :root /etc/openvpn/clients.d/tmp + chmod g+rwx /etc/openvpn/clients.d/tmp + # systemctl restart openvpn + + logs_helper "misc done" +} + +function main() { + install_basics + configure_awscli + configure_basics + + if [[ $DISTRO == "Ubuntu" ]]; then + install_awslogs + fi + install_openvpn + + set -e + set -u + install_custom_scripts + # if [! -d "/etc/openvpn/easy-rsa"]; then + aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || install_easyrsa + + install_settings + + # if [! -d "/etc/openvpn/easy-rsa"]; then + aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || build_PKI + #fi + misc + configure_ovpn + tweak_network + + install_cron + + + mkdir -p --mode=750 /var/www/qrcode + + logs_helper "openvpn setup complete" + +} + +main diff --git a/flavors/vpn_nlb_central/vpnvm_ubuntu18.sh b/flavors/vpn_nlb_central/vpnvm_ubuntu18.sh index af5efdfaf..e2f8210ea 100644 --- a/flavors/vpn_nlb_central/vpnvm_ubuntu18.sh +++ b/flavors/vpn_nlb_central/vpnvm_ubuntu18.sh @@ -28,7 +28,7 @@ COUNTRY="US" STATE="IL" CITY="Chicago" ORG="CTDS" -EMAIL='support\@datacommons.io' +EMAIL='support\@gen3.org' KEY_EXPIRE=365 #OpenVPN diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index dd19ea7a4..b9e9f9514 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -14,7 +14,6 @@ gen3_load "gen3/gen3setup" gen3_awsrole_help() { gen3 help awsrole } - # # Assume-role policy - allows SA's to assume role. # NOTE: service-account to role is 1 to 1 @@ -71,7 +70,8 @@ function gen3_awsrole_ar_policy() { "${issuer_url}:aud": "sts.amazonaws.com", "${issuer_url}:sub": [ "system:serviceaccount:*:${serviceAccount}", - "system:serviceaccount:argo:default" + "system:serviceaccount:argo:default", + "system:serviceaccount:argo:argo-argo-workflows-server" ] } } diff --git a/gen3/bin/dbbackup.sh b/gen3/bin/dbbackup.sh index eb9611a90..df0139d3b 100644 --- a/gen3/bin/dbbackup.sh +++ b/gen3/bin/dbbackup.sh @@ -1,37 +1,32 @@ #!/bin/bash #################################################################################################### -# Script: dbdump.sh +# Script: dbbackup.sh # # Description: # This script facilitates the management of database backups within the gen3 environment. It is -# equipped to establish policies, service accounts, roles, and S3 buckets. Depending on the -# command provided, it will either initiate a database dump or perform a restore. +# equipped to establish policies, service accounts, roles, and S3 buckets. Depending on the +# command provided, it will either initiate a database dump, perform a restore, migrate to Aurora, +# or copy to Aurora. # # Usage: -# gen3 dbbackup [dump|restore] +# gen3 dbbackup [dump|restore|va-dump|create-sa|migrate-to-aurora|copy-to-aurora|encrypt|setup-cron ] # -# dump - Initiates a database dump, creating the essential AWS resources if they are absent. -# The dump operation is intended to be executed from the namespace/commons that requires -# the backup. -# restore - Initiates a database restore, creating the essential AWS resources if they are absent. -# The restore operation is meant to be executed in the target namespace, where the backup -# needs to be restored. -# -# Notes: -# This script extensively utilizes the AWS CLI and the gen3 CLI. Proper functioning demands a -# configured gen3 environment and the availability of the necessary CLI tools. +# dump - Initiates a database dump, creating the essential AWS resources if they are absent. +# The dump operation is intended to be executed from the namespace/commons that requires +# the backup. 
+# restore - Initiates a database restore, creating the essential AWS resources if they are absent. +# The restore operation is meant to be executed in the target namespace, where the backup +# needs to be restored. +# va-dump - Runs a va-testing DB dump. +# create-sa - Creates the necessary service account and roles for DB copy. +# migrate-to-aurora - Triggers a service account creation and a job to migrate a Gen3 commons to an AWS RDS Aurora instance. +# copy-to-aurora - Triggers a service account creation and a job to copy the databases Indexd, Sheepdog & Metadata to new databases within an RDS Aurora cluster. The source_namespace must be provided. The job should be run at the destination, not at the source. +# encrypt - Perform encrypted backup. +# setup-cron - Set up a cronjob for encrypted backup. # #################################################################################################### -# Exit on error -#set -e - -# Print commands before executing -#set -x - -#trap 'echo "Error at Line $LINENO"' ERR - source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" @@ -40,21 +35,36 @@ account_id=$(aws sts get-caller-identity --query "Account" --output text) vpc_name="$(gen3 api environment)" namespace="$(gen3 db namespace)" sa_name="dbbackup-sa" -bucket_name="gen3-db-backups-${account_id}" +bucket_name_encrypted="gen3-db-backups-encrypted-${account_id}" +kms_key_alias="alias/gen3-db-backups-kms-key" + +cluster_arn=$(kubectl config current-context) +eks_cluster=$(echo "$cluster_arn" | awk -F'/' '{print $2}') -gen3_log_info "policy_name: $policy_name" gen3_log_info "account_id: $account_id" gen3_log_info "vpc_name: $vpc_name" gen3_log_info "namespace: $namespace" gen3_log_info "sa_name: $sa_name" -gen3_log_info "bucket_name: $bucket_name" +gen3_log_info "bucket_name_encrypted: $bucket_name_encrypted" +gen3_log_info "kms_key_alias: $kms_key_alias" +gen3_log_info "eks_cluster: $eks_cluster" +# Create or get the KMS key +create_or_get_kms_key() { + kms_key_id=$(aws kms list-aliases --query "Aliases[?AliasName=='$kms_key_alias'].TargetKeyId" --output text) + if [ -z "$kms_key_id" ]; then + gen3_log_info "Creating new KMS key with alias $kms_key_alias" + kms_key_id=$(aws kms create-key --query "KeyMetadata.KeyId" --output text) + aws kms create-alias --alias-name $kms_key_alias --target-key-id $kms_key_id + else + gen3_log_info "KMS key with alias $kms_key_alias already exists" + fi + kms_key_arn=$(aws kms describe-key --key-id $kms_key_id --query "KeyMetadata.Arn" --output text) +} # Create an S3 access policy if it doesn't exist create_policy() { - # Check if policy exists if ! 
aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep -q "arn:aws:iam"; then - # Create the S3 access policy - policy document access_policy=$(cat <<-EOM { "Version": "2012-10-17", @@ -69,15 +79,14 @@ create_policy() { "s3:DeleteObject" ], "Resource": [ - "arn:aws:s3:::gen3-db-backups-*" + "arn:aws:s3:::gen3-db-backups-*", + "arn:aws:s3:::gen3-db-backups-encrypted-*" ] } ] } EOM ) - - # Create the S3 access policy from the policy document policy_arn=$(aws iam create-policy --policy-name "$policy_name" --policy-document "$access_policy" --query "Policy.Arn" --output text) gen3_log_info "policy_arn: $policy_arn" else @@ -87,20 +96,12 @@ EOM fi } - # Create or update the Service Account and its corresponding IAM Role create_service_account_and_role() { - cluster_arn=$(kubectl config current-context) - eks_cluster=$(echo "$cluster_arn" | awk -F'/' '{print $2}') oidc_url=$(aws eks describe-cluster --name $eks_cluster --query 'cluster.identity.oidc.issuer' --output text | sed -e 's/^https:\/\///') role_name="${vpc_name}-${namespace}-${sa_name}-role" role_arn="arn:aws:iam::${account_id}:role/${role_name}" local trust_policy=$(mktemp -p "$XDG_RUNTIME_DIR" "tmp_policy.XXXXXX") - gen3_log_info "trust_policy: $trust_policy" - gen3_log_info "eks_cluster: $eks_cluster" - gen3_log_info "oidc_url: $oidc_url" - gen3_log_info "role_name: $role_name" - cat > ${trust_policy} <&1; then - gen3_log_info "Updating existing role: $role_name" aws iam update-assume-role-policy --role-name $role_name --policy-document "file://$trust_policy" else - gen3_log_info "Creating new role: $role_name" aws iam create-role --role-name $role_name --assume-role-policy-document "file://$trust_policy" fi @@ -144,30 +138,34 @@ EOF if ! kubectl get serviceaccount -n $namespace $sa_name 2>&1; then kubectl create serviceaccount -n $namespace $sa_name fi - # Annotate the KSA with the IAM role ARN - gen3_log_info "Annotating Service Account with IAM role ARN" + # Annotate the KSA with the IAM role ARN kubectl annotate serviceaccount -n ${namespace} ${sa_name} eks.amazonaws.com/role-arn=${role_arn} --overwrite - } -# Create an S3 bucket if it doesn't exist +# Create an S3 bucket with SSE-KMS if it doesn't exist create_s3_bucket() { + local bucket_name=$1 + local kms_key_arn=$2 # Check if bucket already exists if aws s3 ls "s3://$bucket_name" 2>&1 | grep -q 'NoSuchBucket'; then - gen3_log_info "Bucket does not exist, creating..." aws s3 mb "s3://$bucket_name" - else - gen3_log_info "Bucket $bucket_name already exists, skipping bucket creation." + # Enable SSE-KMS encryption on the bucket + aws s3api put-bucket-encryption --bucket $bucket_name --server-side-encryption-configuration '{ + "Rules": [{ + "ApplyServerSideEncryptionByDefault": { + "SSEAlgorithm": "aws:kms", + "KMSMasterKeyID": "'"$kms_key_arn"'" + } + }] + }' fi } - # Function to trigger the database backup job db_dump() { gen3 job run psql-db-prep-dump } - # Function to trigger the database backup restore job db_restore() { gen3 job run psql-db-prep-restore @@ -177,36 +175,214 @@ va_testing_db_dump() { gen3 job run psql-db-dump-va-testing } +# Function to create the psql-db-copy service account and roles +create_db_copy_service_account() { + cat <&1; then + cat <&1; then + cat <" + exit 1 + fi + gen3_log_info "Copying databases within Aurora..." + copy_to_aurora "$2" + ;; + encrypt) + gen3_log_info "Performing encrypted backup..." 
+ check_prerequisites + encrypt_backup + ;; + setup-cron) + gen3_log_info "Setting up cronjob for encrypted backup..." + check_prerequisites + setup_cronjob + ;; *) - echo "Invalid command. Usage: gen3 dbbackup [dump|restore|va-dump]" + echo "Invalid command. Usage: gen3 dbbackup [dump|restore|va-dump|create-sa|migrate-to-aurora|copy-to-aurora|encrypt|setup-cron ]" return 1 ;; esac } -main "$1" +main "$@" diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 36af791ef..288e7fcde 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -8,6 +8,7 @@ gen3_load "gen3/gen3setup" repoList=$(aws ecr describe-repositories | jq -r .repositories[].repositoryName) accountList=( +433568766270 053927701465 199578515826 222487244010 diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh index bc0358499..975cf6e0d 100644 --- a/gen3/bin/gitops.sh +++ b/gen3/bin/gitops.sh @@ -461,7 +461,7 @@ gen3_gitops_sync() { # update fence ConfigMap before roll-all if [[ "$fence_roll" = true ]]; then gen3 update_config manifest-fence "$(gen3 gitops folder)/manifests/fence/fence-config-public.yaml" - fi + fi if [[ "$covid_cronjob_roll" = true ]]; then if g3k_config_lookup '.global."covid19_data_bucket"'; then @@ -503,6 +503,10 @@ gen3_gitops_sync() { fi curl -X POST --data-urlencode "payload={\"text\": \"Gitops-sync Cron: ${resStr} - Syncing dict and images on ${tmpHostname}\", \"attachments\": [{${dictAttachment}}, {${versionsAttachment}}, {${portalAttachment}}, {${fenceAttachment}}, {${etlAttachment}}, {${covidAttachment}}]}" "${slackWebHook}" fi + # update fence jobs + if [[ "$versions_roll" = true ]]; then + gen3_gitops_update_fence_cron_jobs + fi else echo "no changes detected, not rolling" fi @@ -527,6 +531,43 @@ gen3_gitops_rsync() { ssh "$target" "bash -ic 'gen3 gitops sync'" } +# +# Update fence cronjobs +# +gen3_gitops_update_fence_cron_jobs() { + # Fetch the manifest-versions ConfigMap and extract the fence image + local fence_manifest_image=$(kubectl get cm manifest-versions -o jsonpath='{.data.fence}') + + # List of fence-related cronjobs + local fence_cronjobs=("fence-delete-expired-clients" "fence-cleanup-expired-ga4gh-info") + + # Function to check and update cronjobs + update_cronjob() { + local cronjob_name=$1 + local manifest_image=$2 + + gen3_log_info "Checking cronjob $cronjob_name..." + + # Extract cronjob schedule directly using kubectl with jsonpath + local cronjob_schedule=$(kubectl get cronjobs.batch $cronjob_name -o jsonpath='{.spec.schedule}') + + # Check if the cronjob exists + if [[ -z "$cronjob_schedule" ]]; then + gen3_log_info "Cronjob $cronjob_name does not exist." + return + fi + + # Update cronjob with the image in manifest-versions ConfigMap + gen3_log_info "Updating cronjob $cronjob_name to use image $manifest_image..." 
+ gen3 job cron $cronjob_name "$cronjob_schedule" + } + + # Loop through each fence-related cronjob and check/update if needed + for cronjob in "${fence_cronjobs[@]}"; do + update_cronjob "$cronjob" "$fence_manifest_image" + done +} + # # Get the local manifest and cloud-automation folders in sync with github # @@ -1105,6 +1146,9 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then "sync") gen3_gitops_sync "$@" ;; + "update-fence-cronjobs") + gen3_gitops_update_fence_cron_jobs "$@" + ;; "taglist") gen3_gitops_repo_taglist "$@" ;; diff --git a/gen3/bin/jupyter.sh b/gen3/bin/jupyter.sh index 169ec59dc..b5c1c5390 100644 --- a/gen3/bin/jupyter.sh +++ b/gen3/bin/jupyter.sh @@ -210,7 +210,7 @@ gen3_jupyter_idle_pods() { fi # Get the list of idle ambassador clusters from prometheus - local promQuery="sum by (envoy_cluster_name) (rate(envoy_cluster_upstream_rq_total{kubernetes_namespace=\"${namespace}\"}[${ttl}]))" + local promQuery="sum by (envoy_cluster_name) (rate(envoy_cluster_upstream_rq_total{namespace=\"${namespace}\"}[${ttl}]))" local tempClusterFile="$(mktemp "$XDG_RUNTIME_DIR/idle_apps.json_XXXXXX")" gen3 prometheus query "$promQuery" "${tokenKey#none}" | jq -e -r '.data.result[] | { "cluster": .metric.envoy_cluster_name, "rate": .value[1] } | select(.rate == "0")' | tee "$tempClusterFile" 1>&2 if [[ $? != 0 ]]; then @@ -245,7 +245,7 @@ gen3_jupyter_idle_pods() { current_time=$(date +%s) age=$((current_time - pod_creation)) - # potential workspaces to be reaped for inactivity must be at least 60 minutes old + # potential workspaces to be reaped for inactivity must be at least 60 minutes old if ((age >= 3600)); then gen3_log_info "try to kill pod $name in $jnamespace" g3kubectl delete pod --namespace "$jnamespace" "$name" 1>&2 diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 1dca87c68..6357f0788 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -51,20 +51,20 @@ fi gen3 kube-setup-networkpolicy disable # -# Hopefull core secrets/config in place - start bringing up services +# Hopefully core secrets/config in place - start bringing up services # -if g3k_manifest_lookup .versions.indexd 2> /dev/null; then - gen3 kube-setup-indexd & -else - gen3_log_info "no manifest entry for indexd" -fi - if g3k_manifest_lookup .versions.arborist 2> /dev/null; then gen3 kube-setup-arborist || gen3_log_err "arborist setup failed?" 
else gen3_log_info "no manifest entry for arborist" fi +if g3k_manifest_lookup .versions.indexd 2> /dev/null; then + gen3 kube-setup-indexd & +else + gen3_log_info "no manifest entry for indexd" +fi + if g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then gen3 kube-setup-audit-service else @@ -264,9 +264,9 @@ fi gen3 kube-setup-revproxy if [[ "$GEN3_ROLL_FAST" != "true" ]]; then - if g3k_manifest_lookup .global.argocd 2> /dev/null; then - gen3 kube-setup-prometheus - fi + # if g3k_manifest_lookup .global.argocd 2> /dev/null; then + # gen3 kube-setup-prometheus + # fi # Internal k8s systems gen3 kube-setup-fluentd & # If there is an entry for karpenter in the manifest setup karpenter diff --git a/gen3/bin/kube-setup-access-backend.sh b/gen3/bin/kube-setup-access-backend.sh index bbb3ae663..60d4758c5 100644 --- a/gen3/bin/kube-setup-access-backend.sh +++ b/gen3/bin/kube-setup-access-backend.sh @@ -210,8 +210,10 @@ authz: - /programs/tutorial - /programs/open_access role_ids: - - reader - - storage_reader + - guppy_reader + - fence_reader + - peregrine_reader + - sheepdog_reader - description: full access to indexd API id: indexd_admin resource_paths: @@ -226,18 +228,22 @@ authz: - /programs/open_access role_ids: - creator - - reader + - guppy_reader + - fence_reader + - peregrine_reader + - sheepdog_reader - updater - deleter - storage_writer - - storage_reader - description: '' id: all_programs_reader resource_paths: - /programs role_ids: - - reader - - storage_reader + - guppy_reader + - fence_reader + - peregrine_reader + - sheepdog_reader - id: 'all_programs_writer' description: '' role_ids: @@ -328,12 +334,37 @@ authz: service: '*' id: creator - description: '' - id: reader + id: guppy_reader permissions: - action: method: read - service: '*' - id: reader + service: 'guppy' + id: guppy_reader + - description: '' + id: fence_reader + permissions: + - action: + method: read + service: 'fence' + id: fence_reader + - action: + method: read-storage + service: 'fence' + id: fence_storage_reader + - description: '' + id: peregrine_reader + permissions: + - action: + method: read + service: 'peregrine' + id: peregrine_reader + - description: '' + id: sheepdog_reader + permissions: + - action: + method: read + service: 'sheepdog' + id: sheepdog_reader - description: '' id: updater permissions: @@ -355,13 +386,6 @@ authz: method: write-storage service: '*' id: storage_creator - - description: '' - id: storage_reader - permissions: - - action: - method: read-storage - service: '*' - id: storage_reader - id: mds_user permissions: - action: diff --git a/gen3/bin/kube-setup-ambassador.sh b/gen3/bin/kube-setup-ambassador.sh index 5f92af5cc..06ae1ee56 100644 --- a/gen3/bin/kube-setup-ambassador.sh +++ b/gen3/bin/kube-setup-ambassador.sh @@ -68,11 +68,9 @@ case "$command" in ;; "hatchery") deploy_hatchery_proxy "$@" - gen3 kube-setup-prometheus prometheus ;; *) deploy_hatchery_proxy "$@" deploy_api_gateway "$@" - gen3 kube-setup-prometheus prometheus ;; esac \ No newline at end of file diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index 4c6c55eee..1a25a98c8 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -132,6 +132,40 @@ EOF ] } EOF + +# Create a cluster role with specific permissions for Argo +cat < /dev/null 2>&1; then @@ -154,20 +188,34 @@ EOF roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) gen3_log_info "Role annotate" g3kubectl annotate serviceaccount default 
eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $argo_namespace + g3kubectl annotate serviceaccount argo-argo-workflows-server eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $argo_namespace g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $nameSpace else gen3 awsrole create $roleName argo $nameSpace -all_namespaces roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n $argo_namespace + g3kubectl annotate serviceaccount argo-argo-workflows-server eks.amazonaws.com/role-arn=${roleArn} -n $argo_namespace fi - # Grant admin access within the current namespace to the argo SA in the current namespace - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$nameSpace:argo -n $nameSpace || true + # Grant access within the current namespace to the argo SA in the current namespace + g3kubectl create rolebinding argo-rolebinding --clusterrole=argo-cluster-role --serviceaccount=$nameSpace:argo -n $nameSpace || true aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile || true if [[ -z $internalBucketName ]]; then aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile || true fi + # Create a secret for the slack webhook + alarm_webhook=$(g3kubectl get cm global -o yaml | yq .data.slack_alarm_webhook | tr -d '"') + + if [ -z "$alarm_webhook" ]; then + gen3_log_err "Please set a slack_alarm_webhook in the 'global' configmap. This is needed to alert for failed workflows." + exit 1 + fi + + g3kubectl -n argo delete secret slack-webhook-secret + g3kubectl -n argo create secret generic "slack-webhook-secret" --from-literal=SLACK_WEBHOOK_URL=$alarm_webhook + + ## if new bucket then do the following # Get the aws keys from secret # Create and attach lifecycle policy diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh index f13a4d411..5a1f5ac0e 100644 --- a/gen3/bin/kube-setup-aws-es-proxy.sh +++ b/gen3/bin/kube-setup-aws-es-proxy.sh @@ -2,7 +2,7 @@ # # Deploy aws-es-proxy into existing commons # https://github.com/abutaha/aws-es-proxy -# +# source "${GEN3_HOME}/gen3/lib/utils.sh" @@ -11,16 +11,33 @@ gen3_load "gen3/lib/kube-setup-init" # Deploy Datadog with argocd if flag is set in the manifest path manifestPath=$(g3k_manifest_path) es7="$(jq -r ".[\"global\"][\"es7\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" +esDomain="$(jq -r ".[\"global\"][\"esDomain\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" +envname="$(gen3 api environment)" [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then - envname="$(gen3 api environment)" - - if [ "$es7" = true ]; then - if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ + if [ "$esDomain" != "null" ]; then + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${esDomain}" --query "DomainStatusList[*].Endpoints" --output text)" \ + && [[ -n "${ES_ENDPOINT}" && -n "${esDomain}" ]]; then + gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" + g3kubectl apply -f 
"${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" + gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." + else + # + # probably running in jenkins or job environment + # try to make sure network policy labels are up to date + # + gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up." + gen3 kube-setup-networkpolicy service aws-es-proxy + g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true + fi + elif [ "$es7" = false ]; then + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." else @@ -33,9 +50,10 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true fi else - if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." 
else @@ -50,6 +68,76 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then fi gen3 job cron es-garbage '@daily' else - gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy - secret is not configured" - exit 1 -fi + gen3_log_info "kube-setup-aws-es-proxy" "No secret detected, attempting IRSA setup" + deploy=true + + # Let's pre-calculate all the info we need about the cluster, so we can just pass it on later + if [ "$esDomain" != "null" ] && [ -n "$esDomain" ]; then + ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${esDomain}" --query "DomainStatusList[*].Endpoints" --output text)" + ES_ARN="$(aws es describe-elasticsearch-domains --domain-names "${esDomain}" --query "DomainStatusList[*].ARN" --output text)" + elif [ "$es7" = true ]; then + if [ -n "$envname" ]; then + ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" + ES_ARN="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata-2 --query "DomainStatusList[*].ARN" --output text)" + else + deploy=false + fi + else + if [ -n "$envname" ]; then + ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" + ES_ARN="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata --query "DomainStatusList[*].ARN" --output text)" + else + deploy=false + fi + fi + # Let's only do setup stuff if we're going to want to deploy... otherwise, we take the CI env actions + if [ "$deploy" = "true" ]; then + # Put that ARN into a template we get from terraform + policyjson=$(cat < /dev/null 2>&1; then return 1 fi -gen3_log_info "Checking cedar-client creds" -setup_creds +if [[ -n "$JENKINS_HOME" ]]; then + gen3_log_info "Skipping cedar-client creds setup in non-adminvm environment" +else + gen3_log_info "Checking cedar-client creds" + setup_creds +fi if ! gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper" diff --git a/gen3/bin/kube-setup-cluster-level-resources.sh b/gen3/bin/kube-setup-cluster-level-resources.sh new file mode 100644 index 000000000..f4349398f --- /dev/null +++ b/gen3/bin/kube-setup-cluster-level-resources.sh @@ -0,0 +1,41 @@ +#!/bin/bash +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +# Set default value for TARGET_REVISION +TARGET_REVISION="master" + +# Ask for TARGET_REVISION +read -p "Please provide a target revision for the cluster resources chart (default is master): " user_target_revision +# If user input is not empty, use it; otherwise, keep the default +TARGET_REVISION=${user_target_revision:-$TARGET_REVISION} + +# Ask for CLUSTER_NAME (no default value) +read -p "Enter the name of the cluster: " CLUSTER_NAME + +# Check if CLUSTER_NAME is provided +if [ -z "$CLUSTER_NAME" ]; then + echo "Error: CLUSTER_NAME cannot be empty." + exit 1 +fi + +# Create a temporary file +temp_file=$(mktemp) + +# Use sed to replace placeholders in the original file +sed -e "s|TARGET_REVISION|$TARGET_REVISION|g" \ + -e "s|CLUSTER_NAME|$CLUSTER_NAME|g" \ + $GEN3_HOME/kube/services/cluster-level-resources/app.yaml > "$temp_file" + +echo "WARNING: Do you have a folder already set up for this environment in gen3-gitops, in the form of /cluster-values/cluster-values.yaml? If not, this will not work." 
+echo "" +read -n 1 -s -r -p "Press any key to confirm and continue, or Ctrl+C to cancel..." +echo "" + +# Apply the templated file with kubectl +kubectl apply -f "$temp_file" + +# Clean up the temporary file +rm "$temp_file" + +echo "Application has been applied to the cluster." \ No newline at end of file diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh index a6a024578..c90ca348b 100644 --- a/gen3/bin/kube-setup-cohort-middleware.sh +++ b/gen3/bin/kube-setup-cohort-middleware.sh @@ -34,6 +34,7 @@ setup_secrets() { cat - > "$credsFile" < /dev/null 2>&1; then + if g3k_manifest_lookup '.versions["dicom-server"]' > /dev/null 2>&1; then export DICOM_SERVER_URL="/dicom-server" gen3_log_info "attaching ohif viewer to old dicom-server (orthanc w/ aurora)" fi - if g3k_manifest_lookup .versions["orthanc"] > /dev/null 2>&1; then + if g3k_manifest_lookup .versions.orthanc > /dev/null 2>&1; then export DICOM_SERVER_URL="/orthanc" gen3_log_info "attaching ohif viewer to new dicom-server (orthanc w/ s3)" fi diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index bdcff8ed0..97365677d 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -5,6 +5,44 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" +function CostUsagePolicy() { + roleName="$(gen3 api safe-name hatchery-sa)" + # Cost Usage Report policy + curPolicy="costUsageReportPolicy" + + # Use the AWS CLI to list all policies attached to the role and then grep to search for the policy name + policyArn=$(aws iam list-role-policies --role-name "$roleName" | grep "$curPolicy") + + # Check if the policy ARN variable is empty or not + if [ -n "$policyArn" ]; then + echo "Policy $curPolicy is attached to the role $roleName." + else + echo "Policy $curPolicy is NOT attached to the role $roleName." + echo "Attaching policy" + # Define the policy document + policyDocument='{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "ce:GetCostAndUsage", + "Resource": "*" + } + ] + }' + + # Create an inline policy for the role + aws iam put-role-policy --role-name "$roleName" --policy-name "$curPolicy" --policy-document "$policyDocument" + if [ $? -eq 0 ]; then + echo "Inline policy $curPolicy has been successfully created and attached to the role $roleName." + else + echo "There was an error creating the inline policy $curPolicy." 
+ fi + + fi +} + # Jenkins friendly export WORKSPACE="${WORKSPACE:-$HOME}" @@ -137,6 +175,8 @@ $assumeImageBuilderRolePolicyBlock "Action": [ "batch:DescribeComputeEnvironments", "batch:CreateComputeEnvironment", + "batch:UpdateComputeEnvironment", + "batch:ListJobs", "batch:CreateJobQueue", "batch:TagResource", "iam:ListPolicies", @@ -159,10 +199,28 @@ $assumeImageBuilderRolePolicyBlock "iam:CreateInstanceProfile", "iam:AddRoleToInstanceProfile", "iam:PassRole", - "s3:CreateBucket" + "kms:CreateKey", + "kms:CreateAlias", + "kms:DescribeKey", + "kms:TagResource", + "s3:CreateBucket", + "s3:PutEncryptionConfiguration", + "s3:PutBucketPolicy", + "s3:PutLifecycleConfiguration" ], "Resource": "*" }, + { + "Sid": "CreateSlrForNextflowBatchWorkspaces", + "Effect": "Allow", + "Action": "iam:CreateServiceLinkedRole", + "Resource": "arn:aws:iam::*:role/aws-service-role/batch.amazonaws.com/*", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "batch.amazonaws.com" + } + } + }, { "Sid": "PassRoleForNextflowBatchWorkspaces", "Effect": "Allow", @@ -209,6 +267,9 @@ if ! g3kubectl get sa "$saName" -o json | jq -e '.metadata.annotations | ."eks.a gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 fi +# function to setup IAM policies for CostUsageReport +CostUsagePolicy + if [[ -f "$(gen3_secrets_folder)/prisma/apikey.json" ]]; then ACCESSKEYID=$(jq -r .AccessKeyID "$(gen3_secrets_folder)/prisma/apikey.json") SECRETKEY=$(jq -r .SecretKey "$(gen3_secrets_folder)/prisma/apikey.json") diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index b75470f73..df5731cf1 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -15,23 +15,6 @@ ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.na scriptDir="${GEN3_HOME}/kube/services/ingress" gen3_ingress_setup_waf() { - gen3_log_info "Starting GPE-312 waf setup" - #variable to see if WAF already exists - export waf=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).Name'` -if [[ -z $waf ]]; then - gen3_log_info "Creating Web ACL. This may take a few minutes." - aws wafv2 create-web-acl\ - --name $vpc_name-waf \ - --scope REGIONAL \ - --default-action Allow={} \ - --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=GPE-312WebAclMetrics \ - --rules file://${GEN3_HOME}/gen3/bin/waf-rules-GPE-312.json \ - --region us-east-1 - #Need to sleep to avoid "WAFUnavailableEntityException" error since the waf takes a bit to spin up - sleep 300 -else - gen3_log_info "WAF already exists. Skipping..." -fi gen3_log_info "Attaching ACL to ALB." 
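With the creation block removed, this function only associates an already-existing ACL with the ALB. That association presumably reduces to a single call of roughly this shape, built from the acl_arn and ALB lookups that follow ($alb_arn here is a placeholder for the load balancer ARN, not a variable the script defines verbatim):

    aws wafv2 associate-web-acl --web-acl-arn "$acl_arn" --resource-arn "$alb_arn"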
export acl_arn=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).ARN'` export alb_name=`kubectl get ingress gen3-ingress | awk '{print $4}' | tail +2 | sed 's/^\([A-Za-z0-9]*-[A-Za-z0-9]*-[A-Za-z0-9]*\).*/\1/;q'` diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 949c1ccd1..0a743f7ed 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -24,13 +24,15 @@ gen3_deploy_karpenter() { karpenter=$(g3k_config_lookup .global.karpenter_version) fi export clusterversion=`kubectl version -o json | jq -r .serverVersion.minor` - if [ "${clusterversion}" = "25+" ]; then + if [ "${clusterversion}" = "28+" ]; then + karpenter=${karpenter:-v0.32.9} + elif [ "${clusterversion}" = "25+" ]; then karpenter=${karpenter:-v0.27.0} elif [ "${clusterversion}" = "24+" ]; then karpenter=${karpenter:-v0.24.0} else - karpenter=${karpenter:-v0.22.0} - fi + karpenter=${karpenter:-v0.32.9} + fi local queue_name="$(gen3 api safe-name karpenter-sqs)" echo '{ "Statement": [ @@ -38,6 +40,7 @@ gen3_deploy_karpenter() { "Action": [ "ssm:GetParameter", "iam:PassRole", + "iam:*InstanceProfile", "ec2:DescribeImages", "ec2:RunInstances", "ec2:DescribeSubnets", @@ -142,6 +145,7 @@ gen3_deploy_karpenter() { sleep 15 aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::$(aws sts get-caller-identity --output text --query "Account"):role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true gen3_log_info "Installing karpenter using helm" + helm template karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version ${karpenter} --namespace "karpenter" | g3kubectl apply -f - helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \ --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ --set settings.aws.clusterEndpoint="${cluster_endpoint}" \ diff --git a/gen3/bin/kube-setup-s3-csi-driver.sh b/gen3/bin/kube-setup-s3-csi-driver.sh new file mode 100644 index 000000000..c93ccf8dd --- /dev/null +++ b/gen3/bin/kube-setup-s3-csi-driver.sh @@ -0,0 +1,202 @@ +#!/bin/bash + +#################################################################################################### +# Script: kube-setup-s3-csi-driver.sh +# +# Description: +# This script sets up the Mountpoint for Amazon S3 CSI driver in an EKS cluster. +# It creates necessary IAM policies and roles. 
+#
+# Usage:
+#   gen3 kube-setup-s3-csi-driver [bucket_name]
+#
+####################################################################################################
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+account_id=$(aws sts get-caller-identity --query "Account" --output text)
+vpc_name="$(gen3 api environment)"
+namespace="$(gen3 db namespace)"
+default_bucket_name_encrypted="gen3-db-backups-encrypted-${account_id}"
+bucket_name=${1:-$default_bucket_name_encrypted}
+
+cluster_arn=$(kubectl config current-context)
+eks_cluster=$(echo "$cluster_arn" | awk -F'/' '{print $2}')
+
+gen3_log_info "account_id: $account_id"
+gen3_log_info "vpc_name: $vpc_name"
+gen3_log_info "namespace: $namespace"
+gen3_log_info "bucket_name: $bucket_name"
+gen3_log_info "eks_cluster: $eks_cluster"
+
+# Create policy for Mountpoint for Amazon S3 CSI driver
+create_s3_csi_policy() {
+  policy_name="AmazonS3CSIDriverPolicy-${eks_cluster}"
+  policy_arn=$(aws iam list-policies --query "Policies[?PolicyName == '$policy_name'].[Arn]" --output text)
+  if [ -z "$policy_arn" ]; then
+    cat <<EOF > /tmp/s3-csi-policy-$$.json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "MountpointFullBucketAccess",
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${bucket_name}"
+      ]
+    },
+    {
+      "Sid": "MountpointFullObjectAccess",
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:AbortMultipartUpload",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${bucket_name}/*"
+      ]
+    }
+  ]
+}
+EOF
+    policy_arn=$(aws iam create-policy --policy-name "$policy_name" --policy-document file:///tmp/s3-csi-policy-$$.json --query "Policy.Arn" --output text)
+    rm -f /tmp/s3-csi-policy-$$.json
+  fi
+  gen3_log_info "Created or found policy with ARN: $policy_arn"
+  echo $policy_arn
+}
+
+# Create the trust policy for Mountpoint for Amazon S3 CSI driver
+create_s3_csi_trust_policy() {
+  oidc_url=$(aws eks describe-cluster --name $eks_cluster --query 'cluster.identity.oidc.issuer' --output text | sed -e 's/^https:\/\///')
+  trust_policy_file="/tmp/aws-s3-csi-driver-trust-policy-$$.json"
+  cat <<EOF > ${trust_policy_file}
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${account_id}:oidc-provider/${oidc_url}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringLike": {
+          "${oidc_url}:aud": "sts.amazonaws.com",
+          "${oidc_url}:sub": "system:serviceaccount:*:s3-csi-*"
+        }
+      }
+    }
+  ]
+}
+EOF
+}
+
+# Create the IAM role for Mountpoint for Amazon S3 CSI driver
+create_s3_csi_role() {
+  role_name="AmazonEKS_S3_CSI_DriverRole-${eks_cluster}"
+  if ! aws iam get-role --role-name $role_name 2>/dev/null; then
+    aws iam create-role --role-name $role_name --assume-role-policy-document file:///tmp/aws-s3-csi-driver-trust-policy-$$.json
+    rm -f /tmp/aws-s3-csi-driver-trust-policy-$$.json
+  fi
+  gen3_log_info "Created or found role: $role_name"
+  echo $role_name
+}
+
+# Attach the policies to the IAM role
+attach_s3_csi_policies() {
+  role_name=$1
+  policy_arn=$2
+  eks_policy_name="eks-s3-csi-policy-${eks_cluster}"
+  gen3_log_info "Attaching S3 CSI policy with ARN: $policy_arn to role: $role_name"
+  eks_policy_arn=$(aws iam list-policies --query "Policies[?PolicyName == '$eks_policy_name'].Arn" --output text)
+  if [ -z "$eks_policy_arn" ]; then
+    cat <<EOF > /tmp/eks-s3-csi-policy-$$.json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${bucket_name}",
+        "arn:aws:s3:::${bucket_name}/*"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "kms:Decrypt",
+        "kms:Encrypt",
+        "kms:GenerateDataKey"
+      ],
+      "Resource": "*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "eks:DescribeCluster"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+EOF
+    eks_policy_arn=$(aws iam create-policy --policy-name "$eks_policy_name" --policy-document file:///tmp/eks-s3-csi-policy-$$.json --query "Policy.Arn" --output text)
+    rm -f /tmp/eks-s3-csi-policy-$$.json
+  fi
+  aws iam attach-role-policy --role-name $role_name --policy-arn $policy_arn
+  aws iam attach-role-policy --role-name $role_name --policy-arn $eks_policy_arn
+}
+
+# Create or update the CSI driver and its resources
+setup_csi_driver() {
+  create_s3_csi_policy
+  policy_arn=$(aws iam list-policies --query "Policies[?PolicyName == 'AmazonS3CSIDriverPolicy-${eks_cluster}'].[Arn]" --output text)
+  create_s3_csi_trust_policy
+  create_s3_csi_role
+  role_name="AmazonEKS_S3_CSI_DriverRole-${eks_cluster}"
+  attach_s3_csi_policies $role_name $policy_arn
+
+  # Install CSI driver
+  gen3_log_info "eks cluster name: $eks_cluster"
+
+  # Capture the output of the command and prevent it from exiting the script
+  csi_driver_check=$(aws eks describe-addon --cluster-name $eks_cluster --addon-name aws-mountpoint-s3-csi-driver --query 'addon.addonName' --output text 2>&1 || true)
+
+  if echo "$csi_driver_check" | grep -q "ResourceNotFoundException"; then
+    gen3_log_info "CSI driver not found, installing..."
+    aws eks create-addon --cluster-name $eks_cluster --addon-name aws-mountpoint-s3-csi-driver --service-account-role-arn arn:aws:iam::${account_id}:role/AmazonEKS_S3_CSI_DriverRole-${eks_cluster}
+    csi_status="CREATING"
+    retries=0
+    while [ "$csi_status" != "ACTIVE" ] && [ $retries -lt 12 ]; do
+      gen3_log_info "Waiting for CSI driver to become active... (attempt $((retries+1)))"
+      sleep 10
+      csi_status=$(aws eks describe-addon --cluster-name $eks_cluster --addon-name aws-mountpoint-s3-csi-driver --query 'addon.status' --output text || echo "CREATING")
+      retries=$((retries+1))
+    done
+    if [ "$csi_status" == "ACTIVE" ]; then
+      gen3_log_info "CSI driver successfully installed and active."
+    else
+      gen3_log_error "CSI driver installation failed or not active. Current status: $csi_status"
+    fi
+  elif echo "$csi_driver_check" | grep -q "aws-mountpoint-s3-csi-driver"; then
+    gen3_log_info "CSI driver already exists, skipping installation."
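After the addon is created, the same describe-addon call the script polls with can be rerun by hand to confirm the install, and the script itself is invoked as documented in its header (the bucket argument is optional):

    gen3 kube-setup-s3-csi-driver "$bucket_name"
    aws eks describe-addon --cluster-name "$eks_cluster" --addon-name aws-mountpoint-s3-csi-driver --query 'addon.status' --output text   # expect ACTIVE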
+ else + gen3_log_info "Unexpected error occurred: $csi_driver_check" + exit 1 + fi +} + +setup_csi_driver diff --git a/gen3/bin/prometheus.sh b/gen3/bin/prometheus.sh index 1d71c6a7a..d7290451c 100644 --- a/gen3/bin/prometheus.sh +++ b/gen3/bin/prometheus.sh @@ -4,9 +4,7 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" - -#export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-server.prometheus.svc.cluster.local"}" -export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-operated.monitoring.svc.cluster.local:9090"}" +export GEN3_PROMHOST="${GEN3_PROMHOST:-"https://mimir.planx-pla.net"}" gen3_prom_help() { gen3 help prometheus @@ -16,11 +14,11 @@ function gen3_prom_curl() { local urlBase="$1" shift || return 1 local hostOrKey="${1:-${GEN3_PROMHOST}}" - local urlPath="api/v1/$urlBase" + local urlPath="prometheus/api/v1/$urlBase" if [[ "$hostOrKey" =~ ^http ]]; then gen3_log_info "fetching $hostOrKey/$urlPath" - curl -s -H 'Accept: application/json' "$hostOrKey/$urlPath" + curl -s -H 'Accept: application/json' -H "X-Scope-OrgID: anonymous" "$hostOrKey/$urlPath" else gen3 api curl "$urlPath" "$hostOrKey" fi diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh index 6dac0ea16..b1b63e243 100644 --- a/gen3/bin/reset.sh +++ b/gen3/bin/reset.sh @@ -137,8 +137,12 @@ sleep 30 # for serviceName in $(gen3 db services); do if [[ "$serviceName" != "peregrine" ]]; then # sheepdog and peregrine share the same db - # --force will also drop connections to the database to ensure database gets dropped - gen3 db reset "$serviceName" --force + if [[ "$serviceName" != "argo" ]]; then + # --force will also drop connections to the database to ensure database gets dropped + gen3 db reset "$serviceName" --force + else + echo "Skipping the Argo DB reset, as that will delete archived workflows." + fi fi done diff --git a/gen3/bin/s3.sh b/gen3/bin/s3.sh index e89d3ca7b..0625bdf83 100644 --- a/gen3/bin/s3.sh +++ b/gen3/bin/s3.sh @@ -103,7 +103,8 @@ _add_bucket_to_cloudtrail() { # _bucket_exists() { local bucketName=$1 - if [[ -z "$(gen3_aws_run aws s3api head-bucket --bucket $bucketName 2>&1)" ]]; then + gen3_aws_run aws s3api head-bucket --bucket $bucketName > /dev/null 2>&1 + if [[ $? -eq 0 ]]; then echo 0 else echo 1 @@ -173,10 +174,12 @@ gen3_s3_info() { gen3_log_err "Unable to fetch AWS account ID." return 1 fi - if [[ ! 
-z "$(gen3_aws_run aws s3api head-bucket --bucket $1 2>&1)" ]]; then + + if [[ $(_bucket_exists $bucketName) -ne 0 ]]; then gen3_log_err "Bucket does not exist" return 1 fi + local rootPolicyArn="arn:aws:iam::${AWS_ACCOUNT_ID}:policy" if gen3_aws_run aws iam get-policy --policy-arn ${rootPolicyArn}/${writerName} >/dev/null 2>&1; then writerPolicy="{ \"name\": \"$writerName\", \"policy_arn\": \"${rootPolicyArn}/${writerName}\" } " diff --git a/gen3/bin/shutdown.sh b/gen3/bin/shutdown.sh index 46ca49925..2b5a55e9b 100644 --- a/gen3/bin/shutdown.sh +++ b/gen3/bin/shutdown.sh @@ -30,6 +30,8 @@ gen3_shutdown_namespace() { ( export KUBECTL_NAMESPACE="$namespace" g3kubectl delete --all deployments --now & + # Delete all StatefulSets + g3kubectl delete --all statefulsets --now & # ssjdispatcher leaves jobs laying around when undeployed g3kubectl delete --all "jobs" --now & # ssjdispatcher leaves jobs laying around when undeployed diff --git a/gen3/bin/waf-rules-GPE-312.json b/gen3/bin/waf-rules-GPE-312.json deleted file mode 100644 index b8cdccabe..000000000 --- a/gen3/bin/waf-rules-GPE-312.json +++ /dev/null @@ -1,153 +0,0 @@ -[ - { - "Name": "AWS-AWSManagedRulesAdminProtectionRuleSet", - "Priority": 0, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesAdminProtectionRuleSet", - "RuleActionOverrides": [ - { - "Name": "AdminProtection_URIPATH", - "ActionToUse": { - "Challenge": {} - } - } - ] - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesAdminProtectionRuleSet" - } - }, - { - "Name": "AWS-AWSManagedRulesAmazonIpReputationList", - "Priority": 1, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesAmazonIpReputationList", - "RuleActionOverrides": [ - { - "Name": "AWSManagedReconnaissanceList", - "ActionToUse": { - "Count": {} - } - } - ] - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesAmazonIpReputationList" - } - }, - { - "Name": "AWS-AWSManagedRulesCommonRuleSet", - "Priority": 2, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesCommonRuleSet", - "Version": "Version_1.4", - "RuleActionOverrides": [ - { - "Name": "EC2MetaDataSSRF_BODY", - "ActionToUse": { - "Count": {} - } - }, - { - "Name": "GenericLFI_BODY", - "ActionToUse": { - "Allow": {} - } - }, - { - "Name": "SizeRestrictions_QUERYSTRING", - "ActionToUse": { - "Count": {} - } - }, - { - "Name": "SizeRestrictions_BODY", - "ActionToUse": { - "Allow": {} - } - }, - { - "Name": "CrossSiteScripting_BODY", - "ActionToUse": { - "Count": {} - } - }, - { - "Name": "SizeRestrictions_URIPATH", - "ActionToUse": { - "Allow": {} - } - }, - { - "Name": "SizeRestrictions_Cookie_HEADER", - "ActionToUse": { - "Allow": {} - } - } - ] - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesCommonRuleSet" - } - }, - { - "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet", - "Priority": 3, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesKnownBadInputsRuleSet" - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - 
"CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesKnownBadInputsRuleSet" - } - }, - { - "Name": "AWS-AWSManagedRulesLinuxRuleSet", - "Priority": 4, - "Statement": { - "ManagedRuleGroupStatement": { - "VendorName": "AWS", - "Name": "AWSManagedRulesLinuxRuleSet" - } - }, - "OverrideAction": { - "None": {} - }, - "VisibilityConfig": { - "SampledRequestsEnabled": true, - "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesLinuxRuleSet" - } - } -] \ No newline at end of file diff --git a/gen3/bin/workon.sh b/gen3/bin/workon.sh index e7b951d1c..f614cf662 100644 --- a/gen3/bin/workon.sh +++ b/gen3/bin/workon.sh @@ -113,7 +113,7 @@ if [[ ! -f "$bucketCheckFlag" && "$GEN3_FLAVOR" == "AWS" ]]; then } EOM ) - gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" --create-bucket-configuration ‘{“LocationConstraint”:“‘$(aws configure get $GEN3_PROFILE.region)‘“}’ + gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" $([[ $(aws configure get $GEN3_PROFILE.region) = "us-east-1" ]] && echo "" || echo --create-bucket-configuration LocationConstraint="$(aws configure get $GEN3_PROFILE.region)") sleep 5 # Avoid race conditions if gen3_aws_run aws s3api put-bucket-encryption --bucket "$GEN3_S3_BUCKET" --server-side-encryption-configuration "$S3_POLICY"; then touch "$bucketCheckFlag" diff --git a/gen3/lib/g3k_manifest.sh b/gen3/lib/g3k_manifest.sh index ae42e84ba..d69ef5b99 100644 --- a/gen3/lib/g3k_manifest.sh +++ b/gen3/lib/g3k_manifest.sh @@ -253,8 +253,11 @@ g3k_manifest_filter() { kvList+=("$kvLabelKey" "tags.datadoghq.com/version: '$version'") done environment="$(g3k_config_lookup ".global.environment" "$manifestPath")" + hostname="$(g3k_config_lookup ".global.hostname" "$manifestPath")" kvEnvKey=$(echo "GEN3_ENV_LABEL" | tr '[:lower:]' '[:upper:]') + kvHostKey=$(echo "GEN3_HOSTNAME_LABEL" | tr '[:lower:]' '[:upper:]') kvList+=("$kvEnvKey" "tags.datadoghq.com/env: $environment") + kvList+=("$kvHostKey" "hostname: $hostname") for key in $(g3k_config_lookup '. | keys[]' "$manifestPath"); do gen3_log_debug "harvesting key $key" for key2 in $(g3k_config_lookup ".[\"${key}\"] "' | to_entries | map(select((.value|type != "array") and (.value|type != "object"))) | map(.key)[]' "$manifestPath" | grep '^[a-zA-Z]'); do diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index adc35ad2f..80538842e 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -243,7 +243,7 @@ spec: cpu: 0.4 memory: 1200Mi limits: - cpu: 1.0 + cpu: 2.0 memory: 2400Mi command: ["/bin/bash"] args: diff --git a/gen3/test/bootstrapTest.sh b/gen3/test/bootstrapTest.sh index be3241f31..d07512d8b 100644 --- a/gen3/test/bootstrapTest.sh +++ b/gen3/test/bootstrapTest.sh @@ -12,7 +12,7 @@ test_bootstrap_fenceconfig() { because $? "secret template exists and is valid yaml: $secretConf" [[ -f "$publicConf" ]] && yq -r . < "$secretConf" > /dev/null; because $? "public template exists and is valid yaml: $secretConf" - python3.8 "$GEN3_HOME/apis_configs/yaml_merge.py" "$publicConf" "$secretConf" | yq -r . > /dev/null; + python3.9 "$GEN3_HOME/apis_configs/yaml_merge.py" "$publicConf" "$secretConf" | yq -r . > /dev/null; because $? 
"yaml_perge public private should yield valid yaml" } diff --git a/gen3/test/fenceStuffTest.sh b/gen3/test/fenceStuffTest.sh index 09a0eb125..df250a1ad 100644 --- a/gen3/test/fenceStuffTest.sh +++ b/gen3/test/fenceStuffTest.sh @@ -17,7 +17,7 @@ EOM C: 4 B: 3 EOM - json3="$(python3.8 "$GEN3_HOME/apis_configs/yaml_merge.py" "$yaml1" "$yaml2")"; because $? "yaml_merge should succeed" + json3="$(python3.9 "$GEN3_HOME/apis_configs/yaml_merge.py" "$yaml1" "$yaml2")"; because $? "yaml_merge should succeed" [[ "1" == "$(jq -r .A <<<"$json3")" ]]; because $? ".A should be 1" /bin/rm "$yaml1" /bin/rm "$yaml2" diff --git a/kube/services/ambassador/ambassador-deploy.yaml b/kube/services/ambassador/ambassador-deploy.yaml index 8788cef13..28e6a41fd 100644 --- a/kube/services/ambassador/ambassador-deploy.yaml +++ b/kube/services/ambassador/ambassador-deploy.yaml @@ -24,6 +24,7 @@ spec: netnolimit: "yes" userhelper: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/arborist/arborist-deploy.yaml b/kube/services/arborist/arborist-deploy.yaml index 5deef6ac7..360c5c04a 100644 --- a/kube/services/arborist/arborist-deploy.yaml +++ b/kube/services/arborist/arborist-deploy.yaml @@ -24,6 +24,7 @@ spec: # for revproxy authz public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index ae1c16653..c084533fe 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -4,83 +4,44 @@ metadata: name: karpenter-templates namespace: argo-events data: - provisioner.yaml: | - apiVersion: karpenter.sh/v1alpha5 - kind: Provisioner + nodeclass.yaml: | + apiVersion: karpenter.k8s.aws/v1beta1 + kind: EC2NodeClass metadata: name: workflow-WORKFLOW_NAME spec: - requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["on-demand"] - - key: kubernetes.io/arch - operator: In - values: - - amd64 - - key: node.kubernetes.io/instance-type - operator: In - values: - - c6a.large - - c6a.xlarge - - c6a.2xlarge - - c6a.4xlarge - - c6a.8xlarge - - c6a.12xlarge - - c6i.large - - c6i.xlarge - - c6i.2xlarge - - c6i.4xlarge - - c6i.8xlarge - - c6i.12xlarge - - m6a.2xlarge - - m6a.4xlarge - - m6a.8xlarge - - m6a.12xlarge - - m6a.16xlarge - - m6i.2xlarge - - m6i.4xlarge - - m6i.8xlarge - - m6i.12xlarge - - m6i.16xlarge - taints: - - key: role - value: WORKFLOW_NAME - effect: NoSchedule - labels: - role: WORKFLOW_NAME - limits: - resources: - cpu: 2000 - providerRef: - name: workflow-WORKFLOW_NAME - # Kill nodes after 30 days to ensure they stay up to date - ttlSecondsUntilExpired: 2592000 - ttlSecondsAfterEmpty: 10 - - nodetemplate.yaml: | - apiVersion: karpenter.k8s.aws/v1alpha1 - kind: AWSNodeTemplate - metadata: - name: workflow-WORKFLOW_NAME - spec: - subnetSelector: - karpenter.sh/discovery: ENVIRONMENT - securityGroupSelector: - karpenter.sh/discovery: ENVIRONMENT-workflow - tags: - Environment: ENVIRONMENT - Name: eks-ENVIRONMENT-workflow-karpenter - karpenter.sh/discovery: ENVIRONMENT - workflowname: WORKFLOW_NAME - gen3username: GEN3_USERNAME - gen3service: argo-workflows - purpose: workflow + amiFamily: AL2 + amiSelectorTerms: + - name: 1-31-EKS-FIPS* + owner: "143731057154" + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + deleteOnTermination: true + encrypted: true + volumeSize: 100Gi + volumeType: gp2 metadataOptions: httpEndpoint: enabled 
httpProtocolIPv6: disabled httpPutResponseHopLimit: 2 httpTokens: optional + role: eks_ENVIRONMENT_workers_role + securityGroupSelectorTerms: + - tags: + karpenter.sh/discovery: ENVIRONMENT-workflow + subnetSelectorTerms: + - tags: + karpenter.sh/discovery: ENVIRONMENT + tags: + Environment: ENVIRONMENT + Name: eks-ENVIRONMENT-workflow-karpenter + gen3service: argo-workflows + gen3username: GEN3_USERNAME + gen3teamproject: "GEN3_TEAMNAME" + karpenter.sh/discovery: ENVIRONMENT + purpose: workflow + workflowname: WORKFLOW_NAME userData: | MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="BOUNDARY" @@ -96,27 +57,99 @@ data: sysctl -w fs.inotify.max_user_watches=12000 - sudo yum update -y - sudo yum install -y dracut-fips openssl >> /opt/fips-install.log - sudo dracut -f - # configure grub - sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - - --BOUNDARY - Content-Type: text/cloud-config; charset="us-ascii" - - power_state: - delay: now - mode: reboot - message: Powering off - timeout: 2 - condition: true - --BOUNDARY-- - blockDeviceMappings: - - deviceName: /dev/xvda - ebs: - volumeSize: 100Gi - volumeType: gp2 - encrypted: true - deleteOnTermination: true + + nodepool.yaml: | + apiVersion: karpenter.sh/v1beta1 + kind: NodePool + metadata: + name: workflow-WORKFLOW_NAME + spec: + disruption: + consolidateAfter: 10s + consolidationPolicy: WhenEmpty + expireAfter: 48h0m0s + limits: + cpu: 4k + template: + metadata: + labels: + purpose: workflow + role: WORKFLOW_NAME + spec: + nodeClassRef: + name: workflow-WORKFLOW_NAME + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: node.kubernetes.io/instance-type + operator: In + values: + - c6a.large + - c6a.xlarge + - c6a.2xlarge + - c6a.4xlarge + - c6a.8xlarge + - c6a.12xlarge + - c7a.large + - c7a.xlarge + - c7a.2xlarge + - c7a.4xlarge + - c7a.8xlarge + - c7a.12xlarge + - c6i.large + - c6i.xlarge + - c6i.2xlarge + - c6i.4xlarge + - c6i.8xlarge + - c6i.12xlarge + - c7i.large + - c7i.xlarge + - c7i.2xlarge + - c7i.4xlarge + - c7i.8xlarge + - c7i.12xlarge + - m6a.2xlarge + - m6a.4xlarge + - m6a.8xlarge + - m6a.12xlarge + - m6a.16xlarge + - m6a.24xlarge + - m7a.2xlarge + - m7a.4xlarge + - m7a.8xlarge + - m7a.12xlarge + - m7a.16xlarge + - m7a.24xlarge + - m6i.2xlarge + - m6i.4xlarge + - m6i.8xlarge + - m6i.12xlarge + - m6i.16xlarge + - m6i.24xlarge + - m7i.2xlarge + - m7i.4xlarge + - m7i.8xlarge + - m7i.12xlarge + - m7i.16xlarge + - m7i.24xlarge + - r7iz.2xlarge + - r7iz.4xlarge + - r7iz.8xlarge + - r7iz.12xlarge + - r7iz.16xlarge + - r7iz.24xlarge + - key: kubernetes.io/os + operator: In + values: + - linux + taints: + - effect: NoSchedule + key: role + value: WORKFLOW_NAME diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml index 293c0e119..e483d3297 100644 --- a/kube/services/argo-events/workflows/sensor-completed.yaml +++ b/kube/services/argo-events/workflows/sensor-completed.yaml @@ -51,12 +51,12 @@ spec: args: - "-c" - | - if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + if kubectl get nodepool workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete nodepool workflow-$WORKFLOW_NAME fi - if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete provisioners workflow-$WORKFLOW_NAME + if kubectl get ec2nodeclass 
workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete ec2nodeclass workflow-$WORKFLOW_NAME fi env: - name: WORKFLOW_NAME diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index 4221f5742..05da3bc38 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -36,6 +36,10 @@ spec: dependencyName: workflow-created-event dataKey: body.metadata.labels.gen3username dest: spec.template.spec.containers.0.env.1.value + - src: + dependencyName: workflow-created-event + dataKey: body.metadata.labels.gen3teamproject + dest: spec.template.spec.containers.0.env.2.value source: resource: apiVersion: batch/v1 @@ -59,26 +63,42 @@ spec: args: - "-c" - | - if ! kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - + #!/bin/bash + + if [ -z "$NODEPOOL_TEMPLATE" ]; then + NODEPOOL_TEMPLATE="/manifests/nodepool.yaml" + fi + + if [ -z "$NODECLASS_TEMPLATE" ]; then + NODECLASS_TEMPLATE="/manifests/nodeclass.yaml" + fi + + if ! kubectl get ec2nodeclass workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/GEN3_TEAMNAME/$GEN3_TEAMNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODECLASS_TEMPLATE" | kubectl apply -f - fi - if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - + if ! kubectl get nodepool workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/GEN3_TEAMNAME/$GEN3_TEAMNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODEPOOL_TEMPLATE" | kubectl apply -f - fi env: - name: WORKFLOW_NAME value: "" - name: GEN3_USERNAME value: "" + - name: GEN3_TEAMNAME + value: "" - name: ENVIRONMENT valueFrom: configMapKeyRef: name: environment key: environment + - name: NODEPOOL_TEMPLATE + value: /manifests/nodepool.yaml + - name: NODECLASS_TEMPLATE + value: /manifests/nodeclass.yaml volumeMounts: - name: karpenter-templates-volume - mountPath: /home/manifests + mountPath: /manifests volumes: - name: karpenter-templates-volume configMap: diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml index c235a820a..0b12cb118 100644 --- a/kube/services/argo-events/workflows/sensor-deleted.yaml +++ b/kube/services/argo-events/workflows/sensor-deleted.yaml @@ -47,12 +47,12 @@ spec: args: - "-c" - | - if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + if kubectl get nodepool workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete nodepool workflow-$WORKFLOW_NAME fi - if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete provisioners workflow-$WORKFLOW_NAME + if kubectl get ec2nodeclass workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete ec2nodeclass workflow-$WORKFLOW_NAME fi env: - name: WORKFLOW_NAME diff --git a/kube/services/argo-pod-pending-monitor/application.yaml b/kube/services/argo-pod-pending-monitor/application.yaml new file mode 100644 index 000000000..9bfc1a7e6 --- /dev/null +++ 
b/kube/services/argo-pod-pending-monitor/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: argo-pod-pending-monitor-application + namespace: argocd +spec: + destination: + namespace: default + server: https://kubernetes.default.svc + project: default + source: + repoURL: https://github.com/uc-cdis/cloud-automation.git + targetRevision: master + path: kube/services/argo-pod-pending-monitor + directory: + exclude: "application.yaml" + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/kube/services/argo-pod-pending-monitor/argo-pod-pending.yaml b/kube/services/argo-pod-pending-monitor/argo-pod-pending.yaml new file mode 100644 index 000000000..d3d75a84e --- /dev/null +++ b/kube/services/argo-pod-pending-monitor/argo-pod-pending.yaml @@ -0,0 +1,42 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: argo-pod-pending-monitor + namespace: default +spec: + schedule: "*/5 * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: node-monitor + containers: + - name: kubectl + image: quay.io/cdis/awshelper + env: + # This is the label we want to monitor, probably will never need to change + - name: NODE_LABEL + value: purpose=workflow + # This is in minutes + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_alarm_webhook + + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + pending_pods=$(kubectl get pods -n argo -o json | jq -r '.items[] | select(.status.phase == "Pending") | {name: .metadata.name, creationTimestamp: .metadata.creationTimestamp} | select(((now - (.creationTimestamp | fromdateiso8601)) / 60) > 15) | .name') + if [[ ! -z $pending_pods ]]; then + echo "Pods $pending_pods has been around too long, sending an alert" + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Pods \`${pending_pods}\` are older than 15 minutes!\"}" $SLACK_WEBHOOK_URL + else + echo "All good here!" 
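The jq pipeline above is the authoritative filter; a rough spot check by hand (without the 15-minute age cutoff) is simply:

    kubectl get pods -n argo --field-selector=status.phase=Pending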
+ fi + restartPolicy: OnFailure diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index 89ec29ecc..3b9d1b6a2 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -24,6 +24,7 @@ spec: GEN3_ENV_LABEL GEN3_ARGO-WRAPPER_VERSION GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 23dda4a5a..c1e951773 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,6 +1,5 @@ controller: - parallelism: 10 - namespaceParallelism: 5 + parallelism: 7 metricsConfig: # -- Enables prometheus metrics server enabled: true @@ -62,6 +61,20 @@ controller: workflowDefaults: spec: archiveLogs: true + onExit: alert-on-timeout + templates: + - name: alert-on-timeout + script: + image: quay.io/cdis/amazonlinux-debug:master + command: [sh] + envFrom: + - secretRef: + name: slack-webhook-secret + source: | + failure_reason=$(echo {{workflow.failures}} | jq 'any(.[]; .message == "Step exceeded its deadline")' ) + if [ "$failure_reason" = "true" ]; then + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"ALERT: Workflow {{workflow.name}} has been killed due to timeout\"}" "$SLACK_WEBHOOK_URL" + fi # -- [Node selector] nodeSelector: @@ -92,18 +105,13 @@ server: # -- Influences the creation of the ConfigMap for the workflow-controller itself. useDefaultArtifactRepo: true +# -- Use static credentials for S3 (eg. when not using AWS IRSA) +useStaticCredentials: false artifactRepository: # -- Archive the main container logs as an artifact archiveLogs: true # -- Store artifact in a S3-compliant object store s3: - # Note the `key` attribute is not the actual secret, it's the PATH to - # the contents in the associated secret, as defined by the `name` attribute. - accessKeySecret: - name: argo-s3-creds - key: AccessKeyId - secretKeySecret: - name: argo-s3-creds - key: SecretAccessKey bucket: GEN3_ARGO_BUCKET endpoint: s3.amazonaws.com + useSDKCreds: true diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml index 935cab408..b0cf5c661 100644 --- a/kube/services/audit-service/audit-service-deploy.yaml +++ b/kube/services/audit-service/audit-service-deploy.yaml @@ -27,6 +27,7 @@ spec: # for network policy netnolimit: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: serviceAccountName: audit-service-sa affinity: @@ -93,6 +94,11 @@ spec: readOnly: true mountPath: "/src/audit-service-config.yaml" subPath: "audit-service-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: "config-volume" + readOnly: true + mountPath: "/audit/audit-service-config.yaml" + subPath: "audit-service-config.yaml" resources: requests: cpu: 100m @@ -108,6 +114,11 @@ spec: readOnly: true mountPath: "/src/audit-service-config.yaml" subPath: "audit-service-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. 
+ - name: "config-volume" + readOnly: true + mountPath: "/audit/audit-service-config.yaml" + subPath: "audit-service-config.yaml" resources: limits: cpu: 0.8 @@ -116,4 +127,5 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy-irsa.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy-irsa.yaml new file mode 100644 index 000000000..9cda339cc --- /dev/null +++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy-irsa.yaml @@ -0,0 +1,83 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aws-es-proxy-deployment + annotations: + gen3.io/network-ingress: "arranger,arranger-server,arranger-dashboard,guppy,metadata,spark,tube" +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: esproxy + revisionHistoryLimit: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: esproxy + netvpc: "yes" + GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL + spec: + serviceAccountName: esproxy-sa + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + automountServiceAccountToken: false + priorityClassName: aws-es-proxy-high-priority + containers: + - name: esproxy + GEN3_AWS-ES-PROXY_IMAGE|-image: quay.io/cdis/aws-es-proxy:v1.3.1-| + imagePullPolicy: Always + ports: + - containerPort: 9200 + env: + - name: "ES_ENDPOINT" + GEN3_ES_ENDPOINT|-value: es.internal.io-| + command: ["/bin/sh"] + # NOTE- NEED TO RUN `gen3 kube-setup-aws-es-proxy` TO POPULATE ES_ENDPOINT - ugh! + # NOTE- `gen3 roll aws-es-proxy` WILL NOT WORK! 
+ args: + - "-c" + - | + if [ -f /aws-es-proxy ]; + then + # 1.3 needs this PR: https://github.com/uc-cdis/aws-es-proxy/pull/2 + # aws-es-proxy 1.0+ is prone to throw ES timeout error from client + # customize timeout value to compensate this, note the -timeout option only works for 1.2+ + BINARY="/aws-es-proxy -timeout 180" + elif [ -f /usr/local/bin/aws-es-proxy ]; + then + # 0.9 + BINARY=/usr/local/bin/aws-es-proxy + elif [ -f /go/src/github.com/abutaha/aws-es-proxy/aws-es-proxy ]; + then + # 0.8 + BINARY=/go/src/github.com/abutaha/aws-es-proxy/aws-es-proxy + fi + ${BINARY} -endpoint "https://$ES_ENDPOINT" -verbose -listen ":9200" + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 1000m + memory: 2Gi \ No newline at end of file diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml index ad74fc25b..c7f72b4d8 100644 --- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml +++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml @@ -21,6 +21,7 @@ spec: app: esproxy netvpc: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: nodeAffinity: @@ -44,6 +45,7 @@ spec: - name: credentials secret: secretName: "aws-es-proxy" + priorityClassName: aws-es-proxy-high-priority containers: - name: esproxy GEN3_AWS-ES-PROXY_IMAGE|-image: quay.io/cdis/aws-es-proxy:0.8-| diff --git a/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml new file mode 100644 index 000000000..6bd619a22 --- /dev/null +++ b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: aws-es-proxy-high-priority +value: 1000000 +globalDefault: false +description: "Priority class for aws-es-proxy service" diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml index fa6b741a2..740e18c91 100644 --- a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml +++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml @@ -97,6 +97,36 @@ spec: secretKeyRef: name: cedar-g3auto key: "cedar_api_key.txt" + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_PROFILING_ENABLED + value: "true" + - name: DD_TRACE_SAMPLE_RATE + value: "1" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP volumeMounts: - name: "ca-volume" readOnly: true diff --git a/kube/services/cluster-level-resources/app.yaml b/kube/services/cluster-level-resources/app.yaml new file mode 100644 index 000000000..95a2ed4c4 --- /dev/null +++ b/kube/services/cluster-level-resources/app.yaml @@ -0,0 +1,21 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: cluster-level-resources + namespace: argocd +spec: + project: default + destination: + namespace: argocd + server: https://kubernetes.default.svc + source: + repoURL: https://github.com/uc-cdis/gen3-gitops.git + targetRevision: TARGET_REVISION + path: cluster-level-resources + helm: + valueFiles: + - 
../CLUSTER_NAME/cluster-values/cluster-values.yaml + releaseName: cluster-level-resources + syncPolicy: + automated: + selfHeal: true diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index 43bd90e5d..58040e6d4 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -16,6 +16,7 @@ spec: release: production public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: nodeAffinity: diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml index 9df6fbc93..7cd9b6bbe 100644 --- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml @@ -16,6 +16,7 @@ spec: release: production public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: nodeAffinity: diff --git a/kube/services/dicom-viewer/dicom-viewer-service.yaml b/kube/services/dicom-viewer/dicom-viewer-service.yaml index ea2576584..26f3a21b0 100644 --- a/kube/services/dicom-viewer/dicom-viewer-service.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-service.yaml @@ -12,4 +12,4 @@ spec: nodePort: null name: http type: ClusterIP - \ No newline at end of file + diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 1722676e0..cf03036df 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -17,6 +17,9 @@ spec: maxUnavailable: 0 template: metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics labels: app: fence release: production @@ -28,6 +31,7 @@ spec: userhelper: "yes" tags.datadoghq.com/service: "fence" GEN3_ENV_LABEL + GEN3_HOSTNAME_LABEL GEN3_FENCE_VERSION GEN3_DATE_LABEL spec: @@ -243,7 +247,7 @@ spec: cpu: 0.4 memory: 1200Mi limits: - cpu: 1.0 + cpu: 2.0 memory: 2400Mi command: ["/bin/bash"] args: diff --git a/kube/services/frontend-framework/frontend-framework-deploy.yaml b/kube/services/frontend-framework/frontend-framework-deploy.yaml index f0da277dc..7acc9f745 100644 --- a/kube/services/frontend-framework/frontend-framework-deploy.yaml +++ b/kube/services/frontend-framework/frontend-framework-deploy.yaml @@ -18,6 +18,7 @@ spec: app: frontend-framework public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml index 8cad981c8..15ca3d6a1 100644 --- a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml +++ b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml @@ -18,6 +18,7 @@ spec: app: frontend-framework public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index c3e8d121c..1dc6c7da0 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -23,6 +23,7 @@ spec: GEN3_GUPPY_VERSION GEN3_ENV_LABEL GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml index f7de81d79..80e64a582 100644 --- a/kube/services/hatchery/hatchery-deploy.yaml +++ b/kube/services/hatchery/hatchery-deploy.yaml @@ -24,6 +24,7 @@ spec: GEN3_HATCHERY_VERSION GEN3_ENV_LABEL GEN3_DATE_LABEL + 
GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml index 239079058..af60e9b4a 100644 --- a/kube/services/indexd/indexd-deploy.yaml +++ b/kube/services/indexd/indexd-deploy.yaml @@ -27,6 +27,7 @@ spec: GEN3_ENV_LABEL GEN3_INDEXD_VERSION GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml index 954e996f2..596c726a0 100644 --- a/kube/services/jenkins/jenkins-deploy.yaml +++ b/kube/services/jenkins/jenkins-deploy.yaml @@ -135,19 +135,6 @@ spec: subPath: "ca.pem" - name: dockersock mountPath: "/var/run/docker.sock" - - name: selenium - image: selenium/standalone-chrome:3.14 - ports: - - containerPort: 4444 - readinessProbe: - httpGet: - path: /wd/hub/sessions - port: 4444 - readinessProbe: - httpGet: - path: /wd/hub/sessions - port: 4444 - imagePullPolicy: Always volumes: - name: datadir persistentVolumeClaim: diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml index 08365f811..c54464b00 100644 --- a/kube/services/jenkins2/jenkins2-deploy.yaml +++ b/kube/services/jenkins2/jenkins2-deploy.yaml @@ -131,19 +131,6 @@ spec: subPath: "ca.pem" - name: dockersock mountPath: "/var/run/docker.sock" - - name: selenium - image: selenium/standalone-chrome:3.14 - ports: - - containerPort: 4444 - readinessProbe: - httpGet: - path: /wd/hub/sessions - port: 4444 - readinessProbe: - httpGet: - path: /wd/hub/sessions - port: 4444 - imagePullPolicy: Always volumes: - name: datadir persistentVolumeClaim: diff --git a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml index 29603d27f..a72623736 100644 --- a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml +++ b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: arborist-rm-expired-access diff --git a/kube/services/jobs/covid19-bayes-cronjob.yaml b/kube/services/jobs/covid19-bayes-cronjob.yaml index 733c17cf7..01e71bade 100644 --- a/kube/services/jobs/covid19-bayes-cronjob.yaml +++ b/kube/services/jobs/covid19-bayes-cronjob.yaml @@ -1,5 +1,5 @@ # gen3 job run covid19-bayes-cronjob S3_BUCKET -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: covid19-bayes diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml index 463fbfb2e..3c3828dac 100644 --- a/kube/services/jobs/etl-cronjob.yaml +++ b/kube/services/jobs/etl-cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: etl @@ -95,8 +95,10 @@ spec: subPath: user.yaml resources: limits: - cpu: 1 + cpu: 2 memory: 10Gi + requests: + cpu: 2 command: ["/bin/bash"] args: - "-c" diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml index 6b9b887ec..266b0410c 100644 --- a/kube/services/jobs/etl-job.yaml +++ b/kube/services/jobs/etl-job.yaml @@ -91,8 +91,10 @@ spec: subPath: user.yaml resources: limits: - cpu: 1 + cpu: 2 memory: 10Gi + requests: + cpu: 2 command: ["/bin/bash" ] args: - "-c" diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml index 6c58ef291..eba842ddf 100644 --- a/kube/services/jobs/fence-visa-update-cronjob.yaml +++ b/kube/services/jobs/fence-visa-update-cronjob.yaml @@ -1,4 
+1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: fence-visa-update diff --git a/kube/services/jobs/gdcdb-create-job.yaml b/kube/services/jobs/gdcdb-create-job.yaml index 1668429ad..9bdc8bd09 100644 --- a/kube/services/jobs/gdcdb-create-job.yaml +++ b/kube/services/jobs/gdcdb-create-job.yaml @@ -51,7 +51,7 @@ spec: - "-c" # Script always succeeds if it runs (echo exits with 0) - | - eval $(python 2> /dev/null < /dev/null && python || poetry run python) < /dev/null; then + echo datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P XXXXXXX -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P "${db_creds[db_password]}" -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + echo python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password XXXXX --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password "${db_creds[db_password]}" --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + + else + echo poetry run datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P XXXXXXX -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + poetry run datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P "${db_creds[db_password]}" -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + echo poetry run python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password XXXXX --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + poetry run python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password "${db_creds[db_password]}" --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + fi echo "Exit code: $?" 
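Note that the echo lines above intentionally print XXXXXXX / XXXXX where the real ${db_creds[db_password]} would go, so the password reaches the datamodel_postgres_admin and setup_transactionlogs.py commands but never the job logs.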
restartPolicy: Never diff --git a/kube/services/jobs/google-delete-expired-access-cronjob.yaml b/kube/services/jobs/google-delete-expired-access-cronjob.yaml index ce485cce3..2b9e4e49a 100644 --- a/kube/services/jobs/google-delete-expired-access-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-access-cronjob.yaml @@ -1,5 +1,5 @@ --- -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: google-delete-expired-access diff --git a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml index eb102f5bf..b40e22624 100644 --- a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-delete-expired-service-account diff --git a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml index 499d6cabd..6b4fc10aa 100644 --- a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml +++ b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-init-proxy-groups diff --git a/kube/services/jobs/google-manage-account-access-cronjob.yaml b/kube/services/jobs/google-manage-account-access-cronjob.yaml index 4e796cea0..fd8bba606 100644 --- a/kube/services/jobs/google-manage-account-access-cronjob.yaml +++ b/kube/services/jobs/google-manage-account-access-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-manage-account-access diff --git a/kube/services/jobs/google-manage-keys-cronjob.yaml b/kube/services/jobs/google-manage-keys-cronjob.yaml index ea0bcc45f..eff76d30a 100644 --- a/kube/services/jobs/google-manage-keys-cronjob.yaml +++ b/kube/services/jobs/google-manage-keys-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-manage-keys diff --git a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml index 57981d813..49e83374f 100644 --- a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml +++ b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-verify-bucket-access-group diff --git a/kube/services/jobs/hatchery-reaper-job.yaml b/kube/services/jobs/hatchery-reaper-job.yaml index 77d249e37..4f54752c9 100644 --- a/kube/services/jobs/hatchery-reaper-job.yaml +++ b/kube/services/jobs/hatchery-reaper-job.yaml @@ -110,7 +110,7 @@ spec: done # legacy reaper code - gen3_log_info "Running legacy reaper job (based on local cluster/ prometheus)" + gen3_log_info 
"Running legacy reaper job (based on Mimir)" if appList="$(gen3 jupyter idle none "$(gen3 db namespace)" kill)" && [[ -n "$appList" && -n "$slackWebHook" && "$slackWebHook" != "None" ]]; then curl -X POST --data-urlencode "payload={\"text\": \"hatchery-reaper in $gen3Hostname: \n\`\`\`\n${appList}\n\`\`\`\"}" "${slackWebHook}" fi diff --git a/kube/services/jobs/healthcheck-cronjob.yaml b/kube/services/jobs/healthcheck-cronjob.yaml index d79274bb7..1ca71fc8d 100644 --- a/kube/services/jobs/healthcheck-cronjob.yaml +++ b/kube/services/jobs/healthcheck-cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: healthcheck diff --git a/kube/services/jobs/indexd-authz-job.yaml b/kube/services/jobs/indexd-authz-job.yaml index 8b041740e..473159f35 100644 --- a/kube/services/jobs/indexd-authz-job.yaml +++ b/kube/services/jobs/indexd-authz-job.yaml @@ -39,6 +39,9 @@ spec: - name: config-helper configMap: name: config-helper + - name: fence-yaml + configMap: + name: fence containers: - name: indexd GEN3_INDEXD_IMAGE @@ -70,12 +73,15 @@ spec: readOnly: true mountPath: "/var/www/indexd/config_helper.py" subPath: config_helper.py + - name: "fence-yaml" + mountPath: "/var/www/indexd/user.yaml" + subPath: user.yaml imagePullPolicy: Always command: ["/bin/bash"] args: - "-c" - | - flags="--path /var/www/indexd/ --arborist-url http://arborist-service" + flags="--path /var/www/indexd/ --arborist-url http://arborist-service --user-yaml-path /var/www/indexd/user.yaml" if [[ "$USE_SHEEPDOG" == "true" ]]; then db_uri=$(python - <<- EOF from base64 import b64decode diff --git a/kube/services/jobs/indexd-single-table-migration-job.yaml b/kube/services/jobs/indexd-single-table-migration-job.yaml new file mode 100644 index 000000000..e1e6a81a3 --- /dev/null +++ b/kube/services/jobs/indexd-single-table-migration-job.yaml @@ -0,0 +1,83 @@ +# Setup for running this migration https://github.com/uc-cdis/indexd/blob/master/docs/migration_to_single_table_indexd.md +apiVersion: batch/v1 +kind: Job +metadata: + name: indexd-single-table-migration +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + automountServiceAccountToken: false + volumes: + - name: config-volume + secret: + secretName: "indexd-secret" # pragma: allowlist secret + - name: "indexd-creds-volume" + secret: + secretName: "indexd-creds" # pragma: allowlist secret + - name: config-helper + configMap: + name: config-helper + - name: creds-volume-new + secret: + secretName: "indexd-new-creds" # pragma: allowlist secret + containers: + - name: indexd + GEN3_INDEXD_IMAGE + env: + - name: START_DID + GEN3_START_DID|-value: ""-| + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/indexd/local_settings.py" + subPath: "local_settings.py" + - name: "indexd-creds-volume" + readOnly: true + mountPath: "/var/www/indexd/creds.json" + subPath: creds.json + - name: "config-helper" + readOnly: true + mountPath: "/var/www/indexd/config_helper.py" + subPath: config_helper.py + - name: "creds-volume-new" + readOnly: true + mountPath: "/var/www/indexd/creds_new.json" + subPath: creds.json + resources: + requests: + cpu: 1000m + memory: 1Gi + imagePullPolicy: 
Always + command: ["/bin/bash"] + args: + - "-c" + - | + flags="--creds-path /var/www/indexd/creds_new.json" + if [[ -n "$START_DID" ]]; then + flags="$flags --start-did $START_DID" + fi + time python /indexd/bin/migrate_to_single_table.py $flags + echo "Exit code: $?" + restartPolicy: Never + + diff --git a/kube/services/jobs/indexd-userdb-job.yaml b/kube/services/jobs/indexd-userdb-job.yaml index 676307481..228a1989d 100644 --- a/kube/services/jobs/indexd-userdb-job.yaml +++ b/kube/services/jobs/indexd-userdb-job.yaml @@ -74,7 +74,7 @@ spec: # Script always succeeds if it runs (echo exits with 0) # indexd image does not include jq, so use python - | - eval $(python 2> /dev/null < /dev/null || poetry run python 2> /dev/null) < /dev/null || poetry run python /indexd/bin/index_admin.py create --username "$user" --password "${user_db[$user]}") done echo "Exit code: $?" restartPolicy: Never diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index 8ef33532f..7f4043753 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -74,6 +74,12 @@ spec: name: manifest-metadata key: AGG_MDS_NAMESPACE optional: true + - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD + valueFrom: + configMapKeyRef: + name: manifest-metadata + key: AGG_MDS_DEFAULT_DATA_DICT_FIELD + optional: true imagePullPolicy: Always command: ["/bin/sh"] args: diff --git a/kube/services/jobs/psql-db-aurora-migration-job.yaml b/kube/services/jobs/psql-db-aurora-migration-job.yaml new file mode 100644 index 000000000..ca81c37e8 --- /dev/null +++ b/kube/services/jobs/psql-db-aurora-migration-job.yaml @@ -0,0 +1,219 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-aurora-migration +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: psql-db-copy-sa + containers: + - name: pgdump + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + command: [ "/bin/bash" ] + args: + - "-c" + - | + # This job migrates (takes backup and restores) the databases in a Gen3 instance to an Aurora RDS cluster. + # Requirements: + # 1. Aurora server credentials should be present in the Gen3Secrets/creds.json with name 'aurora'. + # 2. Ensure that `gen3 psql aurora` and `gen3 secrets decode aurora-creds` work as expected. + # 3. The job needs the "psql-db-copy-sa" service account with the necessary permissions to read secrets from all relevant namespaces. 
+ + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + namespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + default_databases=($(echo -e "$(gen3 db services)" | sort -r)) + date_str=$(date -u +%y%m%d_%H%M%S) + databases=("${default_databases[@]}") + gen3_log_info "databases: ${databases[@]}" + + # Initialize sheepdog_db_name and failed_migrations variables + sheepdog_db_name="" + failed_migrations="" + + # find Aurora Server credentials + aurora_host_name=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_host') + aurora_master_username=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_username') + aurora_master_password=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_password') + aurora_master_database=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_database') + + gen3_log_info "Aurora Creds: \n aurora_host_name: $aurora_host_name \n aurora_master_username: $aurora_master_username \n aurora_master_database: $aurora_master_database" + + # Verify important variables are present + if [ -z "$aurora_host_name" ] || [ -z "$aurora_master_username" ] || [ -z "$aurora_master_password" ] || [ -z "$aurora_master_database" ]; then + gen3_log_err "Aurora credentials are missing. Exiting." + exit 1 + fi + + new_resources="" + + # Function to truncate to 63 characters + function truncate_identifier() { + local identifier=$1 + if [ ${#identifier} -gt 63 ]; then + echo "${identifier:0:63}" + else + echo "$identifier" + fi + } + + # Function to create a database with retry logic + function create_database_with_retry() { + local db_name=$1 + local retries=5 + local wait_time=10 + for i in $(seq 1 $retries); do + PGPASSWORD=${db_password} psql -h $aurora_host_name -U "$db_user" -d postgres -c "CREATE DATABASE $db_name" + if [ $? -eq 0 ]; then + return 0 + fi + gen3_log_err "Failed to create database $db_name. Retrying in $wait_time seconds..." + sleep $wait_time + done + return 1 + } + + # Looping through each service to: + # - Extract the database credentials. + # - Check if the user already exists, if not, create the user. + # - Grant required privileges. + # - Create the database (except for peregrine). + # - Backup and restore the database on the Aurora Cluster. + for database in "${databases[@]}"; do + for secret_name in "${database}-creds creds.json" "$database-g3auto dbcreds.json"; do + creds=$(gen3 secrets decode $secret_name 2>/dev/null) + if [ $? -eq 0 ] && [ ! -z "$creds" ]; then + db_hostname=$(echo $creds | jq -r .db_host) + db_username=$(echo $creds | jq -r .db_username) + db_password=$(echo $creds | jq -r .db_password) + db_database=$(echo $creds | jq -r .db_database) + gen3_log_info "Extracting service credentials for $database from $secret_name: \n db_hostname: $db_hostname \n db_username: $db_username \n db_database: $db_database \n" + break + fi + done + + if [ -z "$db_hostname" ] || [ -z "$db_username" ] || [ -z "$db_password" ] || [ -z "$db_database" ]; then + gen3_log_err "Failed to extract database credentials for $database" + failed_migrations="${failed_migrations}\nDatabase: $database, Error: Failed to extract credentials" + continue + fi + + # Check source database accessibility + PGPASSWORD=${db_password} pg_isready -h $db_hostname -U "$db_username" -d "$db_database" + if [ $? -ne 0 ]; then + gen3_log_err "Cannot connect to source database $db_database at $db_hostname. Skipping database $database." 
+ failed_migrations="${failed_migrations}\nDatabase: $database, Error: Cannot connect to source database at $db_hostname" + continue + fi + + # Define db_user and db_name variables with replaced hyphens + db_user="$(echo $database | tr '-' '_')_user_$(echo $namespace | tr '-' '_')" + db_name="$(echo $database | tr '-' '_')_$(echo $namespace | tr '-' '_')_${date_str}" + + # Truncate identifiers if necessary + db_user=$(truncate_identifier $db_user) + db_name=$(truncate_identifier $db_name) + + # Try to connect to the Aurora database with the extracted credentials. + # If the connection is successful, it means the user already exists. + # If not, create the user. + + PGPASSWORD=${db_password} psql -h $aurora_host_name -U "$db_user" -d postgres -c "\q" + if [ $? -eq 0 ]; then + gen3_log_info "User $db_user, password already exists" + else + gen3 psql aurora -c "SET password_encryption = 'scram-sha-256';CREATE USER \"$db_user\" WITH PASSWORD '$db_password' CREATEDB" + if [ $? -ne 0 ]; then + gen3_log_err "Failed to create user for $database" + failed_migrations="${failed_migrations}\nDatabase: $database, Error: Failed to create user" + continue + else + gen3_log_info "Database user $db_user created successfully" + fi + fi + + if [ "$database" != "peregrine" ]; then + # Create the database with a unique name by appending namespace and date. + create_database_with_retry $db_name + if [ $? -ne 0 ]; then + gen3_log_err "Failed to create database for $database" + failed_migrations="${failed_migrations}\nDatabase: $database, Error: Failed to create database" + continue + else + gen3_log_info "Database $db_name created successfully" + if [ "$database" == "sheepdog" ]; then + sheepdog_db_name=$db_name + fi + fi + + # Backup the current database and restore it to the newly created database. + if gen3 db backup $database | PGPASSWORD=${db_password} psql -h $aurora_host_name -U "$db_user" -d "$db_name"; then + gen3_log_info "Database $database restored successfully to $db_name" + new_resources="${new_resources}\nSource_Database: $db_database Source_Host: $db_hostname Source_User: $db_username Restored_Database: $db_name User: $db_user" + else + gen3_log_err "Failed to backup and restore database for $database" + failed_migrations="${failed_migrations}\nDatabase: $database, Error: Failed to backup and restore database" + fi + fi + + if [ "$database" == "peregrine" ]; then + if [ -n "$sheepdog_db_name" ]; then + gen3 psql aurora -d "$sheepdog_db_name" -c "GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO \"$db_user\"" + if [ $? 
-ne 0 ]; then + gen3_log_err "Failed to grant access to sheepdog tables for peregrine user" + failed_migrations="${failed_migrations}\nDatabase: $database, Error: Failed to grant access to sheepdog tables for peregrine user" + continue + else + gen3_log_info "Access to sheepdog tables granted successfully for peregrine user" + new_resources="${new_resources}\nUser: $db_user with access to sheepdog database $sheepdog_db_name" + fi + else + gen3_log_err "Sheepdog database not found for granting permissions to peregrine user" + failed_migrations="${failed_migrations}\nDatabase: $database, Error: Sheepdog database not found for granting permissions" + fi + fi + done + + # Logging the newly created resources + gen3_log_info "New resources created on $aurora_host_name\n$new_resources" + + # Logging the failed migrations + if [ -n "$failed_migrations" ]; then + gen3_log_info "Failed migrations:\n$failed_migrations" + fi + + # Sleep for 600 seconds to allow the user to check the logs + sleep 600 + restartPolicy: Never diff --git a/kube/services/jobs/psql-db-backup-encrypt-job.yaml b/kube/services/jobs/psql-db-backup-encrypt-job.yaml new file mode 100644 index 000000000..f0d1a9587 --- /dev/null +++ b/kube/services/jobs/psql-db-backup-encrypt-job.yaml @@ -0,0 +1,256 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-backup-encrypt +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: dbencrypt-sa + containers: + - name: pgdump + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_alarm_webhook + optional: true + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + - name: SKIP_DBS + valueFrom: + configMapKeyRef: + name: dbbackup-exclude-list + key: skip_dbs + optional: true + command: [ "/bin/bash" ] + args: + - "-c" + - | + #!/bin/bash + + # This script takes backup of Gen3 Service databases, encrypts it, and moves it to an encrypted S3 bucket. + # Requirements: + # 1. PGP public key must be available as a variable in the script. + # 2. The job needs the necessary permissions to read secrets, config maps from the target namespace. + # 3. Databases to skip can be provided using a ConfigMap called `dbbackup-exclude-list` containing a comma-separated list of databases under the key `skip_dbs`. 
+ # Example command to create the ConfigMap: + # kubectl create configmap dbbackup-exclude-list --from-literal=skip_dbs="arborist-bak,wts-bak" + + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + + # Fetch necessary information + namespace=$(gen3 api namespace) + environment=$(gen3 api environment) + hostname=$(gen3 api hostname) + + # Fetch and filter databases + databases=$(gen3 db services | grep -v -E "$(echo "${SKIP_DBS}" | sed 's/, */|/g' )") + + + # Log skipped databases + for db in ${SKIP_DBS//,/ }; do + gen3_log_info "Skipping backup for database: $db" + done + + date_str=$(date -u +%y%m%d_%H%M%S) + gen3_log_info "Databases to be backed up: ${databases}" + + # Define backup directory structure + BACKUP_DIR="/home/ubuntu/backup" + HOST_DIR="${BACKUP_DIR}/${hostname}" + ENV_NS_DIR="${HOST_DIR}/${environment}-${namespace}" + DATE_DIR="${ENV_NS_DIR}/${date_str}" + mkdir -p "${DATE_DIR}" + # PGP public key + PUBLIC_KEY="-----BEGIN PGP PUBLIC KEY BLOCK----- + + mQINBGar5esBEADFHTpT8IzB5Vn77Ied9O1MlsEkn+k/Qbn1giEZia+BiGSGfJqD + ebJn3B/6NeUqyfq55ADw9oNNXw+LcTZrRtZeOv8kq+mfdJ64e1Qnv9i0l6e+LXbq + An3wUvQy+djtTIpQDIdtk0UyYQcNsxkdaqjbYzbNr33mbEjD4JfsOt7qkFJRLG26 + Mc8GEJxYfK0PYX8P54LuU+jc2bq/O9VK13YJ7WYYhrRBsoAej2aRr+3KELrACCeE + RZ8G0XPBhAI96FE6dcohoVo1+m3mXTR6BBtqAIslc0tWyqk5S5YPrGsYeogOl+yq + HyVildf25/ZLFHEnfnyOYAx5ghKRisKRx8bJ2esbSVSryvnpeOMtA57Wba3y+cFn + 5W5YG+MqLG+tqWFIRMs+zLeYnZtP/F2Qdc+5CgT0rEtPI8OpilaB+GiPlRjgDM4m + mbv1XABJvho7uWco1yASrBDsaDQKgkWpVnyIETZOP+FWpK7LJvUz9l/aoSMUK9iQ + Ko1SggewM4fCyBeoSso7aZ75xQK+XkRyFeyd2DqotT/e2ZgIt//TzQ9LF61SVq+Q + hYKJsTxFedAK6Q1C5sHzzG+fFbOTrQ71vgOtKh7eT8quM9sAsCXw4YMGS2v0mSyB + kiJllrz6I54pKiXs2iXYQZLs6hDNDHH0/uEjOVGsG9y/vAdVuRr39VbVGQARAQAB + tCtQbGF0Zm9ybSBFbmdpbmVlcmluZyA8cGVAY3Rkcy51Y2hpY2Fnby5lZHU+iQJO + BBMBCgA4FiEEkqaslDgj+ReG0CykPBvbSP+i50gFAmar5esCGy8FCwkIBwIGFQoJ + CAsCBBYCAwECHgECF4AACgkQPBvbSP+i50gm7xAAwCvhBeEESHUbwd27r8YyOY1r + ZEELagJSaCMUA5W7C780y2RTxVHJ7XmVVEwMCCXnZ0u7G+GZH3k6jHuIRrYwPGgY + ehjAwmLU3uRTQDnbGtrWpLlgFZtqHSQO09sstiuuYYEniIGTt3/yGDMPsuqgguPN + pCT4IARoke3RdHAu5LQRZKaN1U0a/8qWIckCCOWLY8vkzjo/5IKoJQhel4eN3Zwn + 4qokIbDU9En+9De//rniIPMJFn26mQc9NIBW0uy6J2tNG/y3wJba3MNWL+WdCznE + yaFsTTGVzfdyCI3+HWq+fjCnrTQeYcsfPTbifpyaVdb6+FDj1yhY+hlJzLmDCMgP + OT3MD9NyWgBxuB2XqPOjo5RtA8uh3avNljRYC9k0bvuawNpGSZu7LKd2w79jAknm + Vh6TJ4+WHWx9vAozrwQ+O+8RmD0CPojsj4OQHb9lVTDd++6D7pq9o8yrBaZNCv9l + /gXk+f/3D19v0iYTlJF4OlGJyiTRfpJ27lq5Z0AuSm0SO/sc5O2tOV4fYDKUHnn9 + G+kw9+ZAdRpNS4x3W6j3sC3/Y5kKhD2fpyycHUfm2n0j2mGmXN1kQ28NU0mhJttB + OZazdgeijPXqN7+DM64iwKz9fSamc09FK7JTDgb64oAA0Py29bT9WLAMdYTNhFrE + agGOzCqb4TEjHoDIa9u5Ag0EZqvl6wEQAN1eAl7+ttfCd3NicrzwUHczsCyRyqde + HCjWRPhQ5rQ8kAmvt/b1SD/cTZM8nhLGOUBNEq9cK9ZuOS71AYvxKG30yYz0VakX + VDcHO0iAxSXqSKK1jrr9x4qqU1jw7Phy80WsVv7yA/vAsOug5ikqwAFVIEkSAltu + wk48xLqSeP0+g9fJGzEi78p+itjkhz9n+vgQEZN8OuI6yShlA2mB5Sq9ekvs4mHC + BvAFNBhTgK2szl6GUOBmoREnqf7hId7IhmhirzZxdI3v7yMrGMB0aH3++JdNHA7x + SeYN8B41RAH61kwz7JEoh4sVdfppYF7xx94numfX4YTftQGYvLIgbW4WzoE3BKAl + LSV3+1mERp06QM5zdH8zBwGRiM/ob/x+g2htyqYMG+6M1ZjMgrrNjsP5Zy80k//F + LBok3inKLNalM28WwtYdoXNnsYTOo3+UzIjtl1hfZoYgbn6LuiL0Oewga7QrOZ/P + UCZOwPdL2TgKDOqt7usdHso5i4139BOu6quBBp7ouqFSKFbWoOdffik/g0f+5UPw + +nEBN0JfpN6ACA1P6p/GzHkfYcOflumFjkpFFhB4PvHxpdBSH7T90ec+a/9XGImL + EIoeKMpCl3+yayd9u8JzLCZVYo2rgTnp/DoqoGPzv5W7DR709sAtSbxcuA4Klbzu + t9Xc9DKc6in/ABEBAAGJBGwEGAEKACAWIQSSpqyUOCP5F4bQLKQ8G9tI/6LnSAUC + Zqvl6wIbLgJACRA8G9tI/6LnSMF0IAQZAQoAHRYhBEubwQz2su3GAKUEIgZh6MFg + Klj0BQJmq+XrAAoJEAZh6MFgKlj0iHoP/0vEZkRVCkNnWQPUuq9c1tqc+9dX9VJh + 
Mx6EYh8tcuAbilEZpAsWT0qasM6Y87BO56X/suG+2agZfLkz9o60uBPlcHkCuttW + vrAmXaVSXWv8EEvDaaGTymSM0cEDEd+4hIaFobbeOm6FQfdp57yAI4QGDmK1bzkE + fG6bejHkI0DubR4kumHXlMiDgSLeOdUh0IbsDWl8+3wcpocNtIy8Q2V+hCuRW5l1 + Ag4I6P2qadpPlbbV4mxQzOCfn/Y2vHmpXL7FJBaCTgiYgT+uyFj91b9tbYcsVFW5 + 2vuXWpVFrDNhMzRS8Fa3GXoM3SQn9cKMDgSp9X0lyDrj8DnGjG/0o+zHB4VnC3jz + Lms56al3t0lBuG9unz0e3sFCwvwUeYIjnqU1ViosZvz3u7TrpsMdsuKHISs7ck2j + rLNbi97/vdRjTARpQCNAN5V6YIjvx53OWSMJscGvGpOGlM9GbSy1a4eZ2vKbNelN + TQDWWao6nfInvbewG2OhZrx9MzajJvF1YD85O6LpDkIFCyZLb3rjKUWtEduQrJMe + ztj/hHhl+702EXWPxHFaYySfatcAutrB+n9Z7l96gzLqt8trrsopEYNLH9rmNesL + DrDwRjN5C0W3hGIhq03kR0tq/hQeZfhvREKDzGCITi1wef41ZUSG7dkGWT7n+WCw + 1IQ6DzzALDAyzH4QAKrQ4NCM+39sV+NPi+eyAIJ94P+cerhMPZh0LEdzHlX+DSUv + UoRAHuIml2VBe9cnwgD0tHXdxvjg3XLDwufvCfOu06jEmnEHpsokd/3qYj5dJ3Nd + Q4HvLQVKCnEvtM5uOoUZYxkGxobhH8ah18eC5/YmA95V3fiYF/Jg96I//Zbq/BZY + lTO5NjQzutNrrnEsr5BDbHDbURLZ58iixWLtYIVI04FRuu2UDZa9bNvjEQuwZos3 + nzHxmJeluo91HbW+FdRFByehrAOfUhkb04xJKEBXjhOqdUeSezIGhp88pb+yhV+w + WNSsxK+uOJ9Pr1Sjz3/pr9nopVFF1kqY8iE3GYgiYpu3p2A1zGUxlaoHQCZ/aT08 + whGzEsGkgQGOGX3pseKaYIVbxMNbfRGsJCKjdukQbuy5Gz/ffAm8vvf7JfPWmVUO + G+zU9L9ZIHZKlQ76PQTA1mEWa3akU6vVScDbNUiObCNZPQJdj6V6HpVAlo/sOXOt + 1RaIB2Oz5ViwAOJFYxO/PomcXiMOThnkF7op8R2I4cVoYlKnxK0VUoluNX9fiH5D + aI9PgmA2NVbQ/LqP+rP3hLbFSlh0nXjt4NxCbE14ApSslsoEaqilNgtL9UcIzkBE + 3lNYclZLeQk5SLPsohmsXoYJ6W8G1XopvZ/cG417GQ4N7FOr9VRBXimRX71O + =/4uP + -----END PGP PUBLIC KEY BLOCK-----" + + # Import the public key + echo "$PUBLIC_KEY" | gpg --import + + # Validate Slack webhook + if [[ -n "${slackWebHook}" && "${slackWebHook}" == https* ]]; then + slack=true + else + echo "WARNING: slackWebHook is not set or invalid; not sending alerts to Slack" + slack=false + fi + + # Function to encrypt a database with PGP public key + function encrypt_database() { + local db_name=$1 + gpg --yes --trust-model always --output "${DATE_DIR}/${db_name}.sql.gpg" --encrypt --recipient pe@ctds.uchicago.edu "${DATE_DIR}/${db_name}.sql" + + if [ $? -eq 0 ]; then + rm "${DATE_DIR}/${db_name}.sql" + gen3_log_info "Successfully encrypted and removed the original file for database $db_name. \n" + return 0 + fi + gen3_log_err "Failed to encrypt database $db_name.\n" + return 1 + } + + # Loop through each service to back up and encrypt the database + for database in $databases; do + for secret_name in "${database}-creds creds.json" "$database-g3auto dbcreds.json"; do + creds=$(gen3 secrets decode $secret_name 2>/dev/null) + # Extracting service credentials + if [ $? -eq 0 ] && [ ! -z "$creds" ]; then + db_hostname=$(echo $creds | jq -r .db_host) + db_username=$(echo $creds | jq -r .db_username) + db_password=$(echo $creds | jq -r .db_password) + db_database=$(echo $creds | jq -r .db_database) + gen3_log_info "Extracting service credentials for $database from $secret_name:\n db_hostname: $db_hostname\n db_username: $db_username\n db_database: $db_database\n" + break + fi + done + + # Verify credentials are extracted + if [ -z "$db_hostname" ] || [ -z "$db_username" ] || [ -z "$db_password" ] || [ -z "$db_database" ]; then + gen3_log_err "Failed to extract database credentials for $database" + failed_backups="${failed_backups}\nDatabase: $database, Error: Failed to extract credentials" + continue + fi + + # Check database accessibility + PGPASSWORD=${db_password} pg_isready -h $db_hostname -U "$db_username" -d "$db_database" + if [ $? -ne 0 ]; then + gen3_log_err "Cannot connect to source database $db_database at $db_hostname. Skipping database $database." 
+ failed_backups="${failed_backups}\nDatabase: $database, Error: Cannot connect to source database at $db_hostname" + continue + fi + + if [ "$database" != "peregrine" ]; then + # Backup the current database + if PGPASSWORD=${db_password} pg_dump -h $db_hostname -U "$db_username" -d "$db_database" > "${DATE_DIR}/${db_database}.sql"; then + gen3_log_info "Database $database backed up to ${DATE_DIR}/${db_database}.sql" + if encrypt_database "$db_database"; then + backedup_databases="${backedup_databases}\nDatabase: $db_database" + else + failed_backups="${failed_backups}\nDatabase: $database, Error: Failed to encrypt database" + fi + else + gen3_log_err "Failed to backup $database" + failed_backups="${failed_backups}\nDatabase: $database, Error: Failed to backup database" + fi + fi + done + + # Logging the successful backups + if [ -n "$backedup_databases" ]; then + gen3_log_info "Successfully backed up and encrypted databases:\n$backedup_databases" + fi + + # Logging the failed backups + if [ -n "$failed_backups" ]; then + gen3_log_info "Failed backups:\n$failed_backups" + if [ "$slack" = true ]; then + curl -X POST --data-urlencode "payload={\"text\": \"*Backup failed* for psql-db-backup-encrypt on ${hostname} Cluster: ${environment} Namespace: ${namespace} at $(date).\nFailed backups: ${failed_backups}\"}" $slackWebHook + fi + fi + + # Sleep for 600 seconds to allow the user to check the logs + sleep 600 + volumeMounts: + - mountPath: "/home/ubuntu/backup" + name: s3-volume + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + volumes: + - name: s3-volume + persistentVolumeClaim: + claimName: s3-pvc-db-backups + restartPolicy: Never diff --git a/kube/services/jobs/psql-db-copy-aurora-job.yaml b/kube/services/jobs/psql-db-copy-aurora-job.yaml new file mode 100644 index 000000000..a29274146 --- /dev/null +++ b/kube/services/jobs/psql-db-copy-aurora-job.yaml @@ -0,0 +1,193 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-copy-aurora +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: psql-db-copy-sa + containers: + - name: pgdump + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + - name: SOURCE_NAMESPACE + GEN3_SOURCE_NAMESPACE|-value: "staging"-| # Default value, should be overwritten by the environment variable + command: [ "/bin/bash" ] + args: + - "-c" + - | + # This script copies specified databases from a source namespace to the current namespace on the same Aurora RDS instance. + # + # This script requires the following to work properly: + # + # 1. Aurora server credentials must be present in the Gen3Secrets/creds.json file. + # These credentials should be present as a Kubernetes secret named "aurora-creds". + # This secret should contain the keys: db_host, db_username, db_password, and db_database. + # + # 2. The "gen3 psql aurora" command should be available to connect to the Aurora server. + # + # 3. 
The "gen3 secrets decode aurora-creds creds.json" command should work, allowing the script to decode the necessary secrets. + # + # 4. The source and the destination databases should be on the same Aurora instance. + # + # 5. The ServiceAccount, roles, and role binding must be set up using the script psql-db-copy-aurora-sa.yaml. + # The psql-db-copy-aurora-sa.yaml script is configured for the default namespace. + # Modify the namespace as needed before applying it where the script will run. + # These can be created by executing the command: + # kubectl apply -f ${GEN3_HOME}/kube/services/jobs/psql-db-copy-aurora-sa.yaml + # + # How to run the script: + # gen3 job run psql-db-copy-aurora -v SOURCE_NAMESPACE + # + + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + namespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + date_str=$(date -u +%y%m%d_%H%M%S) + # Define the default databases to be copied + databases=( "indexd" "sheepdog" "metadata") + gen3_log_info "databases to be processed: ${databases[@]}" + source_namespace=$SOURCE_NAMESPACE + gen3_log_info "Source Namespace: $source_namespace" + + # find Aurora Server credentials + aurora_host_name=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_host') + aurora_master_username=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_username') + aurora_master_password=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_password') + aurora_database=$(gen3 secrets decode aurora-creds creds.json | jq -r '.db_database') + + # Verify important variables are present + if [ -z "$aurora_host_name" ] || [ -z "$aurora_master_username" ] || [ -z "$aurora_master_password" ] || [ -z "$aurora_database" ]; then + gen3_log_err "Aurora credentials are missing. Exiting." + exit 1 + fi + + # Function to truncate to 63 characters + function truncate_identifier() { + local identifier=$1 + if [ ${#identifier} -gt 63 ]; then + echo "${identifier:0:63}" + else + echo "$identifier" + fi + } + + # Function to decode Kubernetes secrets + function secrets_decode() { + local namespace=$1 + local secret=$2 + local key=$3 + local secrets_value + + secrets_value=$(kubectl get secret -n $namespace $secret -o json 2>/dev/null | jq -r --arg key "$key" '.data[$key]' | base64 --decode --ignore-garbage 2>/dev/null) + if [ $? -ne 0 ] || [ -z "$secrets_value" ]; then + echo "Secret $secret in namespace $namespace not found or failed to decode" >&2 + return 1 + else + echo "$secrets_value" + fi + } + + # Array to hold the names of newly created databases + new_databases=() + + # Looping through each database + for database in "${databases[@]}"; do + source_creds="" + creds="" + + # Try to get the source and destination credentials with the "-g3auto" suffix and key "dbcreds.json" + source_creds=$(secrets_decode $source_namespace ${database}-g3auto dbcreds.json) + if [ $? -ne 0 ]; then + source_creds="" + fi + creds=$(secrets_decode $namespace ${database}-g3auto dbcreds.json) + if [ $? -ne 0 ]; then + creds="" + fi + + # If the "-g3auto" suffix didn't work for both source_creds and creds, try with the suffix "creds" and key "creds.json" + if [ -z "$source_creds" ] && [ -z "$creds" ]; then + source_creds=$(secrets_decode $source_namespace ${database}-creds creds.json) + if [ $? -ne 0 ]; then + source_creds="" + fi + creds=$(secrets_decode $namespace ${database}-creds creds.json) + if [ $? 
-ne 0 ]; then + creds="" + fi + fi + + # If we still couldn't get the credentials, log an error and continue to the next database + if [ -z "$source_creds" ] || [ -z "$creds" ]; then + gen3_log_err "Failed to extract database credentials for $database" + continue + fi + + source_db_database=$(echo $source_creds | jq -r .db_database) + db_username=$(echo $creds | jq -r .db_username) + db_database=$(echo $creds | jq -r .db_database) + + if [ -z "$source_db_database" ] || [ -z "$db_username" ] || [ -z "$db_database" ]; then + gen3_log_err "One or more required credentials are missing for $database. Skipping." + continue + fi + target_db=$(truncate_identifier $(echo "${database}_${namespace}_${date_str}" | tr '-' '_')) + gen3_log_info "Processing database: $database" + gen3_log_info "Source DB: $source_db_database, Username: $db_username, Current DB: $db_database, Target DB: $target_db" + + # DB commands + gen3 psql aurora -c "GRANT \"$db_username\" TO \"$aurora_master_username\"" + gen3 psql aurora -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$source_db_database' AND pid <> pg_backend_pid()" + gen3 psql aurora -c "CREATE DATABASE \"$target_db\" WITH TEMPLATE \"$source_db_database\" OWNER \"$db_username\"" + pg_command="DO \$\$ DECLARE tbl record; BEGIN FOR tbl IN (SELECT table_schema || '.' || table_name AS full_table_name FROM information_schema.tables WHERE table_schema = 'public') LOOP EXECUTE 'ALTER TABLE ' || tbl.full_table_name || ' OWNER TO \"$db_username\";'; END LOOP; END \$\$;" + PGPASSWORD=${aurora_master_password} psql -h $aurora_host_name -U "$aurora_master_username" -d "$target_db" -c "$pg_command" + if [ $? -eq 0 ]; then + gen3_log_info "Successfully processed $database" + new_databases+=("$target_db") + else + gen3_log_err "Failed to process $database" + fi + done + + gen3_log_info "Job Completed" + + # Print the list of newly created databases + gen3_log_info "Newly created Database Names::" + for new_db in "${new_databases[@]}"; do + gen3_log_info "$new_db" + done + + sleep 600 + restartPolicy: Never diff --git a/kube/services/jobs/psql-db-copy-aurora-sa.yaml b/kube/services/jobs/psql-db-copy-aurora-sa.yaml new file mode 100644 index 000000000..e6977a187 --- /dev/null +++ b/kube/services/jobs/psql-db-copy-aurora-sa.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: psql-db-copy-sa + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: psql-db-copy-role +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: psql-db-copy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psql-db-copy-role +subjects: +- kind: ServiceAccount + name: psql-db-copy-sa + namespace: default # Ensure this references the correct namespace + diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml index f05ab518a..69d66ec3f 100644 --- a/kube/services/jobs/s3sync-cronjob.yaml +++ b/kube/services/jobs/s3sync-cronjob.yaml @@ -5,7 +5,7 @@ #####REQUIRED VARIABLE######## #SOURCE_BUCKET #TARGET_BUCKET -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: s3sync diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml index 4f82e9d43..3c6d58768 100644 --- 
a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml +++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml @@ -24,28 +24,26 @@ spec: - name: karpenter-templates-volume mountPath: /manifests env: - - name: PROVISIONER_TEMPLATE - value: /manifests/provisioner.yaml - - name: AWSNODETEMPLATE_TEMPLATE - value: /manifests/nodetemplate.yaml + - name: NODEPOOL_TEMPLATE + value: /manifests/nodepool.yaml + - name: NODECLASS_TEMPLATE + value: /manifests/nodeclass.yaml command: ["/bin/bash"] args: - "-c" - | #!/bin/bash - if [ -z "$PROVISIONER_TEMPLATE" ]; then - PROVISIONER_TEMPLATE="provisioner.yaml" + if [ -z "$NODEPOOL_TEMPLATE" ]; then + NODEPOOL_TEMPLATE="/manifests/provisioner.yaml" fi - if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then - AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" + if [ -z "$NODECLASS_TEMPLATE" ]; then + NODECLASS_TEMPLATE="/manifests/nodeclass.yaml" fi ENVIRONMENT=$(kubectl -n default get configmap global -o jsonpath="{.data.environment}") - RAW_WORKFLOWS=$(kubectl get workflows -n argo -o yaml) - - WORKFLOWS=$(echo "${RAW_WORKFLOWS}" | yq -r '.items[] | [.metadata.name, .metadata.labels.gen3username] | join(" ")') + WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{" "}{.metadata.labels.gen3teamproject}{"\n"}') WORKFLOW_ARRAY=() @@ -53,20 +51,25 @@ spec: WORKFLOW_ARRAY+=("$line") done <<< "$WORKFLOWS" + echo $WORKFLOWS + for workflow in "${WORKFLOW_ARRAY[@]}" do workflow_name=$(echo "$workflow" | awk '{print $1}') workflow_user=$(echo "$workflow" | awk '{print $2}') + workflow_team=$(echo "$workflow" | awk '{print $3}') - if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then - echo "No awsnodetemplate found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - - fi + if [ ! -z "$workflow_name" ]; then + if ! kubectl get ec2nodeclass workflow-$workflow_name >/dev/null 2>&1; then + echo "No awsnodetemplate found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_TEAMNAME/$workflow_team/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODECLASS_TEMPLATE" | kubectl apply -f - + fi - if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then - echo "No provisioner found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + if ! 
kubectl get nodepool workflow-$workflow_name >/dev/null 2>&1; then + echo "No provisioner found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_TEAMNAME/$workflow_team/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODEPOOL_TEMPLATE" | kubectl apply -f - + fi fi done restartPolicy: OnFailure diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index 6ba8b3a0f..fbb783135 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -3,6 +3,9 @@ kind: AWSNodeTemplate metadata: name: default spec: + amiSelector: + aws::name: EKS-FIPS* + aws::owners: "143731057154" subnetSelector: karpenter.sh/discovery: VPC_NAME securityGroupSelector: @@ -32,30 +35,12 @@ spec: sysctl -w fs.inotify.max_user_watches=12000 - sudo yum update -y - sudo yum install -y dracut-fips openssl >> /opt/fips-install.log - sudo dracut -f - # configure grub - sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - # --BOUNDARY # Content-Type: text/cloud-config; charset="us-ascii" # mounts: # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] - --BOUNDARY - - Content-Type: text/cloud-config; charset="us-ascii" - - power_state: - delay: now - mode: reboot - message: Powering off - timeout: 2 - condition: true - - --BOUNDARY-- blockDeviceMappings: - deviceName: /dev/xvda diff --git a/kube/services/karpenter/provisionerDefault.yaml b/kube/services/karpenter/provisionerDefault.yaml index ac08284ce..f92a5e383 100644 --- a/kube/services/karpenter/provisionerDefault.yaml +++ b/kube/services/karpenter/provisionerDefault.yaml @@ -11,14 +11,14 @@ spec: - key: kubernetes.io/arch operator: In values: - - amd64 + - amd64 - key: karpenter.k8s.aws/instance-category operator: In values: - - c - - m - - r - - t + - c + - m + - r + - t # Set a limit of 1000 vcpus limits: resources: @@ -30,6 +30,4 @@ spec: consolidation: enabled: true # Kill nodes after 30 days to ensure they stay up to date - ttlSecondsUntilExpired: 2592000 - - + ttlSecondsUntilExpired: 604800 diff --git a/kube/services/manifestservice/manifestservice-deploy.yaml b/kube/services/manifestservice/manifestservice-deploy.yaml index 0966f2480..3551cfa66 100644 --- a/kube/services/manifestservice/manifestservice-deploy.yaml +++ b/kube/services/manifestservice/manifestservice-deploy.yaml @@ -24,6 +24,7 @@ spec: userhelper: "yes" netvpc: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: serviceAccountName: manifestservice-sa affinity: diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml index 9bb6ac9c5..71ab7b484 100644 --- a/kube/services/metadata/metadata-deploy.yaml +++ b/kube/services/metadata/metadata-deploy.yaml @@ -25,6 +25,7 @@ spec: # for network policy netnolimit: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: @@ -91,6 +92,12 @@ spec: name: manifest-metadata key: AGG_MDS_NAMESPACE optional: true + - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD + valueFrom: + configMapKeyRef: + name: manifest-metadata + key: AGG_MDS_DEFAULT_DATA_DICT_FIELD + optional: true imagePullPolicy: Always livenessProbe: httpGet: @@ -110,6 +117,11 @@ spec: readOnly: true mountPath: /src/.env subPath: metadata.env + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. 
+ - name: config-volume-g3auto + readOnly: true + mountPath: /mds/.env + subPath: metadata.env - name: config-volume readOnly: true mountPath: /aggregate_config.json @@ -133,6 +145,11 @@ spec: readOnly: true mountPath: /src/.env subPath: metadata.env + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: config-volume-g3auto + readOnly: true + mountPath: /mds/.env + subPath: metadata.env resources: limits: cpu: 0.8 @@ -141,4 +158,6 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head + diff --git a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml index 7ad51caca..93c2de3c3 100644 --- a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml @@ -3,7 +3,6 @@ kind: NetworkPolicy metadata: name: netpolicy-sowerjob spec: - spec: podSelector: matchLabels: app: sowerjob diff --git a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml index 7b1f85c29..bd6e03f05 100644 --- a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml @@ -3,7 +3,6 @@ kind: NetworkPolicy metadata: name: netpolicy-ssjdispatcherjob spec: - spec: podSelector: matchLabels: app: ssjdispatcherjob diff --git a/kube/services/node-affinity-daemonset/README.md b/kube/services/node-affinity-daemonset/README.md new file mode 100644 index 000000000..3de3bae06 --- /dev/null +++ b/kube/services/node-affinity-daemonset/README.md @@ -0,0 +1,5 @@ +# Prerequisites + +This service needs certmanager to work. Please install certmanager before deploying this service. Once certmanager is installed, you can deploy this service by applying the manifests in this directory. 
+ +Code lives in https://github.com/uc-cdis/node-affinity-webhook/ diff --git a/kube/services/node-affinity-daemonset/deployment.yaml b/kube/services/node-affinity-daemonset/deployment.yaml new file mode 100644 index 000000000..027dd690c --- /dev/null +++ b/kube/services/node-affinity-daemonset/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: node-affinity-daemonset + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: node-affinity-daemonset + template: + metadata: + labels: + app: node-affinity-daemonset + spec: + containers: + - name: node-affinity-daemonset + image: quay.io/cdis/node-affinity-daemonset:master + ports: + - containerPort: 8443 + volumeMounts: + - name: webhook-certs + mountPath: /etc/webhook/certs + readOnly: true + volumes: + - name: webhook-certs + secret: + secretName: webhook-certs #pragma: allowlist secret diff --git a/kube/services/node-affinity-daemonset/service.yaml b/kube/services/node-affinity-daemonset/service.yaml new file mode 100644 index 000000000..022ca443f --- /dev/null +++ b/kube/services/node-affinity-daemonset/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: node-affinity-daemonset + namespace: kube-system +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + app: node-affinity-daemonset diff --git a/kube/services/node-affinity-daemonset/webhook.yaml b/kube/services/node-affinity-daemonset/webhook.yaml new file mode 100644 index 000000000..b1c92dbb9 --- /dev/null +++ b/kube/services/node-affinity-daemonset/webhook.yaml @@ -0,0 +1,43 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: node-affinity-daemonset + cert-manager.io/inject-ca-from: kube-system/node-affinity-daemonset-cert +webhooks: + - name: node-affinity-daemonset.k8s.io + clientConfig: + service: + name: node-affinity-daemonset + namespace: kube-system + path: "/mutate" + rules: + - operations: ["CREATE"] + apiGroups: ["apps"] + apiVersions: ["v1"] + resources: ["daemonsets"] + admissionReviewVersions: ["v1"] + sideEffects: None + +--- + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: node-affinity-daemonset-cert + namespace: kube-system +spec: + secretName: webhook-certs #pragma: allowlist secret + dnsNames: + - node-affinity-daemonset.kube-system.svc + issuerRef: + name: selfsigned + +--- + +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned + namespace: kube-system +spec: + selfSigned: {} diff --git a/kube/services/node-monitors/argo-monitors/argo-node-age.yaml b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml index 890495ee0..7a60a32ce 100644 --- a/kube/services/node-monitors/argo-monitors/argo-node-age.yaml +++ b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml @@ -5,8 +5,11 @@ metadata: namespace: default spec: schedule: "*/5 * * * *" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 jobTemplate: spec: + backoffLimit: 4 template: metadata: labels: @@ -27,7 +30,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: slack_webhook + key: slack_alarm_webhook command: ["/bin/bash"] args: @@ -55,4 +58,4 @@ spec: curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODE_NAME}\` is older than 3 hours!\"}" $SLACK_WEBHOOK_URL fi done - restartPolicy: OnFailure \ No newline at end of file + restartPolicy: OnFailure diff --git a/kube/services/node-monitors/fenceshib-jenkins-test.yaml 
b/kube/services/node-monitors/fenceshib-jenkins-test.yaml new file mode 100644 index 000000000..deaf26b3e --- /dev/null +++ b/kube/services/node-monitors/fenceshib-jenkins-test.yaml @@ -0,0 +1,43 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fenceshib-service-check + namespace: default +spec: + schedule: "0 */4 * * *" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + backoffLimit: 4 + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: node-monitor + containers: + - name: kubectl + image: quay.io/cdis/awshelper + env: + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + + fenceshib=$(kubectl get services -A | grep "fenceshib-service" | awk '{print $2}') + + # Check if there are any fenceshib services + if [[ ! -z "$fenceshib" ]]; then + echo "Alert: Service fenceshib-service found with output: $fenceshib" + curl -X POST -H 'Content-type: application/json' --data "{\"text\": \"WARNING: Fenceshib service discovered in qaplanetv1 cluster. This could cause issues with future CI runs. Please delete this service if it is not needed. Run the following in qaplanetv1 to see which namespace it is in: \`kubectl get services -A | grep "fenceshib-service"\`\"}" $SLACK_WEBHOOK_URL + else + echo "Fenceshib Service Not Found" + fi + restartPolicy: OnFailure diff --git a/kube/services/node-monitors/node-not-ready.yaml b/kube/services/node-monitors/node-not-ready.yaml index 500832fc3..709dfc79e 100644 --- a/kube/services/node-monitors/node-not-ready.yaml +++ b/kube/services/node-monitors/node-not-ready.yaml @@ -5,8 +5,11 @@ metadata: namespace: default spec: schedule: "*/30 * * * *" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 jobTemplate: spec: + backoffLimit: 4 template: metadata: labels: @@ -21,7 +24,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: slack_webhook + key: slack_alarm_webhook - name: ENVIRONMENT valueFrom: configMapKeyRef: diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml index e2d712533..7c686df91 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml @@ -17,9 +17,19 @@ data: "ajax": false, "icon": "fa fa-openid" }]; + // This tells Atlas to show the 'lock' icon to the user when + // viewing concept sets, cohort definitions, and other + // artifacts so that they can edit READ and WRITE + // permissions. By default, this is set to true + configLocal.enablePermissionManagement = true; // Default is true + // If the enablePermissionManagement is set to true, you can specify + // if this capability should be limited to only users that have a + // specific permission for this. Leave false if all users should have that ability. + configLocal.limitedPermissionManagement = true; // Default is false all users can share. If true, only users with the permission "artifact:global:share:put" get the ability to share artifacts. 
configLocal.cohortComparisonResultsEnabled = false; configLocal.userAuthenticationEnabled = true; configLocal.plpResultsEnabled = false; + configLocal.refreshTokenThreshold = 1000 * 60 * 4; // refresh auth token if it will expire within 4 minutes return configLocal; }); diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml index 8eb01ec08..a5d0972eb 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml @@ -22,7 +22,7 @@ stringData: security_cors_enabled: "true" security_origin: "*" - security_token_expiration: "43200" + security_token_expiration: "900" security_ssl_enabled: "false" security_provider: AtlasRegularSecurity diff --git a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml index fc45434ca..e2df93cd0 100644 --- a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml +++ b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml @@ -86,7 +86,7 @@ spec: periodSeconds: 60 timeoutSeconds: 30 ports: - - containerPort: 80 + - containerPort: 8080 volumeMounts: - name: config-volume-g3auto readOnly: true diff --git a/kube/services/peregrine/peregrine-deploy.yaml b/kube/services/peregrine/peregrine-deploy.yaml index 20bba64ad..6467fe325 100644 --- a/kube/services/peregrine/peregrine-deploy.yaml +++ b/kube/services/peregrine/peregrine-deploy.yaml @@ -29,6 +29,7 @@ spec: GEN3_ENV_LABEL GEN3_PEREGRINE_VERSION GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index 742f1b71c..20347a3be 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -19,6 +19,7 @@ spec: app: portal public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: @@ -182,20 +183,6 @@ spec: name: global key: mapbox_token optional: true - - name: DATADOG_APPLICATION_ID - # Optional application ID for Datadog - valueFrom: - secretKeyRef: - name: portal-datadog-config - key: datadog_application_id - optional: true - - name: DATADOG_CLIENT_TOKEN - # Optional client token for Datadog - valueFrom: - secretKeyRef: - name: portal-datadog-config - key: datadog_client_token - optional: true - name: DATA_UPLOAD_BUCKET # S3 bucket name for data upload, for setting up CSP GEN3_DATA_UPLOAD_BUCKET|-value: ""-| diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml index f639a1e15..b65d58982 100644 --- a/kube/services/portal/portal-root-deploy.yaml +++ b/kube/services/portal/portal-root-deploy.yaml @@ -19,6 +19,7 @@ spec: app: portal public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: @@ -182,20 +183,6 @@ spec: name: global key: mapbox_token optional: true - - name: DATADOG_APPLICATION_ID - # Optional application ID for Datadog - valueFrom: - secretKeyRef: - name: portal-datadog-config - key: datadog_application_id - optional: true - - name: DATADOG_CLIENT_TOKEN - # Optional client token for Datadog - valueFrom: - secretKeyRef: - name: portal-datadog-config - key: datadog_client_token - optional: true - name: DATA_UPLOAD_BUCKET # S3 bucket name for data upload, for setting up CSP GEN3_DATA_UPLOAD_BUCKET|-value: ""-| diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml index 45e6daaea..375f424ed 100644 --- 
a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml +++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml @@ -30,6 +30,10 @@ spec: GEN3_ENV_LABEL GEN3_FENCE_VERSION GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics spec: serviceAccountName: fence-sa affinity: diff --git a/kube/services/requestor/requestor-deploy.yaml b/kube/services/requestor/requestor-deploy.yaml index fb5ce173f..954cb847c 100644 --- a/kube/services/requestor/requestor-deploy.yaml +++ b/kube/services/requestor/requestor-deploy.yaml @@ -25,6 +25,7 @@ spec: # for network policy netnolimit: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: @@ -90,6 +91,11 @@ spec: readOnly: true mountPath: "/src/requestor-config.yaml" subPath: "requestor-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility + - name: "config-volume" + readOnly: true + mountPath: "/requestor/requestor-config.yaml" + subPath: "requestor-config.yaml" resources: requests: cpu: 100m @@ -105,6 +111,11 @@ spec: readOnly: true mountPath: "/src/requestor-config.yaml" subPath: "requestor-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility + - name: "config-volume" + readOnly: true + mountPath: "/requestor/requestor-config.yaml" + subPath: "requestor-config.yaml" resources: limits: cpu: 0.8 @@ -113,4 +124,5 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head diff --git a/kube/services/revproxy/gen3.nginx.conf/ohdsi-atlas-service.conf b/kube/services/revproxy/gen3.nginx.conf/ohdsi-atlas-service.conf deleted file mode 100644 index f482e4824..000000000 --- a/kube/services/revproxy/gen3.nginx.conf/ohdsi-atlas-service.conf +++ /dev/null @@ -1,12 +0,0 @@ - location /ohdsi-atlas/ { - if ($csrf_check !~ ^ok-\S.+$) { - return 403 "failed csrf check"; - } - - set $proxy_service "ohdsi-atlas"; - # upstream is written to logs - set $upstream http://ohdsi-atlas-service.$namespace.svc.cluster.local; - rewrite ^/ohdsi-atlas/(.*) /$1 break; - proxy_pass $upstream; - client_max_body_size 0; - } diff --git a/kube/services/revproxy/gen3.nginx.conf/ohdsi-webapi-service.conf b/kube/services/revproxy/gen3.nginx.conf/ohdsi-webapi-service.conf deleted file mode 100644 index cd0d41f0a..000000000 --- a/kube/services/revproxy/gen3.nginx.conf/ohdsi-webapi-service.conf +++ /dev/null @@ -1,12 +0,0 @@ - location /ohdsi-webapi/ { - if ($csrf_check !~ ^ok-\S.+$) { - return 403 "failed csrf check"; - } - - set $proxy_service "ohdsi-webapi"; - # upstream is written to logs - set $upstream http://ohdsi-webapi-service.$namespace.svc.cluster.local; - rewrite ^/ohdsi-webapi/(.*) /$1 break; - proxy_pass $upstream; - client_max_body_size 0; - } diff --git a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf index ed736189c..78286eeab 100644 --- a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf @@ -20,7 +20,7 @@ location /orthanc/ { client_max_body_size 0; } -location /orthanc/dicom-web/studies/ { +location /orthanc/dicom-web/studies { set $authz_method
"read"; set $authz_resource "/services/orthanc/studies"; set $authz_service "orthanc"; diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index 9f10ce90b..80fd582e0 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -23,6 +23,7 @@ spec: userhelper: "yes" internet: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: @@ -196,11 +197,12 @@ spec: mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" resources: - requests: - cpu: 100m - memory: 100Mi + requests: + cpu: 0.5 + memory: 1024Mi limits: - memory: 800Mi + cpu: 1.0 + memory: 2048Mi command: ["/bin/sh" ] args: - "-c" diff --git a/kube/services/sheepdog/sheepdog-deploy.yaml b/kube/services/sheepdog/sheepdog-deploy.yaml index a260c8741..2f476d0f0 100644 --- a/kube/services/sheepdog/sheepdog-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-deploy.yaml @@ -27,6 +27,7 @@ spec: GEN3_ENV_LABEL GEN3_SHEEPDOG_VERSION GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/sower/sower-deploy.yaml b/kube/services/sower/sower-deploy.yaml index b66739d06..2c4e5f610 100644 --- a/kube/services/sower/sower-deploy.yaml +++ b/kube/services/sower/sower-deploy.yaml @@ -22,6 +22,7 @@ spec: public: "yes" netnolimit: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: diff --git a/kube/services/spark/spark-deploy.yaml b/kube/services/spark/spark-deploy.yaml index b280cecf0..2a17b5fba 100644 --- a/kube/services/spark/spark-deploy.yaml +++ b/kube/services/spark/spark-deploy.yaml @@ -81,8 +81,8 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 3 - memory: 4Gi + cpu: 2 + memory: 2Gi command: ["/bin/bash" ] args: - "-c" diff --git a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml index 554c60cb5..990f583cb 100644 --- a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml +++ b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml @@ -21,6 +21,7 @@ spec: netnolimit: "yes" public: "yes" GEN3_DATE_LABEL + GEN3_HOSTNAME_LABEL spec: serviceAccountName: ssjdispatcher-service-account securityContext: diff --git a/kube/services/workflow-age-monitor/application.yaml b/kube/services/workflow-age-monitor/application.yaml new file mode 100644 index 000000000..99798bb2b --- /dev/null +++ b/kube/services/workflow-age-monitor/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: argo-workflow-age-monitor-application + namespace: argocd +spec: + destination: + namespace: default + server: https://kubernetes.default.svc + project: default + source: + repoURL: https://github.com/uc-cdis/cloud-automation.git + targetRevision: master + path: kube/services/workflow-age-monitor/ + directory: + exclude: "application.yaml" + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/kube/services/workflow-age-monitor/argo-workflow-age.yaml b/kube/services/workflow-age-monitor/argo-workflow-age.yaml new file mode 100644 index 000000000..52910ad4a --- /dev/null +++ b/kube/services/workflow-age-monitor/argo-workflow-age.yaml @@ -0,0 +1,61 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: argo-workflow-age + namespace: default +spec: + schedule: "*/5 * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: argo-workflow-monitor + 
containers: + - name: kubectl + image: quay.io/cdis/awshelper + env: + # This is 3 * 3600, or 3 hours + - name: THRESHOLD_TIME + value: "10800" + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_alarm_webhook + + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + # Get all workflows with specific label and check their age + kubectl get workflows --all-namespaces -o json | jq -c '.items[] | {name: .metadata.name, startedTimestamp: .status.startedAt}' | while read workflow_info; do + WORKFLOW_NAME=$(echo $workflow_info | jq -r '.name') + STARTED_TIMESTAMP=$(echo $workflow_info | jq -r '.startedTimestamp') + + echo "Checking workflow $WORKFLOW_NAME" + echo "$STARTED_TIMESTAMP" + + if [ "$STARTED_TIMESTAMP" != "null" ]; then + echo "Workflow $WORKFLOW_NAME started at $STARTED_TIMESTAMP" + # Convert creation timestamp to Unix Epoch time + CREATION_EPOCH=$(date -d "$STARTED_TIMESTAMP" +%s) + + # Get current Unix Epoch time + CURRENT_EPOCH=$(date +%s) + + # Calculate workflow age in seconds + WORKFLOW_AGE=$(($CURRENT_EPOCH - $CREATION_EPOCH)) + + # Check if workflow age is greater than threshold + if [ "$WORKFLOW_AGE" -gt "$THRESHOLD_TIME" ]; then + echo "Workflow $WORKFLOW_NAME has been running for over $THRESHOLD_TIME seconds, sending an alert" + # Send alert to Slack + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Workflow \`${WORKFLOW_NAME}\` has been running longer than $THRESHOLD_TIME seconds\"}" $SLACK_WEBHOOK_URL + fi + fi + done + restartPolicy: OnFailure diff --git a/kube/services/workflow-age-monitor/auth.yaml b/kube/services/workflow-age-monitor/auth.yaml new file mode 100644 index 000000000..fb7970a3e --- /dev/null +++ b/kube/services/workflow-age-monitor/auth.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-workflow-monitor + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-workflow-monitor-binding +subjects: + - kind: ServiceAccount + name: argo-workflow-monitor + namespace: default +roleRef: + kind: ClusterRole + name: argo-argo-workflows-view + apiGroup: rbac.authorization.k8s.io diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index e54a9cfc4..c6c4ffe74 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -29,6 +29,7 @@ spec: GEN3_DATE_LABEL GEN3_WTS_VERSION GEN3_ENV_LABEL + GEN3_HOSTNAME_LABEL spec: affinity: podAntiAffinity: @@ -162,10 +163,11 @@ spec: args: - "-c" - | - if hash alembic 2>/dev/null; then + if hash alembic 2>/dev/null || poetry run alembic --version >/dev/null 2>&1; then echo "Running DB migration" cd /wts - alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || alembic upgrade head else # WTS < 0.3.0 does not have the DB migration setup echo "Alembic not installed - not running DB migration" diff --git a/package-lock.json b/package-lock.json index 69c298911..bd0b13589 100644 --- a/package-lock.json +++ b/package-lock.json @@ -13,7 +13,7 @@ "async": "^3.2.2", "aws-sdk": "^2.814.0", "elasticdump": "^6.84.1", - "express": "^4.17.1", + "express": "^4.19.2", "json-schema": "^0.4.0", "minimatch": "^3.0.5", "minimist": "^1.2.6", @@ -32,7 +32,14 @@ "node": ">= 0.6" } }, - "node_modules/ajv": {}, + "node_modules/accepts/node_modules/negotiator": { + "version": "0.6.3", + "resolved": 
"https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/ansi-regex": { "version": "6.0.1", "license": "MIT", @@ -47,6 +54,14 @@ "version": "1.1.1", "license": "MIT" }, + "node_modules/asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, "node_modules/assert-plus": { "version": "1.0.0", "license": "MIT", @@ -116,6 +131,14 @@ ], "license": "MIT" }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, "node_modules/big.js": { "version": "5.2.2", "license": "MIT", @@ -124,12 +147,12 @@ } }, "node_modules/body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", @@ -137,7 +160,7 @@ "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.11.0", - "raw-body": "2.5.1", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -175,6 +198,42 @@ "node": ">= 0.8" } }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/caseless": { "version": "0.12.0", "license": "Apache-2.0" @@ -204,15 +263,17 @@ } }, "node_modules/content-type": { - "version": "1.0.4", - "license": "MIT", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", "engines": { "node": ">= 0.6" } }, "node_modules/cookie": { - "version": "0.5.0", - "license": "MIT", + "version": "0.6.0", + 
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", "engines": { "node": ">= 0.6" } @@ -221,6 +282,22 @@ "version": "1.0.6", "license": "MIT" }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, "node_modules/debug": { "version": "2.6.9", "license": "MIT", @@ -228,6 +305,22 @@ "ms": "2.0.0" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/delay": { "version": "5.0.0", "license": "MIT", @@ -260,6 +353,15 @@ "npm": "1.2.8000 || >= 1.4.16" } }, + "node_modules/ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "dependencies": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, "node_modules/ee-first": { "version": "1.1.1", "license": "MIT" @@ -343,6 +445,43 @@ "node": ">= 0.8" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-define-property/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escape-html": { "version": "1.0.3", "license": "MIT" @@ -362,16 +501,16 @@ } }, "node_modules/express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.19.2", + 
"resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -467,6 +606,16 @@ "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.34.tgz", "integrity": "sha512-hcU9AIQVHmPnmjRK+XUUYlILlr9pQrsqSrwov/JK1pnf3GTQowVBhx54FbvM0AU/VXGH4i3+vgXS5EguR7fysA==" }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, "node_modules/finalhandler": { "version": "1.2.0", "license": "MIT", @@ -490,6 +639,17 @@ "is-callable": "^1.1.3" } }, + "node_modules/for-each/node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/forever-agent": { "version": "0.6.1", "license": "Apache-2.0", @@ -524,10 +684,21 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "license": "MIT" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "dependencies": { + "assert-plus": "^1.0.0" + } }, - "node_modules/get-intrinsic": {}, "node_modules/gopd": { "version": "1.0.1", "license": "MIT", @@ -538,6 +709,24 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/gopd/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/har-schema": { "version": "2.0.0", "license": "ISC", @@ -556,14 +745,63 @@ "node": ">=6" } }, - "node_modules/has": { + "node_modules/har-validator/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { "version": "1.0.3", - "license": "MIT", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dependencies": { - "function-bind": "^1.1.1" + "function-bind": "^1.1.2" }, "engines": { - "node": ">= 0.4.0" + "node": ">= 0.4" } }, "node_modules/http-errors": { @@ -593,6 +831,30 @@ "npm": ">=1.3.7" } }, + "node_modules/http-signature/node_modules/sshpk": { + "version": "1.18.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/http-status": { "version": "1.5.3", "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.3.tgz", @@ -603,7 +865,8 @@ }, "node_modules/iconv-lite": { "version": "0.4.24", - "license": "MIT", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -626,7 +889,6 @@ "node": ">=10" } }, - "node_modules/ip-address": {}, "node_modules/ipaddr.js": { "version": "1.9.1", "license": "MIT", @@ -634,7 +896,6 @@ "node": ">= 0.10" } }, - "node_modules/is-callable": {}, "node_modules/is-typedarray": { "version": "1.0.0", "license": "MIT" @@ -655,11 +916,21 @@ "node": ">= 0.6.0" } }, + "node_modules/jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": 
"sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" + }, "node_modules/json-schema": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, "node_modules/json-stringify-safe": { "version": "5.0.1", "license": "ISC" @@ -742,7 +1013,8 @@ }, "node_modules/media-typer": { "version": "0.3.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", "engines": { "node": ">= 0.6" } @@ -775,7 +1047,6 @@ "node": ">=4" } }, - "node_modules/mime-db": {}, "node_modules/mime-types": { "version": "2.1.35", "license": "MIT", @@ -786,6 +1057,14 @@ "node": ">= 0.6" } }, + "node_modules/mime-types/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/minimatch": { "version": "3.1.2", "license": "ISC", @@ -808,7 +1087,6 @@ "version": "2.0.0", "license": "MIT" }, - "node_modules/negotiator": {}, "node_modules/oauth-sign": { "version": "0.9.0", "license": "Apache-2.0", @@ -816,6 +1094,14 @@ "node": "*" } }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/on-finished": { "version": "2.4.1", "license": "MIT", @@ -878,6 +1164,11 @@ "version": "2.1.0", "license": "MIT" }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, "node_modules/proxy-addr": { "version": "2.0.7", "license": "MIT", @@ -889,7 +1180,6 @@ "node": ">= 0.10" } }, - "node_modules/psl": {}, "node_modules/punycode": { "version": "2.1.1", "license": "MIT", @@ -911,63 +1201,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/qs/node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/qs/node_modules/get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": 
"^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/qs/node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/qs/node_modules/object-inspect": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", - "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/qs/node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/querystring": { "version": "0.2.0", "engines": { @@ -982,8 +1215,9 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "license": "MIT", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -994,7 +1228,6 @@ "node": ">= 0.8" } }, - "node_modules/readable-stream": {}, "node_modules/request": { "version": "2.88.2", "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", @@ -1066,6 +1299,25 @@ "readable-stream": "^2.3.0" } }, + "node_modules/s3-stream-upload/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/s3-stream-upload/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, "node_modules/s3signed": { "version": "0.1.0", "license": "ISC", @@ -1107,7 +1359,8 @@ }, "node_modules/safer-buffer": { "version": "2.1.2", - "license": "MIT" + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "node_modules/sax": { "version": "1.2.1", @@ -1160,10 +1413,79 @@ "node": ">= 0.8.0" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": 
"^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-length/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/setprototypeof": { "version": "1.2.0", "license": "ISC" }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/socks5-client": { "version": "1.2.8", "license": "MIT", @@ -1174,6 +1496,24 @@ "node": ">= 6.4.0" } }, + "node_modules/socks5-client/node_modules/ip-address": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz", + "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==", + "dependencies": { + "jsbn": "1.1.0", + "lodash": "^4.17.15", + "sprintf-js": "1.1.2" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/socks5-client/node_modules/jsbn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==" + }, "node_modules/socks5-http-client": { "version": "1.0.4", "license": "MIT", @@ -1194,7 +1534,11 @@ "node": ">= 6.4.0" } }, - "node_modules/sshpk": {}, + "node_modules/sprintf-js": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", + "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==" + }, "node_modules/statuses": { "version": "2.0.1", "license": "MIT", @@ -1202,6 +1546,19 @@ "node": ">= 0.8" } }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, "node_modules/through": { "version": "2.3.8", "license": "MIT" @@ -1224,6 +1581,11 @@ "node": ">=0.8" } }, + "node_modules/tough-cookie/node_modules/psl": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", + "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==" + }, "node_modules/tunnel-agent": { "version": "0.6.0", "license": "Apache-2.0", @@ -1234,9 +1596,15 @@ "node": "*" } }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" + }, "node_modules/type-is": { "version": "1.6.18", - "license": "MIT", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -1252,6 +1620,14 @@ "node": ">= 0.8" } }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, "node_modules/url": { "version": "0.10.3", "license": "MIT", @@ -1276,6 +1652,11 @@ "which-typed-array": "^1.1.2" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, "node_modules/util/node_modules/available-typed-arrays": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", @@ -1287,42 +1668,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/util/node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/util/node_modules/get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/util/node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/util/node_modules/has-tostringtag": { "version": "1.0.0", "resolved": 
"https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", @@ -1463,15 +1808,29 @@ "requires": { "mime-types": "~2.1.34", "negotiator": "0.6.3" + }, + "dependencies": { + "negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==" + } } }, - "ajv": {}, "ansi-regex": { "version": "6.0.1" }, "array-flatten": { "version": "1.1.1" }, + "asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "requires": { + "safer-buffer": "~2.1.0" + } + }, "assert-plus": { "version": "1.0.0" }, @@ -1512,16 +1871,24 @@ "base64-js": { "version": "1.5.1" }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "requires": { + "tweetnacl": "^0.14.3" + } + }, "big.js": { "version": "5.2.2" }, "body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "requires": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", @@ -1529,7 +1896,7 @@ "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.11.0", - "raw-body": "2.5.1", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" } @@ -1557,6 +1924,32 @@ "bytes": { "version": "3.1.2" }, + "call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, "caseless": { "version": "0.12.0" }, @@ -1576,20 +1969,47 @@ } }, "content-type": { - "version": "1.0.4" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==" }, "cookie": { - "version": "0.5.0" + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==" }, "cookie-signature": { "version": "1.0.6" }, + "core-util-is": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "requires": { + "assert-plus": "^1.0.0" + } + }, "debug": { "version": "2.6.9", "requires": { "ms": "2.0.0" } }, + "define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, "delay": { "version": "5.0.0" }, @@ -1602,6 +2022,15 @@ "destroy": { "version": "1.2.0" }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, "ee-first": { "version": "1.1.1" }, @@ -1668,6 +2097,33 @@ "encodeurl": { "version": "1.0.2" }, + "es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "requires": { + "get-intrinsic": "^1.2.4" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==" + }, "escape-html": { "version": "1.0.3" }, @@ -1678,16 +2134,16 @@ "version": "1.1.1" }, "express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "requires": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -1770,6 +2226,16 @@ } } }, + "fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, "finalhandler": { "version": "1.2.0", "requires": { @@ -1786,6 +2252,13 @@ "version": "0.3.3", "requires": { "is-callable": "^1.1.3" + }, + "dependencies": { + "is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==" + } } }, "forever-agent": { @@ -1806,13 +2279,36 @@ "version": "0.5.2" }, "function-bind": { - "version": "1.1.1" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==" + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "requires": { + "assert-plus": "^1.0.0" + } }, - "get-intrinsic": {}, "gopd": { "version": "1.0.1", "requires": { "get-intrinsic": "^1.1.3" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } } }, "har-schema": { @@ -1823,12 +2319,45 @@ "requires": { "ajv": "^6.12.3", "har-schema": "^2.0.0" + }, + "dependencies": { + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + } } }, - "has": { + "has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "requires": { + "es-define-property": "^1.0.0" + } + }, + "has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==" + }, + "has-symbols": { "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" + }, + "hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "requires": { - "function-bind": "^1.1.1" + "function-bind": "^1.1.2" } }, "http-errors": { @@ -1847,6 +2376,24 @@ "assert-plus": "^1.0.0", "jsprim": "^1.2.2", "sshpk": "^1.7.0" + }, + "dependencies": { + "sshpk": { + "version": "1.18.0", + "resolved": 
"https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + } } }, "http-status": { @@ -1856,6 +2403,8 @@ }, "iconv-lite": { "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "requires": { "safer-buffer": ">= 2.1.2 < 3" } @@ -1869,11 +2418,9 @@ "ini": { "version": "2.0.0" }, - "ip-address": {}, "ipaddr.js": { "version": "1.9.1" }, - "is-callable": {}, "is-typedarray": { "version": "1.0.0" }, @@ -1888,11 +2435,21 @@ "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz", "integrity": "sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==" }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" + }, "json-schema": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, "json-stringify-safe": { "version": "5.0.1" }, @@ -1950,7 +2507,9 @@ "integrity": "sha512-RicKUuLwZVNZ6ZdJHgIZnSeA05p8qWc5NW0uR96mpPIjN9WDLUg9+kj1esQU1GkPn9iLZVKatSQK5gyiaFHgJA==" }, "media-typer": { - "version": "0.3.0" + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==" }, "merge-descriptors": { "version": "1.0.1" @@ -1964,11 +2523,17 @@ "mime": { "version": "1.6.0" }, - "mime-db": {}, "mime-types": { "version": "2.1.35", "requires": { "mime-db": "1.52.0" + }, + "dependencies": { + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + } } }, "minimatch": { @@ -1985,10 +2550,14 @@ "ms": { "version": "2.0.0" }, - "negotiator": {}, "oauth-sign": { "version": "0.9.0" }, + "object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==" + }, "on-finished": { "version": "2.4.1", "requires": { @@ -2029,6 +2598,11 @@ "performance-now": { "version": "2.1.0" }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, "proxy-addr": { "version": "2.0.7", "requires": { @@ -2036,7 +2610,6 @@ 
"ipaddr.js": "1.9.1" } }, - "psl": {}, "punycode": { "version": "2.1.1" }, @@ -2046,47 +2619,6 @@ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", "requires": { "side-channel": "^1.0.4" - }, - "dependencies": { - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - } - }, - "has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" - }, - "object-inspect": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", - "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==" - }, - "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - } - } } }, "querystring": { @@ -2096,7 +2628,9 @@ "version": "1.2.1" }, "raw-body": { - "version": "2.5.1", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "requires": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -2104,7 +2638,6 @@ "unpipe": "1.0.0" } }, - "readable-stream": {}, "request": { "version": "2.88.2", "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", @@ -2158,6 +2691,27 @@ "requires": { "buffer-queue": "~1.0.0", "readable-stream": "^2.3.0" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + } } }, "s3signed": { @@ -2177,7 +2731,9 @@ "version": "5.2.1" }, "safer-buffer": { - "version": "2.1.2" + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sax": { "version": "1.2.1" @@ -2219,13 +2775,82 @@ "send": "0.18.0" } }, + "set-function-length": { + 
"version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, "setprototypeof": { "version": "1.2.0" }, + "side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "requires": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, "socks5-client": { "version": "1.2.8", "requires": { "ip-address": "~6.1.0" + }, + "dependencies": { + "ip-address": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz", + "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==", + "requires": { + "jsbn": "1.1.0", + "lodash": "^4.17.15", + "sprintf-js": "1.1.2" + } + }, + "jsbn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==" + } } }, "socks5-http-client": { @@ -2240,10 +2865,29 @@ "socks5-client": "~1.2.3" } }, - "sshpk": {}, + "sprintf-js": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", + "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==" + }, "statuses": { "version": "2.0.1" }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + }, + "dependencies": { + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + } + } + }, "through": { "version": "2.3.8" }, @@ -2255,6 +2899,13 @@ "requires": { "psl": "^1.1.28", "punycode": "^2.1.1" + }, + "dependencies": { + "psl": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", + "integrity": 
"sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==" + } } }, "tunnel-agent": { @@ -2263,8 +2914,15 @@ "safe-buffer": "^5.0.1" } }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" + }, "type-is": { "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "requires": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -2273,6 +2931,14 @@ "unpipe": { "version": "1.0.0" }, + "uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "requires": { + "punycode": "^2.1.0" + } + }, "url": { "version": "0.10.3", "requires": { @@ -2302,30 +2968,6 @@ "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==" }, - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - } - }, - "has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" - }, "has-tostringtag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", @@ -2378,6 +3020,11 @@ } } }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, "utils-merge": { "version": "1.0.1" }, diff --git a/package.json b/package.json index fd2761a59..716d9a161 100644 --- a/package.json +++ b/package.json @@ -11,7 +11,7 @@ "async": "^3.2.2", "aws-sdk": "^2.814.0", "elasticdump": "^6.84.1", - "express": "^4.17.1", + "express": "^4.19.2", "json-schema": "^0.4.0", "minimatch": "^3.0.5", "minimist": "^1.2.6", diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf index f8b237eeb..693462b1c 100644 --- a/tf_files/aws/modules/eks/cloud.tf +++ b/tf_files/aws/modules/eks/cloud.tf @@ -254,7 +254,7 @@ resource "aws_route_table_association" "private_kube" { } resource "aws_route_table_association" "secondary_subnet_kube" { - count = "${var.secondary_cidr_block != "" ? 1 : 0}" + count = "${var.secondary_cidr_block != "" ? 
4 : 0}" subnet_id = "${aws_subnet.eks_secondary_subnet.*.id[count.index]}" route_table_id = "${aws_route_table.eks_private.id}" depends_on = ["aws_subnet.eks_secondary_subnet"] diff --git a/tf_files/aws/nextflow_ami_pipeline/data.tf b/tf_files/aws/nextflow_ami_pipeline/data.tf new file mode 100644 index 000000000..a8b950b2a --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/data.tf @@ -0,0 +1,24 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:Name" + values = [var.vpc_name] + } +} + +data "aws_security_group" "default" { + vpc_id = data.aws_vpc.selected.id + + filter { + name = "group-name" + values = ["default"] + } +} + +data "aws_subnet" "private" { + vpc_id = data.aws_vpc.selected.id + + filter { + name = "tag:Name" + values = [var.subnet_name] + } +} diff --git a/tf_files/aws/nextflow_ami_pipeline/iam.tf b/tf_files/aws/nextflow_ami_pipeline/iam.tf new file mode 100644 index 000000000..0b3594dd4 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/iam.tf @@ -0,0 +1,36 @@ +## IAM Instance Profile for image builder + +resource "aws_iam_role" "image_builder" { + name = "EC2InstanceProfileForImageBuilder-nextflow" + assume_role_policy = data.aws_iam_policy_document.assume_role.json +} + +data "aws_iam_policy_document" "assume_role" { + statement { + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +resource "aws_iam_role_policy_attachment" "amazon_ssm" { + role = aws_iam_role.image_builder.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + +resource "aws_iam_role_policy_attachment" "image_builder" { + role = aws_iam_role.image_builder.name + policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilder" +} + +resource "aws_iam_role_policy_attachment" "image_builder_ecr" { + role = aws_iam_role.image_builder.name + policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds" +} + +resource "aws_iam_instance_profile" "image_builder" { + name = "image-builder-profile" + role = aws_iam_role.image_builder.name +} diff --git a/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf b/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf new file mode 100644 index 000000000..0c3415003 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf @@ -0,0 +1,161 @@ +## Image builder component to install AWS cli using conda + +resource "aws_imagebuilder_component" "install_software" { + name = "InstallSoftware" + platform = "Linux" + version = "1.0.1" + + data = yamlencode({ + name = "InstallSoftware" + description = "Installs bzip2, wget, Miniconda3 and awscli" + schemaVersion = 1.0 + + phases = [{ + name = "build" + steps = [{ + name = "InstallPackages" + action = "ExecuteBash" + inputs = { + commands = [ + "sudo yum install -y bzip2 wget" + ] + } + }, + { + name = "InstallMiniconda" + action = "ExecuteBash" + inputs = { + commands = [ + "sudo su ec2-user", + "mkdir -p /home/ec2-user", + "export HOME=/home/ec2-user/", + "cd $HOME", + "# Download and install miniconda in ec2-user's home dir", + "wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda-install.sh", + "bash miniconda-install.sh -b -f -p /home/ec2-user/miniconda", + "rm miniconda-install.sh" + ] + } + }, + { + name = "InstallAWSCLI" + action = "ExecuteBash" + inputs = { + commands = [ + "export HOME=/home/ec2-user/", + "/home/ec2-user/miniconda/bin/conda install -c conda-forge -y awscli" + ] + } + }] + }, + { + name = "validate" + steps = [{ + name = 
"CheckInstalls" + action = "ExecuteBash" + inputs = { + commands = [ + "which bzip2", + "which wget", + "which conda", + "/home/ec2-user/miniconda/bin/conda list | grep awscli" + ] + } + }] + }, + { + name = "test" + steps = [{ + name = "TestAWSCLI" + action = "ExecuteBash" + inputs = { + commands = [ + "/home/ec2-user/miniconda/bin/aws --version" + ] + } + }] + }] + }) +} + + +## Image builder infrastructure config +resource "aws_imagebuilder_infrastructure_configuration" "image_builder" { + name = "nextflow-infra-config" + instance_profile_name = aws_iam_instance_profile.image_builder.name + security_group_ids = [data.aws_security_group.default.id] + subnet_id = data.aws_subnet.private.id + terminate_instance_on_failure = true +} + + +## Make sure the ami produced is public + +resource "aws_imagebuilder_distribution_configuration" "public_ami" { + name = "public-ami-distribution" + + distribution { + ami_distribution_configuration { + name = "gen3-nextflow-{{ imagebuilder:buildDate }}" + + ami_tags = { + Role = "Public Image" + } + + launch_permission { + user_groups = ["all"] + } + } + + region = "us-east-1" + } +} + + +## Image recipe +resource "aws_imagebuilder_image_recipe" "recipe" { + name = "nextflow-fips-recipe" + + parent_image = var.base_image + + version = "1.0.0" + + block_device_mapping { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + volume_size = 30 + volume_type = "gp2" + encrypted = false + } + } + + user_data_base64 = try(base64encode(var.user_data), null) + + component { + component_arn = "arn:aws:imagebuilder:us-east-1:aws:component/docker-ce-linux/1.0.0/1" + } + + component { + component_arn = aws_imagebuilder_component.install_software.arn + } + + + +} + + +# Image builder pipeline + +resource "aws_imagebuilder_image_pipeline" "nextflow" { + image_recipe_arn = aws_imagebuilder_image_recipe.recipe.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.image_builder.arn + name = "nextflow-fips" + + distribution_configuration_arn = aws_imagebuilder_distribution_configuration.public_ami.arn + + image_scanning_configuration { + image_scanning_enabled = true + } + +} diff --git a/tf_files/aws/nextflow_ami_pipeline/manifest.json b/tf_files/aws/nextflow_ami_pipeline/manifest.json new file mode 100644 index 000000000..62394dc4a --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/manifest.json @@ -0,0 +1,6 @@ +{ + "terraform": { + "module_version" : "0.12" + } + } + \ No newline at end of file diff --git a/tf_files/aws/nextflow_ami_pipeline/root.tf b/tf_files/aws/nextflow_ami_pipeline/root.tf new file mode 100644 index 000000000..8ccad5e14 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/root.tf @@ -0,0 +1,7 @@ +# Inject credentials via the AWS_PROFILE environment variable and shared credentials file +# and/or EC2 metadata service +terraform { + backend "s3" { + encrypt = "true" + } +} \ No newline at end of file diff --git a/tf_files/aws/nextflow_ami_pipeline/sample.tfvars b/tf_files/aws/nextflow_ami_pipeline/sample.tfvars new file mode 100644 index 000000000..e6423d359 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/sample.tfvars @@ -0,0 +1 @@ +vpc_name = "devplanetv2" \ No newline at end of file diff --git a/tf_files/aws/nextflow_ami_pipeline/variables.tf b/tf_files/aws/nextflow_ami_pipeline/variables.tf new file mode 100644 index 000000000..58af6430f --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/variables.tf @@ -0,0 +1,28 @@ +variable "vpc_name" { + type = string +} + + +variable "subnet_name" 
+  type    = string
+  default = "eks_private_0"
+}
+
+variable "base_image" {
+  type    = string
+  default = "arn:aws:imagebuilder:us-east-1:aws:image/amazon-linux-2-ecs-optimized-kernel-5-x86/x.x.x"
+}
+
+variable "user_data" {
+  type    = string
+  default = <<EOT
+sudo yum install -y dracut-fips >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+EOT
+}
\ No newline at end of file