From 29f286f0400317358767dd512d1e478120de1faf Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Tue, 5 Mar 2024 16:45:28 +1100 Subject: [PATCH 01/15] feat: introduce retention policy support --- Makefile | 4 +- docker-compose.yaml | 2 + ...populate-api-data-ci-local-control-k8s.gql | 90 ++++ node-packages/commons/src/tasks.ts | 20 + services/actions-handler/go.mod | 3 + services/actions-handler/go.sum | 14 + .../handler/action_retention.go | 157 +++++++ services/actions-handler/handler/handler.go | 18 +- services/actions-handler/main.go | 39 ++ .../20240502000000_retention_policy.js | 31 ++ services/api/src/models/retentionpolicy.ts | 102 +++++ services/api/src/resolvers.js | 48 ++- services/api/src/resources/backup/sql.ts | 2 +- .../api/src/resources/deployment/resolvers.ts | 37 +- services/api/src/resources/deployment/sql.ts | 54 ++- .../api/src/resources/environment/helpers.ts | 20 +- .../api/src/resources/organization/helpers.ts | 5 + .../api/src/resources/organization/sql.ts | 5 + services/api/src/resources/project/sql.ts | 6 + .../src/resources/retentionpolicy/PAYLOADS.md | 73 ++++ .../src/resources/retentionpolicy/README.md | 134 ++++++ .../src/resources/retentionpolicy/harbor.ts | 72 ++++ .../src/resources/retentionpolicy/helpers.ts | 387 +++++++++++++++++ .../src/resources/retentionpolicy/history.ts | 207 +++++++++ .../resources/retentionpolicy/resolvers.ts | 394 ++++++++++++++++++ .../api/src/resources/retentionpolicy/sql.ts | 123 ++++++ .../src/resources/retentionpolicy/types.ts | 140 +++++++ services/api/src/resources/task/helpers.ts | 4 + services/api/src/resources/task/resolvers.ts | 15 + services/api/src/resources/task/sql.ts | 52 +++ services/api/src/typeDefs.js | 196 +++++++++ services/logs2notifications/main.go | 10 +- 32 files changed, 2448 insertions(+), 16 deletions(-) create mode 100644 services/actions-handler/handler/action_retention.go create mode 100644 services/api/database/migrations/20240502000000_retention_policy.js create mode 100644 services/api/src/models/retentionpolicy.ts create mode 100644 services/api/src/resources/retentionpolicy/PAYLOADS.md create mode 100644 services/api/src/resources/retentionpolicy/README.md create mode 100644 services/api/src/resources/retentionpolicy/harbor.ts create mode 100644 services/api/src/resources/retentionpolicy/helpers.ts create mode 100644 services/api/src/resources/retentionpolicy/history.ts create mode 100644 services/api/src/resources/retentionpolicy/resolvers.ts create mode 100644 services/api/src/resources/retentionpolicy/sql.ts create mode 100644 services/api/src/resources/retentionpolicy/types.ts diff --git a/Makefile b/Makefile index 5897e7ff6f..702f361fe4 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ BUILD_DEPLOY_IMAGE_TAG ?= edge # OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG and OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY # set this to a particular build image if required, defaults to nothing to consume what the chart provides -OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG= +OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=retention-policy OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY= # To build k3d with Calico instead of Flannel, set this to true. 
Note that the Calico install in lagoon-charts is always @@ -417,7 +417,7 @@ STERN_VERSION = v2.6.1 CHART_TESTING_VERSION = v3.11.0 K3D_IMAGE = docker.io/rancher/k3s:v1.31.1-k3s1 TESTS = [nginx,api,features-kubernetes,bulk-deployment,features-kubernetes-2,features-variables,active-standby-kubernetes,tasks,drush,python,gitlab,github,bitbucket,services,workflows] -CHARTS_TREEISH = main +CHARTS_TREEISH = retention-policies CHARTS_REPOSITORY = https://github.com/uselagoon/lagoon-charts.git #CHARTS_REPOSITORY = ../lagoon-charts TASK_IMAGES = task-activestandby diff --git a/docker-compose.yaml b/docker-compose.yaml index 8b0ac7b293..2515a26257 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -126,6 +126,8 @@ services: actions-handler: image: ${IMAGE_REPO:-lagoon}/actions-handler:${IMAGE_REPO_ACTIONS_HANDLER_TAG:-${IMAGE_REPO_TAG:-latest}} restart: on-failure + environment: + - S3_FILES_HOST=http://172.17.0.1:9000 depends_on: - broker ssh: diff --git a/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql b/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql index f897a2927d..51d5df5460 100644 --- a/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql +++ b/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql @@ -128,4 +128,94 @@ mutation PopulateApi { id } + RetPol1: createRetentionPolicy(input:{ + name: "harbor-policy" + type: HARBOR + harbor: { + enabled: true + rules: [ + { + name: "all branches, excluding pullrequests" + pattern: "[^pr-]*/*" + latestPulled: 3 + }, + { + name: "pullrequests" + pattern: "pr-*" + latestPulled: 1 + } + ] + schedule: "3 * * * *" + } + }) { + id + name + configuration { + ... on HarborRetentionPolicy { + enabled + rules { + name + pattern + latestPulled + } + schedule + } + } + type + created + updated + } + + RetPol2: createRetentionPolicy(input:{ + name: "history-policy" + type: HISTORY + history: { + enabled: true + deploymentHistory: 5 + deploymentType: COUNT + taskHistory: 10 + taskType: COUNT + } + }) { + id + name + configuration { + ... 
on HistoryRetentionPolicy { + enabled + deploymentHistory + deploymentType + taskHistory + taskType + } + } + type + created + updated + } + + RetPolLink1: addRetentionPolicyLink(input:{ + id: 1 + scope: GLOBAL + scopeName: "global", + }) { + id + name + type + source + created + updated + } + + RetPolLink2: addRetentionPolicyLink(input:{ + id: 2 + scope: GLOBAL + scopeName: "global", + }) { + id + name + type + source + created + updated + } } diff --git a/node-packages/commons/src/tasks.ts b/node-packages/commons/src/tasks.ts index 01f5ad44a0..e0edfc3340 100644 --- a/node-packages/commons/src/tasks.ts +++ b/node-packages/commons/src/tasks.ts @@ -1551,3 +1551,23 @@ export const consumeTaskMonitor = async function( } }); } + + // leverages the `misc` queue to handle retention policy only messages to controller + // this is essentially a clone of createMiscTask, but specifically for retention policies +export const createRetentionPolicyTask = async function(policyData: any) { + var policyPayload: any = { + key: `deploytarget:${policyData.key}`, + misc: {} + } + switch (`deploytarget:${policyData.key}`) { + case 'deploytarget:harborpolicy:update': + // remote-controller has a basic payload resource under `misc` called `miscResource` which can store bytes + // so this b64 encodes the payload event and inserts it into the miscResource so that the remote-controller will understand it + const payloadBytes = new Buffer(JSON.stringify(policyData.data.event).replace(/\\n/g, "\n")).toString('base64') + policyPayload.misc.miscResource = payloadBytes + break; + default: + break; + } + return sendToLagoonTasks(policyData.data.target+':misc', policyPayload); +} diff --git a/services/actions-handler/go.mod b/services/actions-handler/go.mod index 211de02aa1..c2a2e421c7 100644 --- a/services/actions-handler/go.mod +++ b/services/actions-handler/go.mod @@ -3,6 +3,7 @@ module github.com/uselagoon/lagoon/services/actions-handler go 1.22 require ( + github.com/aws/aws-sdk-go v1.15.11 github.com/cheshir/go-mq/v2 v2.0.1 github.com/uselagoon/machinery v0.0.29 gopkg.in/matryer/try.v1 v1.0.0-20150601225556-312d2599e12e @@ -11,11 +12,13 @@ require ( require ( github.com/NeowayLabs/wabbit v0.0.0-20210927194032-73ad61d1620e // indirect github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 // indirect + github.com/go-ini/ini v1.25.4 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/guregu/null v4.0.0+incompatible // indirect github.com/hashicorp/go-version v1.7.0 // indirect + github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect github.com/machinebox/graphql v0.2.3-0.20181106130121-3a9253180225 // indirect github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2 // indirect github.com/pborman/uuid v1.2.1 // indirect diff --git a/services/actions-handler/go.sum b/services/actions-handler/go.sum index ca98698ba3..2d571ad8d7 100644 --- a/services/actions-handler/go.sum +++ b/services/actions-handler/go.sum @@ -109,6 +109,7 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= 
+github.com/aws/aws-sdk-go v1.15.11 h1:m45+Ru/wA+73cOZXiEGLDH2d9uLN3iHqMc0/z4noDXE= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -300,6 +301,7 @@ github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -360,6 +362,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -491,6 +494,7 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -548,6 +552,7 @@ github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod 
h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -561,6 +566,7 @@ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -726,6 +732,7 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= @@ -789,8 +796,10 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -825,6 +834,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1034,6 +1044,8 @@ 
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1174,6 +1186,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1414,6 +1427,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/services/actions-handler/handler/action_retention.go b/services/actions-handler/handler/action_retention.go new file mode 100644 index 0000000000..87fa8c4c8c --- /dev/null +++ b/services/actions-handler/handler/action_retention.go @@ -0,0 +1,157 @@ +package handler + +import ( + "context" + "encoding/json" + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + mq "github.com/cheshir/go-mq/v2" + "github.com/uselagoon/machinery/utils/namespace" +) + +type S3RetentionCleanUp struct { + EnvironmentName string `json:"environmentName"` + ProjectName string `json:"projectName"` + Task struct { + ID string `json:"id"` + } `json:"task"` + EnvironmentID int `json:"environmentId"` + ProjectID int `json:"projectId"` + BuildName string `json:"buildName"` + RemoteID string `json:"remoteId"` +} + +func (m *Messenger) handleRetention(ctx context.Context, messageQueue *mq.MessageQueue, action *Action, messageID string) error { + prefix := fmt.Sprintf("(messageid:%s) %s: ", messageID, action.EventType) + data, _ := json.Marshal(action.Data) + retention := S3RetentionCleanUp{} 
+ json.Unmarshal(data, &retention) + switch action.EventType { + case "taskCleanup": + filePath := fmt.Sprintf("tasklogs/%s/%s-%s.txt", + retention.ProjectName, + retention.Task.ID, + retention.RemoteID, + ) + if retention.EnvironmentName != "" { + filePath = fmt.Sprintf("tasklogs/%s/%s/%s-%s.txt", + retention.ProjectName, + namespace.ShortenEnvironment(retention.ProjectName, namespace.MakeSafe(retention.EnvironmentName)), + retention.Task.ID, + retention.RemoteID, + ) + } + // clean up any files/attachments the task may have uploaded into it + err := m.deleteFileInDirS3( + prefix, + fmt.Sprintf("tasks/%s", + retention.Task.ID, + ), + retention, + ) + if err != nil { + log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + return err + } + // handle cleaning up task logs + err = m.deleteFileS3( + prefix, + filePath, + retention, + ) + if err != nil { + log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + return err + } + case "buildCleanup": + // handle cleaning up build logs + err := m.deleteFileS3( + prefix, + fmt.Sprintf("buildlogs/%s/%s/%s-%s.txt", + retention.ProjectName, + retention.EnvironmentName, + retention.BuildName, + retention.RemoteID, + ), + retention, + ) + if err != nil { + log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + return err + } + } + return nil +} + +// deleteFileS3 +func (m *Messenger) deleteFileS3(prefix, fileName string, retention S3RetentionCleanUp) error { + var forcePath bool + forcePath = true + session, err := session.NewSession(&aws.Config{ + Region: aws.String(m.S3Configuration.S3FilesRegion), + Endpoint: aws.String(m.S3Configuration.S3FilesOrigin), + Credentials: credentials.NewStaticCredentials(m.S3Configuration.S3FilesAccessKeyID, m.S3Configuration.S3FilesSecretAccessKey, ""), + S3ForcePathStyle: &forcePath, + }) + if err != nil { + return err + } + + object := s3.DeleteObjectInput{ + Bucket: aws.String(m.S3Configuration.S3FilesBucket), + Key: aws.String(fileName), + } + _, err = s3.New(session).DeleteObject(&object) + if err != nil { + return err + } + if m.EnableDebug { + log.Println(fmt.Sprintf("%sDeleted file %s for environment: %v, id: %v", prefix, fileName, retention.EnvironmentName, retention.EnvironmentID)) + } + return nil +} + +// deleteDirFileS3 +func (m *Messenger) deleteFileInDirS3(prefix, fileName string, retention S3RetentionCleanUp) error { + var forcePath bool + forcePath = true + session, err := session.NewSession(&aws.Config{ + Region: aws.String(m.S3Configuration.S3FilesRegion), + Endpoint: aws.String(m.S3Configuration.S3FilesOrigin), + Credentials: credentials.NewStaticCredentials(m.S3Configuration.S3FilesAccessKeyID, m.S3Configuration.S3FilesSecretAccessKey, ""), + S3ForcePathStyle: &forcePath, + }) + if err != nil { + return err + } + listobject := s3.ListObjectsInput{ + Bucket: aws.String(m.S3Configuration.S3FilesBucket), + Prefix: aws.String(fileName), + } + s := s3.New(session) + err = s.ListObjectsPages(&listobject, func(page *s3.ListObjectsOutput, lastPage bool) bool { + for _, c := range page.Contents { + _, err := s.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(m.S3Configuration.S3FilesBucket), + Key: c.Key, + }) + if err != nil { + log.Println(fmt.Sprintf("%sError deleting file %s for environment: %v, id: %v: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID, err)) + continue // try other files + } + if m.EnableDebug { + log.Println(fmt.Sprintf("%sDeleted file %s for environment: %v, id: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID)) + } + } + return 
*page.IsTruncated + }) + if err != nil { + return err + } + return nil +} diff --git a/services/actions-handler/handler/handler.go b/services/actions-handler/handler/handler.go index 07abb98d76..a5bbf66b86 100644 --- a/services/actions-handler/handler/handler.go +++ b/services/actions-handler/handler/handler.go @@ -24,6 +24,16 @@ type LagoonAPI struct { Version string `json:"version"` } +// S3Configuration . +type S3Configuration struct { + S3FilesAccessKeyID string + S3FilesSecretAccessKey string + S3FilesBucket string + S3FilesRegion string + S3FilesOrigin string + S3IsGCS bool +} + // Action is the structure of an action that is received via the message queue. type Action struct { Type string `json:"type"` // defines the action type @@ -41,6 +51,7 @@ type messenger interface { type Messenger struct { Config mq.Config LagoonAPI LagoonAPI + S3Configuration S3Configuration ConnectionAttempts int ConnectionRetryInterval int ActionsQueueName string @@ -49,10 +60,11 @@ type Messenger struct { } // New returns a messaging with config -func New(config mq.Config, lagoonAPI LagoonAPI, startupAttempts int, startupInterval int, actionsQueueName, controllerQueueName string, enableDebug bool) *Messenger { +func New(config mq.Config, lagoonAPI LagoonAPI, s3Configuration S3Configuration, startupAttempts int, startupInterval int, actionsQueueName, controllerQueueName string, enableDebug bool) *Messenger { return &Messenger{ Config: config, LagoonAPI: lagoonAPI, + S3Configuration: s3Configuration, ConnectionAttempts: startupAttempts, ConnectionRetryInterval: startupInterval, ActionsQueueName: actionsQueueName, @@ -113,6 +125,10 @@ func (m *Messenger) Consumer() { // and perform the steps to run the mutation against the lagoon api case "deployEnvironmentLatest": err = m.handleDeployEnvironment(ctx, messageQueue, action, messageID) + // check if this a `retentionCleanup` type of action + // and perform the steps to clean up anything related to the retention clean up event type + case "retentionCleanup": + err = m.handleRetention(ctx, messageQueue, action, messageID) } // if there aren't any errors, then ack the message, an error indicates that there may have been an issue with the api handling the request // skipping this means the message will remain in the queue diff --git a/services/actions-handler/main.go b/services/actions-handler/main.go index 60f7529f81..f3437cd252 100644 --- a/services/actions-handler/main.go +++ b/services/actions-handler/main.go @@ -32,6 +32,13 @@ var ( controllerExchange string jwtSubject string jwtIssuer string + + s3FilesAccessKeyID string + s3FilesSecretAccessKey string + s3FilesBucket string + s3FilesRegion string + s3FilesOrigin string + s3isGCS bool ) func main() { @@ -71,6 +78,21 @@ func main() { "The name of the queue in rabbitmq to use.") flag.StringVar(&controllerExchange, "controller-exchange", "lagoon-tasks", "The name of the exchange in rabbitmq to use.") + + // S3 configuration + flag.StringVar(&s3FilesAccessKeyID, "s3-files-access-key", "minio", + "The S3 files access key.") + flag.StringVar(&s3FilesSecretAccessKey, "s3-files-secret-access-key", "minio123", + "The S3 files secret access key.") + flag.StringVar(&s3FilesBucket, "s3-files-bucket", "lagoon-files", + "The S3 files bucket.") + flag.StringVar(&s3FilesRegion, "s3-files-region", "auto", + "The S3 files region.") + flag.StringVar(&s3FilesOrigin, "s3-files-origin", "http://minio.127.0.0.1.nip.io:9000", + "The S3 files origin.") + flag.BoolVar(&s3isGCS, "s3-google-cloud", false, + "If the storage backend is 
google cloud.") + flag.Parse() // get overrides from environment variables @@ -89,6 +111,13 @@ func main() { controllerQueueName = variables.GetEnv("CONTROLLER_QUEUE_NAME", controllerQueueName) controllerExchange = variables.GetEnv("CONTROLLER_EXCHANGE", controllerExchange) + s3FilesAccessKeyID = variables.GetEnv("S3_FILES_ACCESS_KEY_ID", s3FilesAccessKeyID) + s3FilesSecretAccessKey = variables.GetEnv("S3_FILES_SECRET_ACCESS_KEY", s3FilesSecretAccessKey) + s3FilesBucket = variables.GetEnv("S3_FILES_BUCKET", s3FilesBucket) + s3FilesRegion = variables.GetEnv("S3_FILES_REGION", s3FilesRegion) + s3FilesOrigin = variables.GetEnv("S3_FILES_HOST", s3FilesOrigin) + s3isGCS = variables.GetEnvBool("S3_FILES_GCS", s3isGCS) + enableDebug := true graphQLConfig := handler.LagoonAPI{ @@ -100,6 +129,15 @@ func main() { Version: lagoonAPIVersion, } + s3Config := handler.S3Configuration{ + S3FilesAccessKeyID: s3FilesAccessKeyID, + S3FilesSecretAccessKey: s3FilesSecretAccessKey, + S3FilesBucket: s3FilesBucket, + S3FilesRegion: s3FilesRegion, + S3FilesOrigin: s3FilesOrigin, + S3IsGCS: s3isGCS, + } + log.Println("actions-handler running") config := mq.Config{ @@ -208,6 +246,7 @@ func main() { messenger := handler.New(config, graphQLConfig, + s3Config, startupConnectionAttempts, startupConnectionInterval, actionsQueueName, diff --git a/services/api/database/migrations/20240502000000_retention_policy.js b/services/api/database/migrations/20240502000000_retention_policy.js new file mode 100644 index 0000000000..0ff5ba450c --- /dev/null +++ b/services/api/database/migrations/20240502000000_retention_policy.js @@ -0,0 +1,31 @@ +/** + * @param { import("knex").Knex } knex + * @returns { Promise } + */ +exports.up = async function(knex) { + return knex.schema + .createTable('retention_policy', function (table) { + table.increments('id').notNullable().primary(); + table.string('name', 300).unique({indexName: 'name'}); + table.enu('type',['harbor','history']).notNullable(); + table.text('configuration'); + table.timestamp('updated').notNullable().defaultTo(knex.fn.now()); + table.timestamp('created').notNullable().defaultTo(knex.fn.now()); + }) + .createTable('retention_policy_reference', function (table) { + table.integer('retention_policy'); + table.enu('scope',['global','organization','project']).notNullable(); + table.integer('id'); + table.unique(['retention_policy', 'scope', 'id'], {indexName: 'organization_policy'}); + }) +}; + +/** + * @param { import("knex").Knex } knex + * @returns { Promise } + */ +exports.down = async function(knex) { + return knex.schema + .dropTable('retention_policy') + .dropTable('retention_policy_reference') +}; \ No newline at end of file diff --git a/services/api/src/models/retentionpolicy.ts b/services/api/src/models/retentionpolicy.ts new file mode 100644 index 0000000000..3e6c74eecf --- /dev/null +++ b/services/api/src/models/retentionpolicy.ts @@ -0,0 +1,102 @@ +import { logger } from "../loggers/logger" + +export interface HarborRetentionPolicy { + enabled: boolean + branchRetention: number + pullrequestRetention: number + schedule: string +} + +export interface HistoryRetentionPolicy { + enabled: boolean + deploymentHistory: number + taskHistory: number +} + +export const RetentionPolicy = () => { + const convertHarborRetentionPolicyToJSON = async ( + harbor: HarborRetentionPolicy + ): Promise => { + const c = JSON.stringify(harbor) + return c + }; + + const convertHistoryRetentionPolicyToJSON = async ( + history: HistoryRetentionPolicy + ): Promise => { + const c = 
JSON.stringify(history) + return c + }; + + const convertJSONToHarborRetentionPolicy = async ( + configuration: string + ): Promise => { + const c = JSON.parse(configuration) + if (typeof c.enabled != "boolean") { + throw new Error("enabled must be a boolean"); + } + if (typeof c.branchRetention != "number") { + throw new Error("branchRetention must be a number"); + } + if (typeof c.pullrequestRetention != "number") { + throw new Error("pullrequestRetention must be a number"); + } + if (typeof c.schedule != "string") { + throw new Error("schedule must be a string"); + } + return c + }; + + const convertJSONToHistoryRetentionPolicy = async ( + configuration: string + ): Promise => { + const c = JSON.parse(configuration) + if (typeof c.enabled != "boolean") { + throw new Error("enabled must be a boolean"); + } + if (typeof c.deploymentHistory != "number") { + throw new Error("deploymentHistory must be a number"); + } + if (typeof c.taskHistory != "number") { + throw new Error("taskHistory must be a number"); + } + return c + }; + + // run the configuration patches through the validation process + const returnValidatedConfiguration = async (type: string, patch: any): Promise => { + const c = JSON.stringify(patch[type]) + switch (type) { + case "harbor": + try { + await convertJSONToHarborRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided configuration is not valid for type ${type}: ${e}` + ); + } + case "history": + try { + await convertJSONToHistoryRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided configuration is not valid for type ${type}: ${e}` + ); + } + default: + throw new Error( + `Provided configuration is not valid for type ${type}` + ); + } + } + + return { + convertHarborRetentionPolicyToJSON, + convertHistoryRetentionPolicyToJSON, + convertJSONToHarborRetentionPolicy, + convertJSONToHistoryRetentionPolicy, + returnValidatedConfiguration + }; +}; \ No newline at end of file diff --git a/services/api/src/resolvers.js b/services/api/src/resolvers.js index 0ea710d047..3b2ff29f6e 100644 --- a/services/api/src/resolvers.js +++ b/services/api/src/resolvers.js @@ -278,6 +278,17 @@ const { getRestoreLocation, } = require('./resources/backup/resolvers'); +const { + createRetentionPolicy, + updateRetentionPolicy, + deleteRetentionPolicy, + getRetentionPoliciesByProjectId, + getRetentionPoliciesByOrganizationId, + listRetentionPolicies, + addRetentionPolicyLink, + removeRetentionPolicyLink, +} = require('./resources/retentionpolicy/resolvers'); + const { getEnvVarsByProjectId, getEnvVarsByEnvironmentId, @@ -384,6 +395,20 @@ const resolvers = { OWNER: 'platform-owner', ORGANIZATION_OWNER: 'platform-organization-owner', }, + RetentionPolicyType: { + HARBOR: 'harbor', + HISTORY: 'history', + }, + RetentionPolicyScope: { + GLOBAL: 'global', + ORGANIZATION: 'organization', + PROJECT: 'project', + }, + HistoryRetentionType: { + COUNT: 'count', + DAYS: 'days', + MONTHS: 'months', + }, Openshift: { projectUser: getProjectUser, token: getToken, @@ -406,6 +431,7 @@ const resolvers = { groups: getGroupsByProjectId, privateKey: getPrivateKey, publicKey: getProjectDeployKey, + retentionPolicies: getRetentionPoliciesByProjectId, }, GroupInterface: { __resolveType(group) { @@ -456,12 +482,14 @@ const resolvers = { environments: getEnvironmentsByOrganizationId, owners: getOwnersByOrganizationId, deployTargets: getDeployTargetsByOrganizationId, - notifications: getNotificationsByOrganizationId + notifications: getNotificationsByOrganizationId, + 
retentionPolicies: getRetentionPoliciesByOrganizationId }, OrgProject: { groups: getGroupsByOrganizationsProject, groupCount: getGroupCountByOrganizationProject, notifications: getNotificationsForOrganizationProjectId, + retentionPolicies: getRetentionPoliciesByProjectId, }, OrgEnvironment: { project: getProjectById, @@ -509,6 +537,18 @@ const resolvers = { } } }, + RetentionPolicyConfiguration: { + __resolveType(obj) { + switch (obj.type) { + case 'harbor': + return 'HarborRetentionPolicy'; + case 'history': + return 'HistoryRetentionPolicy'; + default: + return null; + } + } + }, AdvancedTaskDefinition: { __resolveType (obj) { switch(obj.type) { @@ -591,6 +631,7 @@ const resolvers = { getEnvVariablesByProjectEnvironmentName, checkBulkImportProjectsAndGroupsToOrganization, allPlatformUsers: getAllPlatformUsers, + listRetentionPolicies }, Mutation: { addProblem, @@ -717,6 +758,11 @@ const resolvers = { deleteEnvironmentService, addPlatformRoleToUser, removePlatformRoleFromUser, + createRetentionPolicy, + updateRetentionPolicy, + deleteRetentionPolicy, + addRetentionPolicyLink, + removeRetentionPolicyLink }, Subscription: { backupChanged: backupSubscriber, diff --git a/services/api/src/resources/backup/sql.ts b/services/api/src/resources/backup/sql.ts index 1edaf3ae50..223c254a24 100644 --- a/services/api/src/resources/backup/sql.ts +++ b/services/api/src/resources/backup/sql.ts @@ -38,7 +38,7 @@ export const Sql = { deleteBackup: (backupId: string) => knex('environment_backup') .where('backup_id', backupId) - .delete() + .delete() // actually delete the backup, there is no real reason to retain this information, the snapshot is gone .toString(), truncateBackup: () => knex('environment_backup') diff --git a/services/api/src/resources/deployment/resolvers.ts b/services/api/src/resources/deployment/resolvers.ts index 28908dbc9c..8e4ad12724 100644 --- a/services/api/src/resources/deployment/resolvers.ts +++ b/services/api/src/resources/deployment/resolvers.ts @@ -18,6 +18,8 @@ import { knex, query, isPatchEmpty } from '../../util/db'; import { Sql } from './sql'; import { Helpers } from './helpers'; import { Helpers as environmentHelpers } from '../environment/helpers'; +import { Helpers as retentionHelpers } from '../retentionpolicy/helpers'; +import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; import { Helpers as projectHelpers } from '../project/helpers'; import { addTask } from '@lagoon/commons/dist/api'; import { Sql as environmentSql } from '../environment/sql'; @@ -343,6 +345,10 @@ export const addDeployment: ResolverFn = async ( if (!sourceType) { sourceType = "API" } + const projectData = await projectHelpers(sqlClientPool).getProjectById( + environment.project + ); + const { insertId } = await query( sqlClientPool, Sql.insertDeployment({ @@ -362,10 +368,12 @@ export const addDeployment: ResolverFn = async ( sourceUser, }) ); - const rows = await query(sqlClientPool, Sql.selectDeployment(insertId)); const deployment = R.prop(0, rows); + // pass to the HistoryRetentionEnforcer to clean up deployments based on any retention policies + await HistoryRetentionEnforcer().cleanupDeployments(projectData, environment) + pubSub.publish(EVENTS.DEPLOYMENT, deployment); return deployment; }; @@ -381,13 +389,28 @@ export const deleteDeployment: ResolverFn = async ( project: R.path(['0', 'pid'], perms) }); + const deployment = await Helpers(sqlClientPool).getDeploymentById(id) + + if (!deployment) { + throw new Error( + `Invalid deployment input` + ); + } + + const environmentData 
= await environmentHelpers(sqlClientPool).getEnvironmentById(parseInt(deployment.environment)); + const projectData = await projectHelpers(sqlClientPool).getProjectById(environmentData.project); + await query(sqlClientPool, Sql.deleteDeployment(id)); + // pass the deployment to the HistoryRetentionEnforcer + await HistoryRetentionEnforcer().cleanupDeployment(projectData, environmentData, deployment) + userActivityLogger(`User deleted deployment '${id}'`, { project: '', event: 'api:deleteDeployment', payload: { - deployment: id + deployment: id, + deploymentName: deployment.name } }); @@ -426,9 +449,10 @@ export const updateDeployment: ResolverFn = async ( Sql.selectPermsForDeployment(id) ); + const projectId = R.path(['0', 'pid'], permsDeployment) // Check access to modify deployment as it currently stands await hasPermission('deployment', 'update', { - project: R.path(['0', 'pid'], permsDeployment) + project: projectId }); if (environment) { @@ -466,6 +490,13 @@ export const updateDeployment: ResolverFn = async ( pubSub.publish(EVENTS.DEPLOYMENT, deployment); + try { + // handle retention policy hooks + await retentionHelpers(sqlClientPool).postDeploymentProjectPolicyHook(parseInt(projectId.toString(), 10), status) + } catch (e) { + logger.warn(`failed to perform postDeploymentProjectPolicyHook: ${e}`) + } + userActivityLogger(`User updated deployment '${id}'`, { project: '', event: 'api:updateDeployment', diff --git a/services/api/src/resources/deployment/sql.ts b/services/api/src/resources/deployment/sql.ts index e84e942d36..d22b8602be 100644 --- a/services/api/src/resources/deployment/sql.ts +++ b/services/api/src/resources/deployment/sql.ts @@ -74,5 +74,57 @@ export const Sql = { .select({ pid: 'environment.project' }) .join('environment', 'deployment.environment', '=', 'environment.id') .where('deployment.id', id) - .toString() + .toString(), + // this selects all deployments for the environment and returns everything outside of the requested retain value + selectDeploymentHistoryRetention: (environment: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM deployment + WHERE environment=`+environment+` AND id NOT IN ( + SELECT id + FROM ( + SELECT id + FROM deployment + WHERE environment=`+environment+` + ORDER BY id DESC + LIMIT `+retain+` + ) d + );`) + .toString(), + // this selects all tasks for the environment and returns everything outside of the requested retain days value + selectDeploymentHistoryRetentionDays: (environment: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+` AND created >= NOW() - INTERVAL `+retain+` DAY;`) + .toString(), + // this selects all tasks for the environment and returns everything outside of the requested retain months value + selectDeploymentHistoryRetentionMonths: (environment: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+` AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + .toString(), + // this selects all tasks for the environment and returns everything + selectDeploymentHistoryForEnvironment: (environment: number) => + knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+`;`) + .toString(), + // same as select, except it deletes all deployments for the environment outside of the requested retain value + deleteDeploymentHistory: (environment: number, retain: number) => + knex.raw(`DELETE FROM deployment + WHERE environment=`+environment+` AND id NOT IN ( + 
SELECT id + FROM ( + SELECT id + FROM deployment + WHERE environment=`+environment+` + ORDER BY id DESC + LIMIT `+retain+` + ) d + );`) + .toString(), + // same as select, except it deletes all tasks for the environment outside of the requested retain value + deleteDeploymentHistoryDays: (environment: number, retain: number) => + knex.raw(`DELETE FROM deployment WHERE environment=`+environment+` AND created >= NOW() - INTERVAL `+retain+` DAY;`) + .toString(), + // same as select, except it deletes all tasks for the environment outside of the requested retain value + deleteDeploymentHistoryMonths: (environment: number, retain: number) => + knex.raw(`DELETE FROM deployment WHERE environment=`+environment+` AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + .toString(), + // delete all deployments for environment + deleteDeploymentHistoryForEnvironment: (environment: number) => + knex.raw(`DELETE FROM deployment WHERE environment=`+environment+`;`) + .toString(), }; diff --git a/services/api/src/resources/environment/helpers.ts b/services/api/src/resources/environment/helpers.ts index 201b285f37..aa731bb88d 100644 --- a/services/api/src/resources/environment/helpers.ts +++ b/services/api/src/resources/environment/helpers.ts @@ -6,7 +6,10 @@ import { Sql } from './sql'; import { Sql as problemSql } from '../problem/sql'; import { Sql as factSql } from '../fact/sql'; import { Helpers as projectHelpers } from '../project/helpers'; -// import { logger } from '../../loggers/logger'; +import { Sql as deploymentSql } from '../deployment/sql'; +import { Sql as taskSql } from '../task/sql'; +import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; +import { logger } from '../../loggers/logger'; export const Helpers = (sqlClientPool: Pool) => { const aliasOpenshiftToK8s = (environments: any[]) => { @@ -31,6 +34,8 @@ export const Helpers = (sqlClientPool: Pool) => { aliasOpenshiftToK8s, getEnvironmentById, deleteEnvironment: async (name: string, eid: number, pid: number) => { + const environmentData = await Helpers(sqlClientPool).getEnvironmentById(eid); + const projectData = await projectHelpers(sqlClientPool).getProjectById(pid); // clean up environment variables // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment variables`) await query( @@ -43,7 +48,18 @@ export const Helpers = (sqlClientPool: Pool) => { sqlClientPool, Sql.deleteServices(eid) ); - // @TODO: environment_storage, deployment, environment_backup, task, environment_problem, environment_fact + // @TODO: environment_storage, environment_backup, environment_problem, environment_fact + // purge all history for this environment, including logs and files from s3 + try { + await HistoryRetentionEnforcer().cleanupAllDeployments(projectData, environmentData) // remove all deployments and associated files + } catch (e) { + logger.error(`error running deployment retention enforcer: ${e}`) + } + try { + await HistoryRetentionEnforcer().cleanupAllTasks(projectData, environmentData) // remove all tasks and associated files + } catch (e) { + logger.error(`error running task retention enforcer: ${e}`) + } // delete the environment // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid}`) await query( diff --git a/services/api/src/resources/organization/helpers.ts b/services/api/src/resources/organization/helpers.ts index 8b208ad361..732a92f54b 100644 --- a/services/api/src/resources/organization/helpers.ts +++ b/services/api/src/resources/organization/helpers.ts @@ -8,6 +8,10 @@ export const 
Helpers = (sqlClientPool: Pool) => { const getOrganizationById = async (id: number) => { const rows = await query(sqlClientPool, Sql.selectOrganization(id)); return R.prop(0, rows); + } + const getOrganizationByName = async (name: string) => { + const rows = await query(sqlClientPool, Sql.selectOrganizationByName(name)); + return R.prop(0, rows); }; const getProjectsByOrganizationId = async (id: number) => { const rows = await query(sqlClientPool, Sql.selectOrganizationProjects(id)); @@ -61,6 +65,7 @@ export const Helpers = (sqlClientPool: Pool) => { }; return { getOrganizationById, + getOrganizationByName, getProjectsByOrganizationId, getDeployTargetsByOrganizationId, getNotificationsForOrganizationId, diff --git a/services/api/src/resources/organization/sql.ts b/services/api/src/resources/organization/sql.ts index d05fca12dd..c58f71dfaa 100644 --- a/services/api/src/resources/organization/sql.ts +++ b/services/api/src/resources/organization/sql.ts @@ -82,6 +82,11 @@ export const Sql = { knex('project') .where('organization', '=', id) .toString(), + selectOrganizationProjectIds: (id: number) => + knex('project') + .select(knex.raw('group_concat(id) as project_ids')) + .where('organization', '=', id) + .toString(), selectOrganizationEnvironments: (id: number) => knex('organization') .select('e.*') diff --git a/services/api/src/resources/project/sql.ts b/services/api/src/resources/project/sql.ts index 2b7f941087..3085f03e5e 100644 --- a/services/api/src/resources/project/sql.ts +++ b/services/api/src/resources/project/sql.ts @@ -16,6 +16,12 @@ export const Sql = { .whereNotIn('id', ids) .orderBy('id', 'asc') .toString(), + selectAllProjectIDsNotIn: (ids) => + knex('project') + .select(knex.raw('group_concat(id) as project_ids')) + .whereNotIn('id', ids) + .orderBy('id', 'asc') + .toString(), selectAllProjectsIn: (ids: number) => knex('project') .select('id') diff --git a/services/api/src/resources/retentionpolicy/PAYLOADS.md b/services/api/src/resources/retentionpolicy/PAYLOADS.md new file mode 100644 index 0000000000..8345b6e988 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/PAYLOADS.md @@ -0,0 +1,73 @@ + + +# harbor remote event payloads + +When a harbor policy is enforced, a message will be sent to the remote clusters, the payload sent will be one of the following depending on if the policy is being added, updated, or removed. + +## add or update policy + +Policy addition or updating will contain the policy information and the project information. The remote-controller uses this and the `eventType` to add or update the policy in the harbor that is associated to the project in each remote. 
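The `misc.miscResource` value in the payload below is simply the `HarborRetentionMessage` event serialised to JSON and base64 encoded, which is what `createRetentionPolicyTask` in `node-packages/commons/src/tasks.ts` does before publishing to the `misc` queue. As a minimal sketch of that round trip (for illustration only, using `Buffer.from` rather than the deprecated `new Buffer` call, and assuming `event` holds the `HarborRetentionMessage` object):

```
// encode: serialise the HarborRetentionMessage event and base64 encode it into miscResource
const miscResource = Buffer.from(JSON.stringify(event)).toString('base64');

// decode: recover the HarborRetentionMessage structure shown in the decoded example below
const decoded = JSON.parse(Buffer.from(miscResource, 'base64').toString('utf8'));
```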
+ +``` +{ + "key":"deploytarget:harborpolicy:update", + "misc":{ + "miscResource":"eyJ0eXBlIjoiaGFyYm9yUmV0ZW50aW9uUG9saWN5IiwiZXZlbnRUeXBlIjoidXBkYXRlUG9saWN5IiwiZGF0YSI6eyJwcm9qZWN0Ijp7ImlkIjoxODAsIm5hbWUiOiJsYWdvb24tZGVtby1vcmcifSwicG9saWN5Ijp7ImVuYWJsZWQiOnRydWUsInJ1bGVzIjpbeyJuYW1lIjoiYWxsIGJyYW5jaGVzLCBleGNsdWRpbmcgcHVsbHJlcXVlc3RzIiwicGF0dGVybiI6IltecHJcXC1dKi8qIiwibGF0ZXN0UHVsbGVkIjozfSx7Im5hbWUiOiJwdWxscmVxdWVzdHMiLCJwYXR0ZXJuIjoicHItKiIsImxhdGVzdFB1bGxlZCI6MX1dLCJzY2hlZHVsZSI6IjMgMyAqICogMyJ9fX0" + } +} +``` +And the decoded `miscResource` payload is structured like this, based on the type `HarborRetentionMessage`: +``` +{ + "type": "harborRetentionPolicy", + "eventType": "updatePolicy", + "data": { + "project": { + "id": 180, + "name": "lagoon-demo-org" + }, + "policy": { + "enabled": true, + "rules": [ + { + "name": "all branches, excluding pullrequests", + "pattern": "[^pr\\-]*/*", + "latestPulled": 3 + }, + { + "name": "pullrequests", + "pattern": "pr-*", + "latestPulled": 1 + } + ], + "schedule": "3 3 * * 3" + } + } +} +``` + +## remove policy + +Policy removal contains just the project information, the remote-controller uses this and the `eventType` to remove the policy from the associated project in harbor. + +``` +{ + "key":"deploytarget:harborpolicy:update", + "misc":{ + "miscResource":"eyJ0eXBlIjoiaGFyYm9yUmV0ZW50aW9uUG9saWN5IiwiZXZlbnRUeXBlIjoicmVtb3ZlUG9saWN5IiwiZGF0YSI6eyJwcm9qZWN0Ijp7ImlkIjoxODAsIm5hbWUiOiJsYWdvb24tZGVtby1vcmcifX19" + } +} +``` +And the decoded `miscResource` payload is structured like this, based on the type `HarborRetentionMessage`: +``` +{ + "type": "harborRetentionPolicy", + "eventType": "removePolicy", + "data": { + "project": { + "id": 180, + "name": "lagoon-demo-org" + } + } +} +``` \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/README.md b/services/api/src/resources/retentionpolicy/README.md new file mode 100644 index 0000000000..355318c0cd --- /dev/null +++ b/services/api/src/resources/retentionpolicy/README.md @@ -0,0 +1,134 @@ +# retentionpolicy + +# policy types + +Policy types are used to influence different parts of retention within Lagoon, this means it is possible to add retention policies that target specific areas for retention. + +Policies can be applied to the following scopes: `global`, `organization`, and `project`. + +If applied to the `global` scope, ALL projects will receive the policy. If a policy is then added to an `organization` scope, then this policy will override anything at the `global` scope, and then the same for `project` scope would override the other two. + +Policies are created as a top level resource, meaning they can exist but not be assigned or used. An administrator can then link a policy to a scope. The act of linking a policy to a scope is what will turn the policy on (if the state is enabled) + +A policy of one type can only be applied to a scope at a single time. For example, if a `harbor` policy is applied to the global scope, then you cannot add another `harbor` policy to the global scope. + +# policy enforcement + +Different policy types will have different methods of enforcement. See enforcement for each policy type below. + +## harbor + +This policy type is for managing how many images pushed to harbor are retained. This is a simplified version of what harbor offers that will work for images that Lagoon pushes into projects. 
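A harbor policy's configuration is stored as JSON (in the `retention_policy.configuration` column added by this patch) and forwarded to remote clusters as described in `PAYLOADS.md`. As a rough illustration only, mirroring the example policy used elsewhere in this patch, the stored configuration for a harbor policy has roughly this shape:

```
// illustrative shape of a stored harbor retention policy configuration
const harborPolicyConfiguration = {
  enabled: true,
  rules: [
    { name: "all branches, excluding pullrequests", pattern: "[^pr\\-]*/*", latestPulled: 3 },
    { name: "pullrequests", pattern: "pr-*", latestPulled: 1 },
  ],
  schedule: "3 3 * * 3",
};
```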
+ +The configuration options for harbor retention policies are +* enabled - the state of the policy +* rules - a list of rules to apply (or based) + * name - the name of the rule + * pattern - the pattern to use, this is based on doublestar path pattern matching and globbing (harbor uses this https://github.com/bmatcuk/doublestar#patterns) + * latestPulled - the number of images to retain for this rule +* schedule - how often to run this retention policy in harbor (this schedule is executed by harbor, not lagoon) + +> Note: changing a policy from `enabled: true` to `enabled: false` will remove the policy from anything it may be associated to. this is a way to set a global (or organization) policy and allow an organization (or project) policy to disable it. + +### enforcement + +harbor policies when they are linked, unlinked, or updated, are sent to deploytargets to pass on to the harbor defined in that deploytarget. + +For example, if a harbor policy is linked to a scope, a hook is executed which will work out, based on the scope, which deploytargets need to be informed of the new policy. + +If there exists a global scoped harbor policy, and a new organization based policy is created and linked to an organization. The policy enforcer will work out which deploytargets any projects within that organization need to be informed of this new policy and send messages to them so they update the policy in their respective harbors. +If the organization based policy is removed from the organization, then the enforcer will send a message to all of the projects in that organization again to inform them to revert back to the global policy. The same actions are performed if the policy would be applied to a project scope. + +### creating a harbor policy + +``` +mutation createHarborPolicy { + createRetentionPolicy(input:{ + name: "custom-harbor-policy" + type: HARBOR + harbor: { + enabled: true + rules: [ + { + name: "all branches, excluding pullrequests" + pattern: "[^pr\\-]*/*" + latestPulled: 3 + }, + { + name: "pullrequests" + pattern: "pr-*" + latestPulled: 1 + } + ] + schedule: "3 3 * * 3" + } + }) { + id + name + configuration { + ... on HarborRetentionPolicy { + enabled + rules { + name + pattern + latestPulled + } + schedule + } + } + type + created + updated + } +} +``` + +For information or examples of the payloads that the harbor policy enforcement sends, see `PAYLOADS.md` + +## history + +This policy type will trim down the number of items that are retained in an environments history. + +The configuration options for history are +* enabled - the state of the policy +* deploymentType - can be one of `COUNT`, `DAYS`, `MONTHS` +* deploymentHistory - depending on the type selected, will retain deployments (logs, status, etc...) to this number accordingly +* taskType - can be one of `COUNT`, `DAYS`, `MONTHS` +* taskHistory - depending on the type selected, will retain task history (logs, status, etc...) to this number accordingly + +### enforcement + +history policies are enforced on demand. For example, when a new task or deployment is triggered, a hook is called that will check if the environment needs to enforce the policy or not based on the policy configuration. + +### creating a history policy + +``` +mutation createHistoryPolicy { + createRetentionPolicy(input:{ + name: "custom-history-policy" + type: HISTORY + history: { + enabled: true + deploymentHistory: 15 + deploymentType: DAYS + taskHistory: 3 + taskType: MONTHS + } + }) { + id + name + configuration { + ... 
on HistoryRetentionPolicy { + enabled + deploymentHistory + deploymentType + taskHistory + taskType + } + } + type + created + updated + } +} +``` \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/harbor.ts b/services/api/src/resources/retentionpolicy/harbor.ts new file mode 100644 index 0000000000..41680bdce2 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/harbor.ts @@ -0,0 +1,72 @@ +import { logger } from "../../loggers/logger"; +import { query } from '../../util/db'; +import { Helpers } from './helpers'; +import { HarborRetentionMessage, HarborRetentionEventType, HarborRetentionMessageType } from './types'; +import { Sql as environmentSql } from '../environment/sql'; +import { Sql as openshiftSql } from '../openshift/sql'; +import { Sql as projectSql } from '../project/sql'; +import { createRetentionPolicyTask } from '@lagoon/commons/dist/tasks'; + +export const HarborRetentionEnforcer = () => { + const updateProjects = async (sqlClientPool, policyChanges: any) => { + // loop over all the policyChanges and get all the environments for the project, and the deploytargets environments are in + // send each deploytarget ID the policy change for the project so that the harbor in that deploytarget will + // get the updated retention policy changes immediately + for (const pol of policyChanges) { + const rows = await query(sqlClientPool, environmentSql.selectEnvironmentsByProjectId(null, pol.pid, false, false, [])); + if (rows.length > 0) { + let targets = [] + for (const row of rows) { + const deployTarget = await query(sqlClientPool, openshiftSql.selectOpenshift(row.openshift)); + if (targets.indexOf(deployTarget[0].name) === -1) { + targets.push(deployTarget[0].name); + } + } + const project = await query(sqlClientPool, projectSql.selectProjectById(pol.pid)) + for (const target of targets) { + if (pol.updatePolicy) { + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(pol.rpid) + // add or update harbor policy to project in the remote cluster + const event: HarborRetentionMessage = { + type: HarborRetentionMessageType.HarborRetentionPolicy, + eventType: HarborRetentionEventType.UpdatePolicy, + data: { + project: { + id: project[0].id, + name: project[0].name, + }, + policy: retpol.configuration + } + } + const data = { + target: target, + event: event + } + await createRetentionPolicyTask({ key: 'harborpolicy:update', data }); + } + if (pol.removePolicy) { + // remove harbor policy from project in the remote cluster + const event: HarborRetentionMessage = { + type: HarborRetentionMessageType.HarborRetentionPolicy, + eventType: HarborRetentionEventType.RemovePolicy, + data: { + project: { + id: project[0].id, + name: project[0].name, + } + } + } + const data = { + target: target, + event: event + } + await createRetentionPolicyTask({ key: 'harborpolicy:update', data }); + } + } + } + } + } + return { + updateProjects, + }; +}; \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/helpers.ts b/services/api/src/resources/retentionpolicy/helpers.ts new file mode 100644 index 0000000000..fa8d8efae9 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/helpers.ts @@ -0,0 +1,387 @@ +import * as R from 'ramda'; +import { Pool } from 'mariadb'; +import { asyncPipe } from '@lagoon/commons/dist/util/func'; +import { query } from '../../util/db'; +import { Sql } from './sql'; +import { Sql as organizationSql } from '../organization/sql'; +import { Sql as projectSql } from '../project/sql'; +import { 
logger } from '../../loggers/logger'; +import { Helpers as projectHelpers } from '../project/helpers'; +import { HarborRetentionEnforcer } from './harbor'; + +export const Helpers = (sqlClientPool: Pool) => { + const getRetentionPolicy = async (id: number) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPolicyById(id)); + return R.prop(0, rows); + }; + const getRetentionPolicyByName = async (name: string) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPolicyByName(name)); + return R.prop(0, rows); + }; + const getRetentionPolicyByTypeAndLink = async (type: string, sid: number, scope: string) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, sid, scope)); + return R.prop(0, rows); // ? R.prop(0, rows) : null; + }; + const getRetentionPoliciesByTypePolicyIDAndLink = async (type: string, policyId: number, sid: number, scope: string) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypePolicyIDAndLink(type, policyId, sid, scope)); + return rows; + }; + const getRetentionPoliciesByProjectWithType = async (type: string, project: number) => { + let rows = [] + if (type) { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, project, "project")); + } else { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByLink(project, "project")); + } + return rows; + }; + const getRetentionPoliciesByOrganizationWithType = async (type: string, organization: number) => { + let rows = [] + if (type) { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, organization, "organization")); + } else { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByLink(organization, "organization")); + } + return rows; + }; + const getRetentionPoliciesByGlobalWithType = async (type: string) => { + let rows = [] + if (type) { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, 0, "global")); + } else { + rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByLink(0, "global")); + } + return rows; + }; + const getRetentionPoliciesByScopeWithTypeAndLink = async (type: string, scope: string, scopeId: number) => { + let rows, gr, or, pr, orgRows = [] + const globalRows = await getRetentionPoliciesByGlobalWithType(type); + switch (scope) { + case "project": + const projectData = await projectHelpers(sqlClientPool).getProjectById(scopeId) + orgRows = await getRetentionPoliciesByOrganizationWithType(type, projectData.organization); + const pRows = await getRetentionPoliciesByProjectWithType(type, scopeId); + gr = globalRows.map(row => ({ ...row, source: "global", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + or = orgRows.map(row => ({ ...row, source: "organization", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + pr = pRows.map(row => ({ ...row, source: "project", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + if (pr.length == 0) { + rows = gr.filter(ar => !or.find(rm => (rm.type === ar.type ) )) + if (or.length != 0) { + rows = or.filter(ar => !pr.find(rm => (rm.type === ar.type ) )) + } + } else { + return pr + } + rows.push(...pr) + return rows; + case "organization": + orgRows = await getRetentionPoliciesByOrganizationWithType(type, scopeId); + gr = globalRows.map(row => ({ ...row, source: "global", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + or = orgRows.map(row => ({ ...row, source: "organization", 
configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + rows = gr.filter(ar => !or.find(rm => (rm.type === ar.type ) )) + rows.push(...or) + return rows; + case "global": + return globalRows.map(row => ({ ...row, source: "global", configuration: {type: row.type, ...JSON.parse(row.configuration)} })) + default: + throw new Error( + `No matching scope` + ); + } + }; + const getDeployTargetsForRetentionPoliciesByProject = async (project: number) => { + const rows = await query(sqlClientPool, Sql.selectDeployTargetsForRetentionByProject(project)); + return rows; + }; + const getEnvironmentsForRetentionPoliciesByProject = async (project: number) => { + const rows = await query(sqlClientPool, Sql.selectEnvironmentsForRetentionByProject(project)); + return rows; + }; + /* + getProjectIdsForAssociatedPolicyID retrieves all project ids that have the associated policyid and type attached either globally, organizationally, or directly in the project + this is used to quickly figure out which projects need to be updated if the associated policy is modified + the data this generates should be in the format the `policyEnforcer` requires, see `policyEnforcer` for details + */ + const getProjectIdsForAssociatedPolicyID = async (type: string, policyId: number, removal: boolean) => { + let policyOverrides = [] // store all the policy overrides that this function will generate + let projects = [] // store all the collected project ids so that we can use it to select other projects not in this list later on + // this policy is applied globally, so check for any organizations or projects that may use this policy + // check if any organizations have a policy that is different to this updated policy, these should be excluded from receiving any updates + const oids = await query(sqlClientPool, Sql.selectScopeIDsByRetentionPolicyTypeExcludingPolicyID(type, "organization", policyId)); + if (oids.length != 0 && oids[0]["scopeIds"] != null) { + for (const oid of oids[0]["scopeIds"].split(',')) { + const opids = await query(sqlClientPool, organizationSql.selectOrganizationProjectIds(oid)) + if (opids[0]["projectIds"] != null) { + for (const pid of opids[0]["projectIds"].split(',')) { + projects.push(pid) + const d = await getRetentionPolicyByTypeAndLink(type, oid, "organization") + if (removal && d && d.id == policyId) { + const targetIndex = policyOverrides.findIndex(f=>f.pid===pid); + const policy = {pid: pid, updatePolicy: true, rpid: d.id} + if (targetIndex != -1) { + // if the project already exists in the overrides, but a change is determined to be made + // update the project with the new policy + policyOverrides[targetIndex] = policy; + } else { + // otherwise add the project and policy as a new item + policyOverrides.push(policy) + } + } + } + } + } + } + // check if any projects have a policy that is different to this updated policy, these should be excluded from receiving any updates + const pids = await query(sqlClientPool, Sql.selectScopeIDsByRetentionPolicyTypeExcludingPolicyID(type, "project", policyId)); + if (pids.length != 0 && pids[0]["scopeIds"] != null) { + for (const pid of pids[0]["scopeIds"].split(',')) { + projects.indexOf(pid) === -1 && projects.push(pid); + const d = await getRetentionPolicyByTypeAndLink(type, pid, "project") + if (removal && d && d.id == policyId) { + const targetIndex = policyOverrides.findIndex(f=>f.pid===pid); + const policy = {pid: pid, updatePolicy: true, rpid: d.id} + if (targetIndex != -1) { + // if the project already exists in the overrides, but a 
change is determined to be made + // update the project with the new policy + policyOverrides[targetIndex] = policy; + } else { + // otherwise add the project and policy as a new item + policyOverrides.push(policy) + } + } + } + } + // select all project ids that don't have a policy override + const updateProjects = await query(sqlClientPool, projectSql.selectAllProjectIDsNotIn(projects)) + if (updateProjects[0]["projectIds"] != null) { + const projects = updateProjects[0]["projectIds"].split(','); + for (const pid of projects) { + if (removal) { + // if the project has no other policies to apply + // then it need to have any policies that may have been attached to it, removed from it + // set that here for the policyEnforcer to consume + policyOverrides.push({pid: pid, removePolicy: true}) + } else { + policyOverrides.push({pid: pid, updatePolicy: true, rpid: policyId}) + } + } + } + // all of these project ids should get an update as long as the policy type requires immediate update changes + return policyOverrides + } + /* + getRetentionPolicyChangesRequiredByScope generates a list of project ids and the associated policy id that should be attached to this project + the data this generates should be in the format the `policyEnforcer` requires, see `policyEnforcer` for details + */ + const getRetentionPolicyChangesRequired = async (scopeId: number, scope: string, type: string, policyId: number, removal: boolean) => { + const globPols = await getRetentionPoliciesByScopeWithTypeAndLink(type, "global", 0) + let policyOverrides = [] // projects with override policies + switch (scope) { + case "global": + const projects = await getProjectIdsForAssociatedPolicyID(type, policyId, removal) + for (const p of projects) { + policyOverrides.push(p) + } + break; + case "organization": + const orgProjects = await query(sqlClientPool, organizationSql.selectOrganizationProjects(scopeId)) + let skip = false + for (const p of orgProjects) { + const pRetPols = await getRetentionPoliciesByScopeWithTypeAndLink(type, "project", p.id) + for (const rp of pRetPols) { + skip = true + switch (rp.source) { + case "global": + // if this policy is being removed from an organization, and there is a global policy that can be applied + // set that here + if (removal) { + if (rp.configuration.enabled) { + policyOverrides.push({pid: p.id, updatePolicy: true, rpid: rp.id}) + } else { + policyOverrides.push({pid: p.id, removePolicy: true}) + } + } + break; + case "organization": + // if this policy is being added to an organization, and there is a an organization policy that can be applied + // set that here + if (!removal) { + if (rp.configuration.enabled) { + policyOverrides.push({pid: p.id, updatePolicy: true, rpid: rp.id}) + } else { + policyOverrides.push({pid: p.id, removePolicy: true}) + } + } + break; + case "project": + // do nothing if the project has an override for the project, as it takes precedence + break; + } + } + if (!skip) { + // if the project has no other policies to apply + // then it need to have any policies that may have been attached to it, removed from it + // set that here for the policyEnforcer to consume + policyOverrides.push({pid: p.id, removePolicy: true}) + } + skip = false + } + break; + case "project": + let policyToApply = null + const projectData = await projectHelpers(sqlClientPool).getProjectById(scopeId) + const orgRows = await getRetentionPoliciesByOrganizationWithType(type, projectData.organization); + const pRetPols = await getRetentionPoliciesByScopeWithTypeAndLink(type, 
"project", scopeId) + if (pRetPols.length == 1) { + policyToApply = pRetPols[0] + } else { + if (orgRows.length == 1) { + policyToApply = orgRows[0] + } else { + if (globPols.length == 1) { + // apply the global polcy + policyToApply = globPols[0] + } + } + } + // if there is a policy to apply, and it is enabled, enable it here + if (policyToApply && policyToApply.configuration.enabled) { + policyOverrides.push({pid: scopeId, updatePolicy: true, rpid: policyToApply.id}) + } else { + // if the project has no other policies to apply + // then it need to have any policies that may have been attached to it, removed from it + // set that here for the policyEnforcer to consume + policyOverrides.push({pid: scopeId, removePolicy: true}) + } + break; + default: + throw new Error( + `No matching scope` + ); + } + return policyOverrides + } + const postRetentionPolicyUpdateHook = async (type: string, policyId: number, policyChanges: any, removal: boolean = false) => { + // retrieve all projects that need to be updated if a change in the policy is made + // not all policies will require immediate updates, but those that do will be done here + if (!policyChanges) { + policyChanges = await getProjectIdsForAssociatedPolicyID(type, policyId, removal) + } + await policyEnforcer(policyChanges, type) + } + // this hook can be used to perform actions when a policy is added to or removed from a scope + // depending on the scope + const postRetentionPolicyLinkHook = async (scopeId: number, scope: string, type: string, policyId: number, removal: boolean = false) => { + const policyChanges = await getRetentionPolicyChangesRequired(scopeId, scope, type, policyId, removal) + await policyEnforcer(policyChanges, type) + } + /* + policyEnforcer is the actual policy enforcement function, it will handle execution of policy changes that are required, if they are required. 
+ the payload of `policyChanges` is as follows + [ + {pid: project.id, removePolicy: true}, + {pid: project.id, updatePolicy: true, rpid: policy.id} + ] + `removePolicy` indicates that any policies on this project of the requested type should be removed from this project + `updatePolicy` indicates that a policy of the requested type should be applied to this project + the post retention hooks (postRetentionPolicyLinkHook and postRetentionPolicyUpdateHook) will call policyEnforcer based on which resolver + called the hook (addRetentionPolicyLink, updateRetentionPolicy, removeRetentionPolicyLink) + */ + const policyEnforcer =async (policyChanges: any, type: string) => { + switch (type) { + case "harbor": + // send this to the harbor retention policy enforcer + await HarborRetentionEnforcer().updateProjects(sqlClientPool, policyChanges) + break; + case "history": + // do nothing, history changes are executed when deployment or task history is modified + // so policy updates are implemented in realtime + break; + default: + throw new Error( + `No matching type` + ); + } + } + // this hook can be used after a deployment has been updated to perform changes to any retention policies as required + const postDeploymentProjectPolicyHook = async (projectId: number, status: string ) => { + switch (status) { + case 'complete': + case 'failed': + case 'cancelled': + const rows = await getRetentionPoliciesByScopeWithTypeAndLink("harbor", "project", projectId); + if (rows[0]) { + // if a deployment is complete, cancelled, or fails, run the postretentionpolicylinkhook so that + // any harbor policies are applied to the new environment or project as required + // this is done just in case the project or environment was created AFTER the policy was created to ensure that it gets any updates + // additionally, it happens here rather than at project creation as there may be no harbor project at the time the project is created + await postRetentionPolicyLinkHook(projectId, "project", rows[0].type, rows[0].id, !rows[0].configuration.enabled) + } + break; + default: + break; + } + } + return { + getRetentionPolicy, + getRetentionPolicyByName, + getRetentionPoliciesByProjectWithType, + getRetentionPoliciesByOrganizationWithType, + getRetentionPoliciesByGlobalWithType, + getDeployTargetsForRetentionPoliciesByProject, + getEnvironmentsForRetentionPoliciesByProject, + getRetentionPolicyByTypeAndLink, + getRetentionPoliciesByTypePolicyIDAndLink, + getProjectIdsForAssociatedPolicyID, + getRetentionPolicyChangesRequired, + postRetentionPolicyLinkHook, + postRetentionPolicyUpdateHook, + policyEnforcer, + getRetentionPoliciesByScopeWithTypeAndLink, + postDeploymentProjectPolicyHook, + deleteRetentionPolicy: async (id: number) => { + // check for globals with this retention policy + const globals = await query( + sqlClientPool, Sql.selectRetentionPoliciesByLinkAndPolicyID(id, "global") + ); + if (globals.length > 0) { + throw new Error( + 'Unable to delete retention policy, it is in use globally and should be removed from global consumption first' + ); + } + + // check for organizations with this retention policy + const orgs = await query( + sqlClientPool, Sql.selectRetentionPoliciesByLinkAndPolicyID(id, "organization") + ); + if (orgs.length > 0) { + throw new Error( + 'Unable to delete retention policy, there are organizations using it that should be removed from first' + ); + } + + // check for organizations with this retention policy + const projects = await query( + sqlClientPool, 
Sql.selectRetentionPoliciesByLinkAndPolicyID(id, "project") + ); + if (projects.length > 0) { + throw new Error( + 'Unable to delete retention policy, there are projects using it that should be removed from first' + ); + } + await query( + sqlClientPool, + Sql.deleteRetentionPolicy(id) + ); + }, + updateRetentionPolicy: async (id: number, patch: any) => { + await query( + sqlClientPool, + Sql.updateRetentionPolicy({ + id: id, + patch: patch + }) + ); + } + }; +}; diff --git a/services/api/src/resources/retentionpolicy/history.ts b/services/api/src/resources/retentionpolicy/history.ts new file mode 100644 index 0000000000..12c1bd0445 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/history.ts @@ -0,0 +1,207 @@ + + +import { Helpers } from './helpers'; +import { sqlClientPool } from '../../clients/sqlClient'; +import { Sql as deploymentSql } from '../deployment/sql'; +import { Sql as taskSql } from '../task/sql'; +import { + sendToLagoonActions, + // @ts-ignore +} from '@lagoon/commons/dist/tasks'; +import { query } from '../../util/db'; +import { logger } from '../../loggers/logger'; + +export const HistoryRetentionEnforcer = () => { + const cleanupTask = async (projectData: any, environmentData: any, task: any) => { + // clean up the task log history and associated files from S3 + const actionData = { + type: "retentionCleanup", + eventType: "taskCleanup", + data: { + environmentName: environmentData.name, + environmentId: environmentData.id, + projectName: projectData.name, + projectId: projectData.id, + task: { + id: task.id.toString(), + }, + remoteId: task.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + const cleanupTasks = async (projectData: any, environmentData: any) => { + // basic clean up all but X latest tasks + const retpol = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink("history", "project", projectData.id) + if (retpol.length > 0) { + const c = retpol[0].configuration + if (c.enabled) { + let historyToDelete = [] + switch (c.taskType) { + case "count": + historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryRetention(environmentData.id, c.taskHistory)); + break; + case "days": + historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryRetentionDays(environmentData.id, c.taskHistory)); + break; + case "months": + historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryRetentionMonths(environmentData.id, c.taskHistory)); + break; + } + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data in buckets + const actionData = { + type: "retentionCleanup", + eventType: "taskCleanup", + data: { + environmentName: environmentData.name, + environmentId: environmentData.id, + projectName: projectData.name, + projectId: projectData.id, + task: { + id: r.id.toString(), + }, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + switch (c.taskType) { + case "count": + await query(sqlClientPool, taskSql.deleteTaskHistory(environmentData.id, c.taskHistory)); + break; + case "days": + await query(sqlClientPool, taskSql.deleteTaskHistoryDays(environmentData.id, c.taskHistory)); + break; + case "months": + await query(sqlClientPool, taskSql.deleteTaskHistoryMonths(environmentData.id, c.taskHistory)); + break; + } + } + } + } + } + const cleanupDeployment = async (projectData: any, environmentData: any, deployment: any) => { + // clean up the 
deployment log history and associated files from S3 + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: deployment.name, + remoteId: deployment.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + const cleanupDeployments = async (projectData: any, environmentData: any) => { + // basic clean up all but X latest tasks + const retpol = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink("history", "project", projectData.id) + if (retpol.length > 0) { + const c = retpol[0].configuration + if (c.enabled) { + let historyToDelete = [] + switch (c.taskType) { + case "count": + historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryRetention(environmentData.id, c.deploymentHistory)); + break; + case "days": + historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryRetentionDays(environmentData.id, c.deploymentHistory)); + break; + case "months": + historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryRetentionMonths(environmentData.id, c.deploymentHistory)); + break; + } + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data in buckets + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: r.name, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + switch (c.taskType) { + case "count": + await query(sqlClientPool, deploymentSql.deleteDeploymentHistory(environmentData.id, c.deploymentHistory)); + break; + case "days": + await query(sqlClientPool, deploymentSql.deleteDeploymentHistoryDays(environmentData.id, c.deploymentHistory)); + break; + case "months": + await query(sqlClientPool, deploymentSql.deleteDeploymentHistoryMonths(environmentData.id, c.deploymentHistory)); + break; + } + } + } + } + } + const cleanupAllDeployments = async (projectData: any, environmentData: any) => { + // get all the environment deployment history + const historyToDelete = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryForEnvironment(environmentData.id)); + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data in buckets + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: r.name, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + // delete all the environment deployment history + await query(sqlClientPool, deploymentSql.deleteDeploymentHistoryForEnvironment(environmentData.id)); + } + } + const cleanupAllTasks = async (projectData: any, environmentData: any) => { + // get all the environment task history + const historyToDelete = await query(sqlClientPool, taskSql.selectTaskHistoryForEnvironment(environmentData.id)); + for (const r of historyToDelete) { + // fire off message to action-handler service to proceed with cleaning up old data 
in buckets + const actionData = { + type: "retentionCleanup", + eventType: "buildCleanup", + data: { + environmentName: environmentData.name, + projectName: projectData.name, + environmentId: environmentData.id, + projectId: projectData.id, + buildName: r.name, + remoteId: r.remoteId, + } + } + sendToLagoonActions("retentionCleanup", actionData) + } + if (historyToDelete.length != 0) { + // delete all the environment task history + await query(sqlClientPool, taskSql.deleteTaskHistoryForEnvironment(environmentData.id)); + } + } + return { + cleanupDeployment, + cleanupDeployments, + cleanupTask, + cleanupTasks, + cleanupAllDeployments, + cleanupAllTasks, + }; +}; \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/resolvers.ts b/services/api/src/resources/retentionpolicy/resolvers.ts new file mode 100644 index 0000000000..cec63b151b --- /dev/null +++ b/services/api/src/resources/retentionpolicy/resolvers.ts @@ -0,0 +1,394 @@ + +import * as R from 'ramda'; +import { ResolverFn } from '..'; +import { logger } from '../../loggers/logger'; +import { isPatchEmpty, query, knex } from '../../util/db'; +import { Helpers } from './helpers'; +import { RetentionPolicy } from './types'; +import { Helpers as organizationHelpers } from '../organization/helpers'; +import { Helpers as projectHelpers } from '../project/helpers'; +import { Sql } from './sql'; + +export const createRetentionPolicy: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + await hasPermission('retention_policy', 'add'); + + if (input.id) { + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + if (retpol) { + throw new Error( + `Retention policy with ID ${input.id} already exists` + ); + } + } + + // @ts-ignore + if (!input.type) { + throw new Error( + 'Must provide type' + ); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicyByName(input.name) + if (retpol) { + throw new Error( + `Retention policy with name ${input.name} already exists` + ); + } + + // convert the type to the configuration json on import after passing through the validator + try { + input.configuration = await RetentionPolicy().returnValidatedConfiguration(input.type, input) + } catch (e) { + throw new Error( + `${e}` + ); + } + + const { insertId } = await query( + sqlClientPool, + Sql.createRetentionPolicy({ + ...input, + })); + + const row = await Helpers(sqlClientPool).getRetentionPolicy(insertId); + + userActivityLogger(`User created a retention policy`, { + project: '', + event: 'api:createRetentionPolicy', + payload: { + patch: { + name: input.name, + configuration: input.configuration, + }, + data: row + } + }); + + + return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; + // return row; +}; + +export const updateRetentionPolicy: ResolverFn = async ( + root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + await hasPermission('retention_policy', 'update'); + + if (isPatchEmpty(input)) { + throw new Error('input.patch requires at least 1 attribute'); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + if (!retpol) { + throw new Error( + `Retention policy does not exist` + ); + } + + let patch = { + name: input.patch.name + } + + if (!input.patch[retpol.type]) { + throw new Error( + `Missing configuration for type ${retpol.type}, patch not provided` + ); + } + + // convert the type to the configuration json on import after passing through 
the validator + try { + patch["configuration"] = await RetentionPolicy().returnValidatedConfiguration(retpol.type, input.patch) + } catch (e) { + throw new Error( + `${e}` + ); + } + + await Helpers(sqlClientPool).updateRetentionPolicy(input.id, patch); + + const row = await Helpers(sqlClientPool).getRetentionPolicy(input.id); + + userActivityLogger(`User updated retention policy`, { + project: '', + event: 'api:updateRetentionPolicy', + payload: { + patch: patch, + data: row + } + }); + + if (retpol.configuration != row.configuration) { + // if a policy is updated, and the configuration is not the same as before the update + // then run postRetentionPolicyUpdateHook to make sure that the policy enforcer does + // any policy updates for any impacted projects + const policyEnabled = input.patch[retpol.type].enabled + await Helpers(sqlClientPool).postRetentionPolicyUpdateHook(retpol.type, retpol.id, null, !policyEnabled) + } + + return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; + // return row; +}; + +export const deleteRetentionPolicy: ResolverFn = async ( + _root, + { id: rid }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + await hasPermission('retention_policy', 'delete'); + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(rid) + if (!retpol) { + throw new Error( + `Retention policy does not exist` + ); + } + + await Helpers(sqlClientPool).deleteRetentionPolicy(rid); + + userActivityLogger(`User deleted a retention policy '${retpol.name}'`, { + project: '', + event: 'api:deleteRetentionPolicy', + payload: { + input: { + retentionPolicy: rid + } + } + }); + + return 'success'; +}; + +export const listRetentionPolicies: ResolverFn = async ( + root, + { type, name }, + { sqlClientPool, hasPermission } +) => { + await hasPermission('retention_policy', 'viewAll'); + + let queryBuilder = knex('retention_policy'); + if (type) { + queryBuilder = queryBuilder.and.where('type', type); + } + + if (name) { + queryBuilder = queryBuilder.where('name', name); + } + + const rows = await query(sqlClientPool, queryBuilder.toString()); + return rows.map(row => ({ ...row, source: null, configuration: {type: row.type, ...JSON.parse(row.configuration)} })); +}; + + +export const addRetentionPolicyLink: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + + let scopeId = 0 + switch (input.scope) { + case "global": + await hasPermission('retention_policy', 'addGlobal'); + break; + case "organization": + const organization = await organizationHelpers(sqlClientPool).getOrganizationByName(input.scopeName) + if (!organization) { + throw new Error( + `Organization does not exist` + ); + } + await hasPermission('retention_policy', 'addOrganization'); + scopeId = organization.id + break; + case "project": + const project = await projectHelpers(sqlClientPool).getProjectByProjectInput({name: input.scopeName}) + if (!project) { + throw new Error( + `Project does not exist` + ); + } + await hasPermission('retention_policy', 'addProject'); + scopeId = project.id + break; + default: + throw new Error( + `No matching scope` + ); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + if (!retpol) { + throw new Error( + `Retention policy does not exist` + ); + } + + const retpoltypes = await Helpers(sqlClientPool).getRetentionPolicyByTypeAndLink(retpol.type, scopeId, input.scope); + if (retpoltypes) { + throw new Error( + `A retention policy of type ${retpol.type} is already 
attached to the ${input.scope}` + ); + } + + await query( + sqlClientPool, + Sql.addRetentionPolicyLink( + input.id, + input.scope, + scopeId, + ) + ); + + // if a policy is linked to a scope (global, organization, project) + // then run postRetentionPolicyLinkHook to make sure that the policy enforcer does + // any policy updates for any impacted projects + await Helpers(sqlClientPool).postRetentionPolicyLinkHook(scopeId, input.scope, retpol.type, retpol.id, false) + + userActivityLogger(`User added a retention policy '${retpol.name}' to ${input.scope}`, { + project: '', + event: 'api:addRetentionPolicyOrganization', + payload: { + input: { + retentionPolicy: retpol.id, + scope: input.scope, + scopeId: scopeId + } + } + }); + + const row = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; +}; + +export const removeRetentionPolicyLink: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + let scopeId = 0 + switch (input.scope) { + case "global": + await hasPermission('retention_policy', 'addGlobal'); + break; + case "organization": + const organization = await organizationHelpers(sqlClientPool).getOrganizationByName(input.scopeName) + if (!organization) { + throw new Error( + `Organization does not exist` + ); + } + await hasPermission('retention_policy', 'addOrganization'); + scopeId = organization.id + break; + case "project": + const project = await projectHelpers(sqlClientPool).getProjectByProjectInput({name: input.scopeName}) + if (!project) { + throw new Error( + `Project does not exist` + ); + } + await hasPermission('retention_policy', 'addProject'); + scopeId = project.id + break; + default: + throw new Error( + `No matching scope` + ); + } + + const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id); + if (!retpol) { + throw new Error( + `Retention policy does not exist` + ); + } + + const retpoltypes = await Helpers(sqlClientPool).getRetentionPoliciesByTypePolicyIDAndLink(retpol.type, input.id, scopeId, input.scope); + if (retpoltypes.length == 0) { + throw new Error( + `No matching retention policy attached to this ${input.scope}` + ); + } + + let preDeleteProjectIds = [] + if (input.scope == "global") { + // this is calculated before the policies are removed, as it is used after removing the policy being + // passed into the post removal update hook if required, only for global scoped policies that are being unlinked + preDeleteProjectIds = await Helpers(sqlClientPool).getProjectIdsForAssociatedPolicyID(retpol.type, retpol.id, true) + } + + await query( + sqlClientPool, + Sql.deleteRetentionPolicyLink( + input.id, + input.scope, + scopeId, + ) + ); + + // if a policy is unlinked to a scope (global, organization, project) + // then run postRetentionPolicyLinkHook or postRetentionPolicyUpdateHook to make sure that the policy enforcer does + // any policy updates for any impacted projects + if (input.scope != "global") { + // if this is a standard organization or project policy unlink, then handle that with the post retention policy link hook + // this hook knows how to check the change that impacts those two scopes + await Helpers(sqlClientPool).postRetentionPolicyLinkHook(scopeId, input.scope, retpol.type, retpol.id, true) + } else { + // global policy applications when they're remove require a different calculation step that will update + // projects that don't use any policy overrides, this is because the depth 
of reach of a global policy + // is a bit trickier to calculate + await Helpers(sqlClientPool).postRetentionPolicyUpdateHook(retpol.type, retpol.id, preDeleteProjectIds, true) + } + + userActivityLogger(`User removed a retention policy '${retpol.name}' from organization`, { + project: '', + event: 'api:removeRetentionPolicyOrganization', + payload: { + input: { + retentionPolicy: retpol.id, + scope: input.scope, + scopeId: scopeId + } + } + }); + + return "success" +}; + +// This is only called by the project resolver, so there is no need to do any permission checks +export const getRetentionPoliciesByProjectId: ResolverFn = async ( + project, + args, + { sqlClientPool } +) => { + + let pid = args.project; + if (project) { + pid = project.id; + } + let rows = [] + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "project", project.id); + return rows; +}; + +// This is only called by the organization resolver, so there is no need to do any permission checks +export const getRetentionPoliciesByOrganizationId: ResolverFn = async ( + organization, + args, + { sqlClientPool } +) => { + + let oid = args.organization; + if (organization) { + oid = organization.id; + } + let rows = [] + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "organization", oid); + return rows; +}; \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/sql.ts b/services/api/src/resources/retentionpolicy/sql.ts new file mode 100644 index 0000000000..58ae419caf --- /dev/null +++ b/services/api/src/resources/retentionpolicy/sql.ts @@ -0,0 +1,123 @@ +import { knex } from '../../util/db'; + +export const Sql = { + updateRetentionPolicy: ({ id, patch }: { id: number, patch: { [key: string]: any } }) => { + const updatePatch = { + ...patch, + updated: knex.fn.now(), + }; + return knex('retention_policy') + .where('id', '=', id) + .update(updatePatch) + .toString(); + }, + selectRetentionPolicyById: (id: number) => + knex('retention_policy') + .where('id', '=', id) + .toString(), + selectRetentionPolicyByName: (name: string) => + knex('retention_policy') + .where('name', '=', name) + .toString(), + selectRetentionPoliciesByType: (type: string) => + knex('retention_policy') + .where('type', '=', type) + .toString(), + selectRetentionPoliciesByLink: (id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + .where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('retention_policy_reference.id = ?', id)) + .toString(), + selectRetentionPoliciesByTypeAndLink: (type: string, id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + .where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('retention_policy_reference.id = ?', id)) + .andWhere(knex.raw('rp.type = ?', type)) + .toString(), + selectRetentionPoliciesByTypePolicyIDAndLink: (type: string, policyId: number, id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + .where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('retention_policy_reference.id = ?', id)) + .andWhere(knex.raw('rp.type = ?', type)) + 
.andWhere(knex.raw('rp.id = ?', policyId)) + .toString(), + selectRetentionPoliciesByLinkAndPolicyID: (id: number, scope: string) => + knex('retention_policy as rp') + .select('rp.*') + .join('retention_policy_reference', 'rp.id', '=', 'retention_policy_reference.retention_policy') + .where(knex.raw('retention_policy_reference.scope = ?', scope)) + .andWhere(knex.raw('rp.id = ?', id)) + .toString(), + selectScopeIDsByRetentionPolicyTypeExcludingPolicyID: (type: string, scope: string, policyId: number) => + knex('retention_policy as rp') + .select(knex.raw('group_concat(rpr.id) as scope_ids')) + .join('retention_policy_reference as rpr', 'rp.id', '=', 'rpr.retention_policy') + .where(knex.raw('rpr.scope = ?', scope)) + .andWhere(knex.raw('rp.type = ?', type)) + .whereNot(knex.raw('rp.id = ?', policyId)) + .toString(), + selectScopeIDsByRetentionPolicyTypeIncludingPolicyID: (type: string, scope: string, policyId: number) => + knex('retention_policy as rp') + .select(knex.raw('group_concat(rpr.id) as scope_ids')) + .join('retention_policy_reference as rpr', 'rp.id', '=', 'rpr.retention_policy') + .where(knex.raw('rpr.scope = ?', scope)) + .andWhere(knex.raw('rp.type = ?', type)) + .andWhere(knex.raw('rp.id = ?', policyId)) + .toString(), + deleteRetentionPolicy: (id: number) => + knex('retention_policy') + .where('id', '=', id) + .delete() + .toString(), + deleteRetentionPolicyLink: (id: number, scope: string, sid: number) => + knex('retention_policy_reference') + .where('retention_policy', '=', id) + .andWhere('scope', '=', scope) + .andWhere('id', '=', sid) + .delete() + .toString(), + createRetentionPolicy: (input) => { + const { + id, + name, + type, + configuration + } = input; + return knex('retention_policy').insert({ + id, + name, + type, + configuration + }).toString(); + }, + addRetentionPolicyLink: (id: number, scope: string, sid: number) => { + return knex('retention_policy_reference').insert({ + retentionPolicy: id, + scope, + id: sid + }).toString(); + }, + selectDeployTargetsForRetentionByProject: (pid: number) => + knex('project as p') + .select('p.name', 'p.id as pid', 'p.organization', 'dt.id as dtid', 'dt.name as dtname') + .join('environment as e', 'p.id', '=', 'e.project') + .join('openshift as dt', 'dt.id', '=', 'e.openshift') + .where('e.deleted', '0000-00-00 00:00:00') + .andWhere(knex.raw('p.id = ?', pid)) + .groupBy('p.name', 'e.openshift') + .toString(), + selectEnvironmentsForRetentionByProject: (pid: number) => + knex('project as p') + .select('p.name', 'p.id as pid', 'e.name as ename', 'e.id as eid', 'p.organization', 'dt.id as dtid', 'dt.name as dtname') + .join('environment as e', 'p.id', '=', 'e.project') + .join('openshift as dt', 'dt.id', '=', 'e.openshift') + .where('e.deleted', '0000-00-00 00:00:00') + .andWhere(knex.raw('p.id = ?', pid)) + .toString(), +} \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/types.ts b/services/api/src/resources/retentionpolicy/types.ts new file mode 100644 index 0000000000..1494d9b692 --- /dev/null +++ b/services/api/src/resources/retentionpolicy/types.ts @@ -0,0 +1,140 @@ +// the types for retention policies +export interface HarborRetentionPolicy { + enabled: Boolean + rules: [HarborRetentionRule] + schedule: string +} + +export interface HarborRetentionRule { + name: string + pattern: string + latestPulled: number +} + +export interface HistoryRetentionPolicy { + enabled: boolean + deploymentHistory: number + deploymentType: string + taskHistory: number + taskType: string +} + +export 
type HarborRetentionMessage = { + type: HarborRetentionMessageType + eventType: HarborRetentionEventType + data: { + project: { + id: number + name: string + } + policy?: HarborRetentionPolicy + } +} + +export enum HarborRetentionMessageType { + HarborRetentionPolicy = "harborRetentionPolicy" +} + +export enum HarborRetentionEventType { + RemovePolicy = "removePolicy", + UpdatePolicy = "updatePolicy" +} + +export const RetentionPolicy = () => { + const convertHarborRetentionPolicyToJSON = async ( + harbor: HarborRetentionPolicy + ): Promise => { + const c = JSON.stringify(harbor) + return c + }; + + const convertHistoryRetentionPolicyToJSON = async ( + history: HistoryRetentionPolicy + ): Promise => { + const c = JSON.stringify(history) + return c + }; + + const convertJSONToHarborRetentionPolicy = async ( + configuration: string + ): Promise => { + const c = JSON.parse(configuration) + if (typeof c.enabled != "boolean") { + throw new Error("enabled must be a boolean"); + } + for (const rule of c.rules) { + if (typeof rule.name != "string") { + throw new Error(`${rule.name}: name must be a string`); + } + if (typeof rule.pattern != "string") { + throw new Error(`${rule.name}: pattern must be a string`); + } + if (typeof rule.latestPulled != "number") { + throw new Error(`${rule.name}: latestPulled must be a number`); + } + } + if (typeof c.schedule != "string") { + throw new Error("schedule must be a string"); + } + return c + }; + + const convertJSONToHistoryRetentionPolicy = async ( + configuration: string + ): Promise => { + const c = JSON.parse(configuration) + if (typeof c.enabled != "boolean") { + throw new Error("enabled must be a boolean"); + } + if (typeof c.deploymentHistory != "number") { + throw new Error("deploymentHistory must be a number"); + } + if (typeof c.deploymentType != "string") { + throw new Error("deploymentHistory must be HistoryRetentionType"); + } + if (typeof c.taskHistory != "number") { + throw new Error("taskHistory must be a number"); + } + if (typeof c.taskType != "string") { + throw new Error("taskHistory must be HistoryRetentionType"); + } + return c + }; + + // run the configuration patches through the validation process + const returnValidatedConfiguration = async (type: string, patch: any): Promise => { + const c = JSON.stringify(patch[type]) + switch (type) { + case "harbor": + try { + await convertJSONToHarborRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided configuration is not valid for type ${type}: ${e}` + ); + } + case "history": + try { + await convertJSONToHistoryRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided configuration is not valid for type ${type}: ${e}` + ); + } + default: + throw new Error( + `Provided configuration is not valid for type ${type}` + ); + } + } + + return { + convertHarborRetentionPolicyToJSON, + convertHistoryRetentionPolicyToJSON, + convertJSONToHarborRetentionPolicy, + convertJSONToHistoryRetentionPolicy, + returnValidatedConfiguration + }; +}; \ No newline at end of file diff --git a/services/api/src/resources/task/helpers.ts b/services/api/src/resources/task/helpers.ts index db28a8c71e..1fc820e0f0 100644 --- a/services/api/src/resources/task/helpers.ts +++ b/services/api/src/resources/task/helpers.ts @@ -9,6 +9,7 @@ import { Sql } from './sql'; import { Sql as projectSql } from '../project/sql'; import { Sql as environmentSql } from '../environment/sql'; import { Helpers as environmentHelpers } from '../environment/helpers'; +import { HistoryRetentionEnforcer } 
from '../retentionpolicy/history'; import { logger } from '../../loggers/logger'; export const Helpers = (sqlClientPool: Pool, hasPermission, adminScopes) => { @@ -136,6 +137,9 @@ export const Helpers = (sqlClientPool: Pool, hasPermission, adminScopes) => { ); } + // pass to the HistoryRetentionEnforcer to clean up tasks based on any retention policies + await HistoryRetentionEnforcer().cleanupTasks(projectData, environmentData) + return taskData; }, addAdvancedTask: async ( diff --git a/services/api/src/resources/task/resolvers.ts b/services/api/src/resources/task/resolvers.ts index 57a7c34e10..dcce916ad7 100644 --- a/services/api/src/resources/task/resolvers.ts +++ b/services/api/src/resources/task/resolvers.ts @@ -18,6 +18,7 @@ import sha1 from 'sha1'; import { generateTaskName } from '@lagoon/commons/dist/util/lagoon'; import { sendToLagoonLogs } from '@lagoon/commons/dist/logs/lagoon-logger'; import { createMiscTask } from '@lagoon/commons/dist/tasks'; +import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; const accessKeyId = process.env.S3_FILES_ACCESS_KEY_ID || 'minio' const secretAccessKey = process.env.S3_FILES_SECRET_ACCESS_KEY || 'minio123' @@ -330,8 +331,22 @@ export const deleteTask: ResolverFn = async ( project: R.path(['0', 'pid'], rows) }); + const task = await Helpers(sqlClientPool, hasPermission).getTaskByTaskInput({id: id}) + + if (!task) { + throw new Error( + `Invalid task input` + ); + } + + const environmentData = await environmentHelpers(sqlClientPool).getEnvironmentById(parseInt(task.environment)); + const projectData = await projectHelpers(sqlClientPool).getProjectById(environmentData.project); + await query(sqlClientPool, Sql.deleteTask(id)); + // pass the task to the HistoryRetentionEnforcer + await HistoryRetentionEnforcer().cleanupTask(projectData, environmentData, task) + userActivityLogger(`User deleted task '${id}'`, { project: '', event: 'api:deleteTask', diff --git a/services/api/src/resources/task/sql.ts b/services/api/src/resources/task/sql.ts index d287ad879f..385b13a49c 100644 --- a/services/api/src/resources/task/sql.ts +++ b/services/api/src/resources/task/sql.ts @@ -277,4 +277,56 @@ export const Sql = { .where('advanced_task_definition', taskId) .del() .toString(), + // this selects all tasks for the environment and returns everything outside of the requested retain value + selectTaskHistoryRetention: (id: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM task + WHERE environment=`+id+` AND admin_only_view=0 AND id NOT IN ( + SELECT id + FROM ( + SELECT id + FROM task + WHERE environment=`+id+` AND admin_only_view=0 + ORDER BY id DESC + LIMIT `+retain+` + ) t + );`) + .toString(), + // this selects all tasks for the environment and returns everything outside of the requested retain days value + selectTaskHistoryRetentionDays: (environment: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` DAY;`) + .toString(), + // this selects all tasks for the environment and returns everything outside of the requested retain months value + selectTaskHistoryRetentionMonths: (environment: number, retain: number) => + knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + .toString(), + // this selects all tasks for the environment and returns everything + selectTaskHistoryForEnvironment: (environment: 
number) => + knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+`;`) + .toString(), + // same as select, except it deletes all tasks for the environment outside of the requested retain value + deleteTaskHistory: (environment: number, retain: number) => + knex.raw(`DELETE FROM task + WHERE environment=`+environment+` AND admin_only_view=0 AND id NOT IN ( + SELECT id + FROM ( + SELECT id + FROM task + WHERE environment=`+environment+` AND admin_only_view=0 + ORDER BY id DESC + LIMIT `+retain+` + ) t + );`) + .toString(), + // same as select, except it deletes all tasks for the environment outside of the requested retain value + deleteTaskHistoryDays: (environment: number, retain: number) => + knex.raw(`DELETE FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` DAY;`) + .toString(), + // same as select, except it deletes all tasks for the environment outside of the requested retain value + deleteTaskHistoryMonths: (environment: number, retain: number) => + knex.raw(`DELETE FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + .toString(), + // same as select, except it deletes all tasks for the environment outside of the requested retain value + deleteTaskHistoryForEnvironment: (environment: number) => + knex.raw(`DELETE FROM task WHERE environment=`+environment+`;`) + .toString(), }; diff --git a/services/api/src/typeDefs.js b/services/api/src/typeDefs.js index 6693c162e7..7472262fd3 100644 --- a/services/api/src/typeDefs.js +++ b/services/api/src/typeDefs.js @@ -816,6 +816,11 @@ const typeDefs = gql` """ buildImage: String sharedBaasBucket: Boolean + """ + retentionPolicies are the available retention policies to a project, this will also include inherited policies from an organization + if the project is associated to an organization, and the organization has any retention policies + """ + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } """ @@ -1106,6 +1111,10 @@ const typeDefs = gql` owners: [OrgUser] notifications(type: NotificationType): [Notification] created: String + """ + retentionPolicies are the available retention policies to an organization + """ + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } input AddOrganizationInput { @@ -1151,6 +1160,11 @@ const typeDefs = gql` groups: [OrgGroupInterface] groupCount: Int notifications: [OrganizationNotification] + """ + retentionPolicies are the available retention policies to a project, this will also include inherited policies from an organization + if the project is associated to an organization, and the organization has any retention policies + """ + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } """ @@ -1449,6 +1463,7 @@ const typeDefs = gql` getEnvVariablesByProjectEnvironmentName(input: EnvVariableByProjectEnvironmentNameInput!): [EnvKeyValue] checkBulkImportProjectsAndGroupsToOrganization(input: AddProjectToOrganizationInput!): ProjectGroupsToOrganization allPlatformUsers(id: String, email: String, gitlabId: Int, role: PlatformRole): [User] + listRetentionPolicies(type: RetentionPolicyType, name: String): [RetentionPolicy] } type ProjectGroupsToOrganization { @@ -2340,6 +2355,167 @@ const typeDefs = gql` name: String } + """ + RetentionPolicyType is the types of retention policies supported in Lagoon + """ + enum RetentionPolicyType { + HARBOR + HISTORY + } + + """ + HarborRetentionPolicy is the type for harbor retention 
policies + """ + type HarborRetentionPolicy { + enabled: Boolean + rules: [HarborRetentionRule] + schedule: String + } + type HarborRetentionRule { + name: String + """ + Pattern is based on doublestar path pattern matching and globbing (harbor uses this) + Example, '[^pr\\-]*/*' to exclude pullrequests in a pattern, and 'pr-*' to only match pullrequest environments + https://github.com/bmatcuk/doublestar#patterns + """ + pattern: String + latestPulled: Int + } + + """ + HarborRetentionPolicyInput is the input for a HarborRetentionPolicy + """ + input HarborRetentionPolicyInput { + enabled: Boolean! + rules: [HarborRetentionRuleInput!] + schedule: String! + } + input HarborRetentionRuleInput { + name: String! + pattern: String! + latestPulled: Int! + } + + """ + HistoryRetentionType is the types of retention policies supported in Lagoon + """ + enum HistoryRetentionType { + COUNT + DAYS + MONTHS + } + + """ + HistoryRetentionPolicy is the type for history retention policies + """ + type HistoryRetentionPolicy { + enabled: Boolean + deploymentHistory: Int + """ + HistoryRetentionType to use + COUNT to retain up to X number items of history + DAYS to retain up to X number of days of history + MONTHS to retain up to X number of months of history + """ + deploymentType: HistoryRetentionType + taskHistory: Int + """ + HistoryRetentionType to use + COUNT to retain up to X number items of history + DAYS to retain up to X number of days of history + MONTHS to retain up to X number of months of history + """ + taskType: HistoryRetentionType + } + + """ + HistoryRetentionPolicyInput is the input for a HistoryRetentionPolicy + """ + input HistoryRetentionPolicyInput { + enabled: Boolean! + deploymentHistory: Int! + deploymentType: HistoryRetentionType! + taskHistory: Int! + taskType: HistoryRetentionType! + } + + """ + RetentionPolicyConfiguration is a union type of different retention policies supported in Lagoon + """ + union RetentionPolicyConfiguration = HarborRetentionPolicy | HistoryRetentionPolicy + + """ + RetentionPolicy is the return type for retention policies in Lagoon + """ + type RetentionPolicy { + id: Int + name: String + type: String + """ + configuration is the return type of union based retention policy configurations, the type of retention policy + influences the return type needed here + """ + configuration: RetentionPolicyConfiguration + created: String + updated: String + """ + source is where the retention policy source is coming from, this field is only populated when a project or organization + lists the available retention polices, and is used to indicate if a project is consuiming a retention policy from the project directly + or from the organization itself + """ + source: String + } + + """ + AddRetentionPolicyInput is used as the input for updating a retention policy, this is a union type + Currently only the 'harbor' type is supported as an input, if other retention policies are added in the future + They will be subfields of this input, the RetentionPolicyType must match the subfield input type + """ + input AddRetentionPolicyInput { + id: Int + name: String! + type: RetentionPolicyType! 
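+    """
+    The subfield matching the selected type must be provided, for example harbor for a HARBOR policy or history for a HISTORY policy; only the subfield matching the type is consumed
+    """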
+ harbor: HarborRetentionPolicyInput + history: HistoryRetentionPolicyInput + } + + """ + UpdateRetentionPolicyPatchInput is used as the input for updating a retention policy, this is a union type + Currently only the 'harbor' type is supported as a patch input, if other retention policies are added in the future + They will be subfields of this patch input + """ + input UpdateRetentionPolicyPatchInput { + name: String + harbor: HarborRetentionPolicyInput + history: HistoryRetentionPolicyInput + } + + """ + UpdateRetentionPolicyInput is used as the input for updating a retention policy + """ + input UpdateRetentionPolicyInput { + id: Int! + patch: UpdateRetentionPolicyPatchInput + } + + """ + RetentionPolicyScope is the types of retention policies scopes in Lagoon + """ + enum RetentionPolicyScope { + GLOBAL + ORGANIZATION + PROJECT + } + + """ + AddRetentionPolicyLinkInput is used as the input for associating a retention policy with a scope + """ + input AddRetentionPolicyLinkInput { + id: Int! + scope: RetentionPolicyScope! + scopeName: String + } + type Mutation { """ Add Environment or update if it is already existing @@ -2558,6 +2734,26 @@ const typeDefs = gql` deleteEnvironmentService(input: DeleteEnvironmentServiceInput!): String addPlatformRoleToUser(user: UserInput!, role: PlatformRole!): User removePlatformRoleFromUser(user: UserInput!, role: PlatformRole!): User + """ + Create a retention policy + """ + createRetentionPolicy(input: AddRetentionPolicyInput!): RetentionPolicy + """ + Update a retention policy + """ + updateRetentionPolicy(input: UpdateRetentionPolicyInput!): RetentionPolicy + """ + Delete a retention policy + """ + deleteRetentionPolicy(id: Int!): String + """ + Add an existing retention policy to a resource type + """ + addRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): RetentionPolicy + """ + Remove an existing retention policy from a resource type + """ + removeRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): String } type Subscription { diff --git a/services/logs2notifications/main.go b/services/logs2notifications/main.go index 0ab9fe92a4..dcf362edd0 100644 --- a/services/logs2notifications/main.go +++ b/services/logs2notifications/main.go @@ -98,15 +98,15 @@ func main() { flag.BoolVar(&disableS3, "disable-s3", false, "Disable the logs2s3 feature.") flag.StringVar(&s3FilesAccessKeyID, "s3-files-access-key", "minio", - "The jwt audience.") + "The S3 files access key.") flag.StringVar(&s3FilesSecretAccessKey, "s3-files-secret-access-key", "minio123", - "The jwt audience.") + "The S3 files secret access key.") flag.StringVar(&s3FilesBucket, "s3-files-bucket", "lagoon-files", - "The jwt audience.") + "The S3 files bucket.") flag.StringVar(&s3FilesRegion, "s3-files-region", "auto", - "The jwt audience.") + "The S3 files region.") flag.StringVar(&s3FilesOrigin, "s3-files-origin", "http://minio.127.0.0.1.nip.io:9000", - "The jwt audience.") + "The S3 files origin.") flag.BoolVar(&s3isGCS, "s3-google-cloud", false, "If the storage backend is google cloud.") From 321741b3e23e1e05af8a729419d9017e9e33434b Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 10:54:08 +1000 Subject: [PATCH 02/15] chore: support s3 file export in actions-handler --- .../actions-handler/handler/action_deploy.go | 6 +- .../handler/action_retention.go | 91 ++++++++++++++++--- .../handler/controller_builds.go | 26 +++--- .../handler/controller_remove.go | 8 +- .../handler/controller_tasks.go | 14 +-- services/actions-handler/handler/handler.go | 28 +++--- 6 
files changed, 117 insertions(+), 56 deletions(-) diff --git a/services/actions-handler/handler/action_deploy.go b/services/actions-handler/handler/action_deploy.go index abd2157bc0..852789d257 100644 --- a/services/actions-handler/handler/action_deploy.go +++ b/services/actions-handler/handler/action_deploy.go @@ -24,7 +24,7 @@ func (m *Messenger) handleDeployEnvironment(ctx context.Context, messageQueue *m if err != nil { // the token wasn't generated if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to generate token: %v", prefix, err)) + log.Printf("%sERROR: unable to generate token: %v", prefix, err) } return nil } @@ -39,7 +39,7 @@ func (m *Messenger) handleDeployEnvironment(ctx context.Context, messageQueue *m "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to deploy latest: %v", prefix, err)) + log.Printf("%sERROR: unable to deploy latest: %v", prefix, err) } return err } @@ -51,7 +51,7 @@ func (m *Messenger) handleDeployEnvironment(ctx context.Context, messageQueue *m "message": fmt.Sprintf("deployed latest environment: %s", deployment.DeployEnvironmentLatest), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sdeployed latest environment: %s", prefix, deployment.DeployEnvironmentLatest)) + log.Printf("%sdeployed latest environment: %s", prefix, deployment.DeployEnvironmentLatest) } return nil } diff --git a/services/actions-handler/handler/action_retention.go b/services/actions-handler/handler/action_retention.go index 87fa8c4c8c..bd8612b7e4 100644 --- a/services/actions-handler/handler/action_retention.go +++ b/services/actions-handler/handler/action_retention.go @@ -1,16 +1,17 @@ package handler import ( - "context" + "bytes" "encoding/json" "fmt" "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - mq "github.com/cheshir/go-mq/v2" + "github.com/uselagoon/machinery/api/schema" "github.com/uselagoon/machinery/utils/namespace" ) @@ -26,7 +27,14 @@ type S3RetentionCleanUp struct { RemoteID string `json:"remoteId"` } -func (m *Messenger) handleRetention(ctx context.Context, messageQueue *mq.MessageQueue, action *Action, messageID string) error { +type S3SaveHistory struct { + Environment schema.Environment `json:"environment"` + Project schema.Project `json:"project"` + TaskHistory []map[string]interface{} `json:"taskHistory"` + DeploymentHistory []map[string]interface{} `json:"deploymentHistory"` +} + +func (m *Messenger) handleRetention(action *Action, messageID string) error { prefix := fmt.Sprintf("(messageid:%s) %s: ", messageID, action.EventType) data, _ := json.Marshal(action.Data) retention := S3RetentionCleanUp{} @@ -55,7 +63,7 @@ func (m *Messenger) handleRetention(ctx context.Context, messageQueue *mq.Messag retention, ) if err != nil { - log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + log.Printf("%sError: %v", prefix, err) return err } // handle cleaning up task logs @@ -65,7 +73,7 @@ func (m *Messenger) handleRetention(ctx context.Context, messageQueue *mq.Messag retention, ) if err != nil { - log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + log.Printf("%sError: %v", prefix, err) return err } case "buildCleanup": @@ -81,7 +89,32 @@ func (m *Messenger) handleRetention(ctx context.Context, messageQueue *mq.Messag retention, ) if err != nil { - log.Println(fmt.Sprintf("%sError: %v", prefix, err)) + log.Printf("%sError: %v", prefix, err) + return err + } + } + return nil +} + +// 
handleSaveHistory will save the deployment and task history into a bucket for long term storage, this can be used later to retrieve stats history for deleted environments in the future +func (m *Messenger) handleSaveHistory(action *Action, messageID string) error { + prefix := fmt.Sprintf("(messageid:%s) %s: ", messageID, action.EventType) + data, _ := json.Marshal(action.Data) + retention := S3SaveHistory{} + json.Unmarshal(data, &retention) + switch action.EventType { + case "saveHistory": + if m.EnableDebug { + log.Printf("%sSaving history for project %s-%d, environment %s-%d", prefix, retention.Project.Name, retention.Project.ID, retention.Environment.Name, retention.Environment.ID) + } + // save the file into a history directory in the lagoon files bucket + filePath := fmt.Sprintf("history/%s-%d/%s-%d/history-%d.json", + retention.Project.Name, retention.Project.ID, + retention.Environment.Name, retention.Environment.ID, + time.Now().Unix(), + ) + err := m.uploadFileS3(prefix, filePath, "application/json", data) + if err != nil { return err } } @@ -90,8 +123,7 @@ func (m *Messenger) handleRetention(ctx context.Context, messageQueue *mq.Messag // deleteFileS3 func (m *Messenger) deleteFileS3(prefix, fileName string, retention S3RetentionCleanUp) error { - var forcePath bool - forcePath = true + forcePath := true session, err := session.NewSession(&aws.Config{ Region: aws.String(m.S3Configuration.S3FilesRegion), Endpoint: aws.String(m.S3Configuration.S3FilesOrigin), @@ -111,15 +143,14 @@ func (m *Messenger) deleteFileS3(prefix, fileName string, retention S3RetentionC return err } if m.EnableDebug { - log.Println(fmt.Sprintf("%sDeleted file %s for environment: %v, id: %v", prefix, fileName, retention.EnvironmentName, retention.EnvironmentID)) + log.Printf("%sDeleted file %s for environment: %v, id: %v", prefix, fileName, retention.EnvironmentName, retention.EnvironmentID) } return nil } -// deleteDirFileS3 +// deleteDirFileS3 deletes files from a directory in s3 func (m *Messenger) deleteFileInDirS3(prefix, fileName string, retention S3RetentionCleanUp) error { - var forcePath bool - forcePath = true + forcePath := true session, err := session.NewSession(&aws.Config{ Region: aws.String(m.S3Configuration.S3FilesRegion), Endpoint: aws.String(m.S3Configuration.S3FilesOrigin), @@ -141,11 +172,11 @@ func (m *Messenger) deleteFileInDirS3(prefix, fileName string, retention S3Reten Key: c.Key, }) if err != nil { - log.Println(fmt.Sprintf("%sError deleting file %s for environment: %v, id: %v: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID, err)) + log.Printf("%sError deleting file %s for environment: %v, id: %v: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID, err) continue // try other files } if m.EnableDebug { - log.Println(fmt.Sprintf("%sDeleted file %s for environment: %v, id: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID)) + log.Printf("%sDeleted file %s for environment: %v, id: %v", prefix, *c.Key, retention.EnvironmentName, retention.EnvironmentID) } } return *page.IsTruncated @@ -155,3 +186,35 @@ func (m *Messenger) deleteFileInDirS3(prefix, fileName string, retention S3Reten } return nil } + +// uploadFileS3 saves a file into s3 +func (m *Messenger) uploadFileS3(prefix, fileName, contentType string, message []byte) error { + forcePath := true + session, err := session.NewSession(&aws.Config{ + Region: aws.String(m.S3Configuration.S3FilesRegion), + Endpoint: aws.String(m.S3Configuration.S3FilesOrigin), + 
Credentials: credentials.NewStaticCredentials(m.S3Configuration.S3FilesAccessKeyID, m.S3Configuration.S3FilesSecretAccessKey, ""), + S3ForcePathStyle: &forcePath, + }) + if err != nil { + return err + } + + object := s3.PutObjectInput{ + Bucket: aws.String(m.S3Configuration.S3FilesBucket), + Key: aws.String(fileName), + Body: bytes.NewReader(message), + ContentType: aws.String(contentType), + } + if !m.S3Configuration.S3IsGCS { + object.ACL = aws.String("private") + } + _, err = s3.New(session).PutObject(&object) + if err != nil { + return err + } + if m.EnableDebug { + log.Printf("%sUploaded file %s", prefix, fileName) + } + return nil +} diff --git a/services/actions-handler/handler/controller_builds.go b/services/actions-handler/handler/controller_builds.go index 8a60b2de22..643bca7274 100644 --- a/services/actions-handler/handler/controller_builds.go +++ b/services/actions-handler/handler/controller_builds.go @@ -26,13 +26,13 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue // use BuildStatus so BuildPhase can be removed buildStatus = message.Meta.BuildStatus } - log.Println(fmt.Sprintf("%sreceived deployment status update - %s", prefix, buildStatus)) + log.Printf("%sreceived deployment status update - %s", prefix, buildStatus) // generate a lagoon token with a expiry of 60 seconds from now token, err := jwt.GenerateAdminToken(m.LagoonAPI.TokenSigningKey, m.LagoonAPI.JWTAudience, m.LagoonAPI.JWTSubject, m.LagoonAPI.JWTIssuer, time.Now().Unix(), 60) if err != nil { // the token wasn't generated if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR:unable to generate token: %v", prefix, err)) + log.Printf("%sERROR:unable to generate token: %v", prefix, err) } return nil } @@ -49,7 +49,7 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR:unable to get environment by namespace - %v", prefix, err)) + log.Printf("%sERROR:unable to get environment by namespace - %v", prefix, err) } return err } @@ -64,7 +64,7 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR:unable to get deployment - %v", prefix, err)) + log.Printf("%sERROR:unable to get deployment - %v", prefix, err) } return err } @@ -72,7 +72,7 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue case "complete", "failed", "cancelled": // the build/deployment is already in a finished state, don't process any additional messages for this deployment if m.EnableDebug { - log.Println(fmt.Sprintf("%sWARNING:deployment is already %s doing nothing - %v", prefix, strings.ToLower(deployment.Status), err)) + log.Printf("%sWARNING:deployment is already %s doing nothing - %v", prefix, strings.ToLower(deployment.Status), err) } return nil } @@ -108,7 +108,7 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to update deployment - %v", prefix, err)) + log.Printf("%sERROR: unable to update deployment - %v", prefix, err) } return err } @@ -143,11 +143,11 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to update environment - %v", prefix, err)) + log.Printf("%sERROR: unable to update environment - %v", prefix, err) } return 
err } - log.Println(fmt.Sprintf("%supdated environment", prefix)) + log.Printf("%supdated environment", prefix) // @TODO START @DEPRECATED this should be removed when the `setEnvironmentServices` mutation gets removed from the API if message.Meta.Services != nil { // @DEPRECATED existingServices := []string{} @@ -165,11 +165,11 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to update environment services - %v", prefix, err)) + log.Printf("%sERROR: unable to update environment services - %v", prefix, err) } return err } - log.Println(fmt.Sprintf("%supdated environment services - %v", prefix, strings.Join(message.Meta.Services, ","))) + log.Printf("%supdated environment services - %v", prefix, strings.Join(message.Meta.Services, ",")) } } // END @DEPRECATED // services now provide additional information @@ -198,7 +198,7 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to delete environment services - %v", prefix, err)) + log.Printf("%sERROR: unable to delete environment services - %v", prefix, err) } errs = append(errs, err) } @@ -222,7 +222,7 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to update environment services - %v", prefix, err)) + log.Printf("%sERROR: unable to update environment services - %v", prefix, err) } errs = append(errs, err) } @@ -239,7 +239,7 @@ func (m *Messenger) handleBuild(ctx context.Context, messageQueue *mq.MessageQue if errMsgs { return fmt.Errorf(strings.Join(errMsg, ",")) } - log.Println(fmt.Sprintf("%supdated environment services", prefix)) + log.Printf("%supdated environment services", prefix) } } return nil diff --git a/services/actions-handler/handler/controller_remove.go b/services/actions-handler/handler/controller_remove.go index 0e1fe4d5b0..387bc00f3b 100644 --- a/services/actions-handler/handler/controller_remove.go +++ b/services/actions-handler/handler/controller_remove.go @@ -15,13 +15,13 @@ import ( func (m *Messenger) handleRemoval(ctx context.Context, messageQueue *mq.MessageQueue, message *schema.LagoonMessage, messageID string) error { prefix := fmt.Sprintf("(messageid:%s) %s: ", messageID, message.Namespace) - log.Println(fmt.Sprintf("%sreceived remove environment status update", prefix)) + log.Printf("%sreceived remove environment status update", prefix) // generate a lagoon token with a expiry of 60 seconds from now token, err := jwt.GenerateAdminToken(m.LagoonAPI.TokenSigningKey, m.LagoonAPI.JWTAudience, m.LagoonAPI.JWTSubject, m.LagoonAPI.JWTIssuer, time.Now().Unix(), 60) if err != nil { // the token wasn't generated if m.EnableDebug { - log.Println(fmt.Sprintf("ERROR: unable to generate token: %v", err)) + log.Printf("ERROR: unable to generate token: %v", err) } return nil } @@ -42,10 +42,10 @@ func (m *Messenger) handleRemoval(ctx context.Context, messageQueue *mq.MessageQ "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to delete environment: %v", prefix, err)) + log.Printf("%sERROR: unable to delete environment: %v", prefix, err) } return err } - log.Println(fmt.Sprintf("%sdeleted environment: %v", prefix, deletedEnvironment.DeleteEnvironment)) + log.Printf("%sdeleted environment: %v", prefix, 
deletedEnvironment.DeleteEnvironment) return nil } diff --git a/services/actions-handler/handler/controller_tasks.go b/services/actions-handler/handler/controller_tasks.go index b967492cd7..b22a5f7891 100644 --- a/services/actions-handler/handler/controller_tasks.go +++ b/services/actions-handler/handler/controller_tasks.go @@ -19,13 +19,13 @@ import ( func (m *Messenger) handleTask(ctx context.Context, messageQueue *mq.MessageQueue, message *schema.LagoonMessage, messageID string) error { prefix := fmt.Sprintf("(messageid:%s) %s/%s: ", messageID, message.Namespace, message.Meta.Task.Name) - log.Println(fmt.Sprintf("%sreceived task status update: %s", prefix, message.Meta.JobStatus)) + log.Printf("%sreceived task status update: %s", prefix, message.Meta.JobStatus) // generate a lagoon token with a expiry of 60 seconds from now token, err := jwt.GenerateAdminToken(m.LagoonAPI.TokenSigningKey, m.LagoonAPI.JWTAudience, m.LagoonAPI.JWTSubject, m.LagoonAPI.JWTIssuer, time.Now().Unix(), 60) if err != nil { // the token wasn't generated if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to generate token: %v", prefix, err)) + log.Printf("%sERROR: unable to generate token: %v", prefix, err) } return nil } @@ -48,7 +48,7 @@ func (m *Messenger) handleTask(ctx context.Context, messageQueue *mq.MessageQueu "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to project information: %v", prefix, err)) + log.Printf("%sERROR: unable to project information: %v", prefix, err) } return err } @@ -80,11 +80,11 @@ func (m *Messenger) handleTask(ctx context.Context, messageQueue *mq.MessageQueu "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to update project with active/standby result: %v", prefix, err)) + log.Printf("%sERROR: unable to update project with active/standby result: %v", prefix, err) } return err } - log.Println(fmt.Sprintf("%supdated project %s with active/standby result: %v", prefix, message.Meta.Project, "success")) + log.Printf("%supdated project %s with active/standby result: %v", prefix, message.Meta.Project, "success") } } // continue on to updating the task as normal @@ -113,10 +113,10 @@ func (m *Messenger) handleTask(ctx context.Context, messageQueue *mq.MessageQueu "message": err.Error(), }) if m.EnableDebug { - log.Println(fmt.Sprintf("%sERROR: unable to update task: %v", prefix, err)) + log.Printf("%sERROR: unable to update task: %v", prefix, err) } return err } - log.Println(fmt.Sprintf("%supdated task: %s", prefix, message.Meta.JobStatus)) + log.Printf("%supdated task: %s", prefix, message.Meta.JobStatus) return nil } diff --git a/services/actions-handler/handler/handler.go b/services/actions-handler/handler/handler.go index a5bbf66b86..8a5bcfcc28 100644 --- a/services/actions-handler/handler/handler.go +++ b/services/actions-handler/handler/handler.go @@ -41,12 +41,6 @@ type Action struct { Data map[string]interface{} `json:"data"` // contains the payload for the action, this could be any json so using a map } -type messenger interface { - Consumer() - Publish(string, []byte) - handleRemoval(context.Context, mq.MessageQueue, *schema.LagoonMessage, string) error -} - // Messenger is used for the config and client information for the messaging queue. 
type Messenger struct { Config mq.Config @@ -103,14 +97,14 @@ func (m *Messenger) Consumer() { go func() { for err := range messageQueue.Error() { - log.Println(fmt.Sprintf("Caught error from message queue: %v", err)) + log.Printf("Caught error from message queue: %v", err) } }() forever := make(chan bool) // Handle any tasks that go to the queue - log.Println(fmt.Sprintf("Listening for messages in queue %s", m.ActionsQueueName)) + log.Printf("Listening for messages in queue %s\n", m.ActionsQueueName) err = messageQueue.SetConsumerHandler(m.ActionsQueueName, func(message mq.Message) { action := &Action{} json.Unmarshal(message.Body(), action) @@ -128,23 +122,27 @@ func (m *Messenger) Consumer() { // check if this a `retentionCleanup` type of action // and perform the steps to clean up anything related to the retention clean up event type case "retentionCleanup": - err = m.handleRetention(ctx, messageQueue, action, messageID) + err = m.handleRetention(action, messageID) + // check if this a `retentionHistory` type of action + // and perform the steps to save anything related to the retention save event type + case "retentionHistory": + err = m.handleSaveHistory(action, messageID) } // if there aren't any errors, then ack the message, an error indicates that there may have been an issue with the api handling the request // skipping this means the message will remain in the queue if LagoonAPIRetryErrorCheck(err) != nil { - log.Println(fmt.Sprintf("Lagoon API error retry: %v", err)) + log.Printf("Lagoon API error retry: %v", err) message.Nack(false, true) // resubmit the message to the queue for processing } else { message.Ack(false) // ack to remove from queue } }) if err != nil { - log.Println(fmt.Sprintf("Failed to set handler to consumer `%s`: %v", m.ActionsQueueName, err)) + log.Printf("Failed to set handler to consumer `%s`: %v", m.ActionsQueueName, err) } // Handle any tasks that go to the lagoon-tasks:controller queue - log.Println(fmt.Sprintf("Listening for messages in queue %s", m.ControllerQueueName)) + log.Printf("Listening for messages in queue %s", m.ControllerQueueName) err = messageQueue.SetConsumerHandler(m.ControllerQueueName, func(message mq.Message) { logMsg := &schema.LagoonMessage{} json.Unmarshal(message.Body(), logMsg) @@ -163,14 +161,14 @@ func (m *Messenger) Consumer() { // if there aren't any errors, then ack the message, an error indicates that there may have been an issue with the api handling the request // skipping this means the message will remain in the queue if LagoonAPIRetryErrorCheck(err) != nil { - log.Println(fmt.Sprintf("Lagoon API error retry: %v", err)) + log.Printf("Lagoon API error retry: %v", err) message.Nack(false, true) // resubmit the message to the queue for processing } else { message.Ack(false) // ack to remove from queue } }) if err != nil { - log.Println(fmt.Sprintf("Failed to set handler to consumer `%s`: %v", m.ControllerQueueName, err)) + log.Printf("Failed to set handler to consumer `%s`: %v", m.ControllerQueueName, err) } <-forever @@ -186,7 +184,7 @@ func (m *Messenger) toLagoonLogs(messageQueue *mq.MessageQueue, message map[stri } producer, err := messageQueue.AsyncProducer("lagoon-logs") if err != nil { - log.Println(fmt.Sprintf("Failed to get async producer: %v", err)) + log.Printf("Failed to get async producer: %v", err) return } producer.Produce(msgBytes) From 1a98fcf2fea96f8644148bfd9caaa7f923626133 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 10:55:16 +1000 Subject: [PATCH 03/15] chore: use knex where 
possible --- services/api/src/resources/deployment/sql.ts | 80 +++++++++++------ services/api/src/resources/task/sql.ts | 90 +++++++++++++------- 2 files changed, 113 insertions(+), 57 deletions(-) diff --git a/services/api/src/resources/deployment/sql.ts b/services/api/src/resources/deployment/sql.ts index d22b8602be..6535fdded7 100644 --- a/services/api/src/resources/deployment/sql.ts +++ b/services/api/src/resources/deployment/sql.ts @@ -77,54 +77,78 @@ export const Sql = { .toString(), // this selects all deployments for the environment and returns everything outside of the requested retain value selectDeploymentHistoryRetention: (environment: number, retain: number) => - knex.raw(`SELECT id, name, remote_id FROM deployment - WHERE environment=`+environment+` AND id NOT IN ( - SELECT id - FROM ( - SELECT id - FROM deployment - WHERE environment=`+environment+` - ORDER BY id DESC - LIMIT `+retain+` - ) d - );`) + knex('deployment') + .select('id','name','remote_id') + .where('environment', environment) + .whereNotIn('id', function() { + this.select('id') + .from(function() { + this.select('id') + .from('deployment') + .where('environment', environment) + .orderBy('id','desc') + .limit(retain) + .as('d') + }) + }) .toString(), // this selects all tasks for the environment and returns everything outside of the requested retain days value selectDeploymentHistoryRetentionDays: (environment: number, retain: number) => - knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+` AND created >= NOW() - INTERVAL `+retain+` DAY;`) + knex('deployment') + .select('id','name','remote_id') + .where('environment', environment) + .where(knex.raw('created >= NOW() - interval ' + retain + 'DAY')) .toString(), // this selects all tasks for the environment and returns everything outside of the requested retain months value selectDeploymentHistoryRetentionMonths: (environment: number, retain: number) => - knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+` AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + knex('deployment') + .select('id','name','remote_id') + .where('environment', environment) + .where(knex.raw('created >= NOW() - interval ' + retain + 'MONTH')) .toString(), // this selects all tasks for the environment and returns everything selectDeploymentHistoryForEnvironment: (environment: number) => - knex.raw(`SELECT id, name, remote_id FROM deployment WHERE environment=`+environment+`;`) + knex('deployment') + .where('environment', '=', environment) .toString(), // same as select, except it deletes all deployments for the environment outside of the requested retain value deleteDeploymentHistory: (environment: number, retain: number) => - knex.raw(`DELETE FROM deployment - WHERE environment=`+environment+` AND id NOT IN ( - SELECT id - FROM ( - SELECT id - FROM deployment - WHERE environment=`+environment+` - ORDER BY id DESC - LIMIT `+retain+` - ) d - );`) + knex('deployment') + .select('id','name','remote_id') + .where('environment', environment) + .whereNotIn('id', function() { + this.select('id') + .from(function() { + this.select('id') + .from('deployment') + .where('environment', environment) + .orderBy('id','desc') + .limit(retain) + .as('d') + }) + }) + .delete() .toString(), // same as select, except it deletes all tasks for the environment outside of the requested retain value deleteDeploymentHistoryDays: (environment: number, retain: number) => - knex.raw(`DELETE FROM deployment WHERE environment=`+environment+` AND 
created >= NOW() - INTERVAL `+retain+` DAY;`) + knex('deployment') + .select('id','name','remote_id') + .where('environment', environment) + .where(knex.raw('created >= NOW() - interval ' + retain + 'DAY')) + .delete() .toString(), // same as select, except it deletes all tasks for the environment outside of the requested retain value deleteDeploymentHistoryMonths: (environment: number, retain: number) => - knex.raw(`DELETE FROM deployment WHERE environment=`+environment+` AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + knex('deployment') + .select('id','name','remote_id') + .where('environment', environment) + .where(knex.raw('created >= NOW() - interval ' + retain + 'MONTH')) + .delete() .toString(), // delete all deployments for environment deleteDeploymentHistoryForEnvironment: (environment: number) => - knex.raw(`DELETE FROM deployment WHERE environment=`+environment+`;`) + knex('deployment') + .where('environment', '=', environment) + .delete() .toString(), }; diff --git a/services/api/src/resources/task/sql.ts b/services/api/src/resources/task/sql.ts index 385b13a49c..6e1627bb2c 100644 --- a/services/api/src/resources/task/sql.ts +++ b/services/api/src/resources/task/sql.ts @@ -278,55 +278,87 @@ export const Sql = { .del() .toString(), // this selects all tasks for the environment and returns everything outside of the requested retain value - selectTaskHistoryRetention: (id: number, retain: number) => - knex.raw(`SELECT id, name, remote_id FROM task - WHERE environment=`+id+` AND admin_only_view=0 AND id NOT IN ( - SELECT id - FROM ( - SELECT id - FROM task - WHERE environment=`+id+` AND admin_only_view=0 - ORDER BY id DESC - LIMIT `+retain+` - ) t - );`) + selectTaskHistoryRetention: (environment: number, retain: number) => + knex('task') + .select('id','name','remote_id') + .where('environment', environment) + .where('admin_only_view', 0) + .whereNotIn('id', function() { + this.select('id') + .from(function() { + this.select('id') + .from('task') + .where('environment', environment) + .where('admin_only_view', 0) + .orderBy('id','desc') + .limit(retain) + .as('t') + }) + }) .toString(), // this selects all tasks for the environment and returns everything outside of the requested retain days value selectTaskHistoryRetentionDays: (environment: number, retain: number) => - knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` DAY;`) + knex('task') + .select('id','name','remote_id') + .where('environment', environment) + .where('admin_only_view', 0) + .where(knex.raw('created >= NOW() - interval ' + retain + 'DAY')) .toString(), // this selects all tasks for the environment and returns everything outside of the requested retain months value selectTaskHistoryRetentionMonths: (environment: number, retain: number) => - knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + knex('task') + .select('id','name','remote_id') + .where('environment', environment) + .where('admin_only_view', 0) + .where(knex.raw('created >= NOW() - interval ' + retain + 'MONTH')) .toString(), // this selects all tasks for the environment and returns everything selectTaskHistoryForEnvironment: (environment: number) => - knex.raw(`SELECT id, name, remote_id FROM task WHERE environment=`+environment+`;`) + knex('task') + .where('environment', '=', environment) .toString(), // same as select, except it deletes all tasks 
for the environment outside of the requested retain value deleteTaskHistory: (environment: number, retain: number) => - knex.raw(`DELETE FROM task - WHERE environment=`+environment+` AND admin_only_view=0 AND id NOT IN ( - SELECT id - FROM ( - SELECT id - FROM task - WHERE environment=`+environment+` AND admin_only_view=0 - ORDER BY id DESC - LIMIT `+retain+` - ) t - );`) + knex('task') + .select('id','name','remote_id') + .where('environment', environment) + .where('admin_only_view', 0) + .whereNotIn('id', function() { + this.select('id') + .from(function() { + this.select('id') + .from('task') + .where('environment', environment) + .where('admin_only_view', 0) + .orderBy('id','desc') + .limit(retain) + .as('t') + }) + }) + .delete() .toString(), // same as select, except it deletes all tasks for the environment outside of the requested retain value deleteTaskHistoryDays: (environment: number, retain: number) => - knex.raw(`DELETE FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` DAY;`) + knex('task') + .select('id','name','remote_id') + .where('environment', environment) + .where('admin_only_view', 0) + .where(knex.raw('created >= NOW() - interval ' + retain + 'DAY')) + .delete() .toString(), // same as select, except it deletes all tasks for the environment outside of the requested retain value deleteTaskHistoryMonths: (environment: number, retain: number) => - knex.raw(`DELETE FROM task WHERE environment=`+environment+` AND admin_only_view=0 AND created >= NOW() - INTERVAL `+retain+` MONTH;`) + knex('task') + .select('id','name','remote_id') + .where('environment', environment) + .where('admin_only_view', 0) + .where(knex.raw('created >= NOW() - interval ' + retain + 'MONTH')) + .delete() .toString(), // same as select, except it deletes all tasks for the environment outside of the requested retain value deleteTaskHistoryForEnvironment: (environment: number) => - knex.raw(`DELETE FROM task WHERE environment=`+environment+`;`) + knex('task') + .where('environment', '=', environment) + .delete() .toString(), }; From 92cf028bb9216698f50b0c114af7ff75a5117c93 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 10:56:45 +1000 Subject: [PATCH 04/15] chore: when project is deleted, remove environment references --- services/api/src/resources/project/helpers.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/services/api/src/resources/project/helpers.ts b/services/api/src/resources/project/helpers.ts index 5f2a4d296e..03c9cac153 100644 --- a/services/api/src/resources/project/helpers.ts +++ b/services/api/src/resources/project/helpers.ts @@ -3,6 +3,7 @@ import { Pool } from 'mariadb'; import { asyncPipe } from '@lagoon/commons/dist/util/func'; import { query } from '../../util/db'; import { Sql } from './sql'; +import { Sql as environmentSql } from '../environment/sql'; // import { logger } from '../../loggers/logger'; export const Helpers = (sqlClientPool: Pool) => { @@ -192,6 +193,12 @@ export const Helpers = (sqlClientPool: Pool) => { sqlClientPool, Sql.deleteDeployTargetConfigs(id) ); + // logger.debug(`deleting project ${id} leftover environment rows`) + // clean up environments table so environments don't remain in limbo once the project is deleted + await query( + sqlClientPool, + environmentSql.deleteEnvironmentsByProjectID(id) + ); // logger.debug(`deleting project ${id}`) // delete the project await query( From 0ae4bf67049dd7bd3cc30a378eec0a35f6dcc750 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: 
Mon, 8 Jul 2024 10:58:56 +1000 Subject: [PATCH 05/15] chore: add exporting of history support --- .../api/src/resources/environment/helpers.ts | 77 ++++++++++++------- services/api/src/resources/environment/sql.ts | 15 ++++ .../src/resources/retentionpolicy/history.ts | 24 ++++++ 3 files changed, 87 insertions(+), 29 deletions(-) diff --git a/services/api/src/resources/environment/helpers.ts b/services/api/src/resources/environment/helpers.ts index aa731bb88d..874cbfb939 100644 --- a/services/api/src/resources/environment/helpers.ts +++ b/services/api/src/resources/environment/helpers.ts @@ -6,8 +6,6 @@ import { Sql } from './sql'; import { Sql as problemSql } from '../problem/sql'; import { Sql as factSql } from '../fact/sql'; import { Helpers as projectHelpers } from '../project/helpers'; -import { Sql as deploymentSql } from '../deployment/sql'; -import { Sql as taskSql } from '../task/sql'; import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; import { logger } from '../../loggers/logger'; @@ -36,27 +34,60 @@ export const Helpers = (sqlClientPool: Pool) => { deleteEnvironment: async (name: string, eid: number, pid: number) => { const environmentData = await Helpers(sqlClientPool).getEnvironmentById(eid); const projectData = await projectHelpers(sqlClientPool).getProjectById(pid); - // clean up environment variables - // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment variables`) - await query( - sqlClientPool, - Sql.deleteEnvironmentVariables(eid) - ); - // clean up services - // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment services`) - await query( - sqlClientPool, - Sql.deleteServices(eid) - ); - // @TODO: environment_storage, environment_backup, environment_problem, environment_fact + try { + // clean up environment variables + // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment variables`) + await query( + sqlClientPool, + Sql.deleteEnvironmentVariables(eid) + ); + // clean up service containers + // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment service containers`) + await query( + sqlClientPool, + Sql.deleteServiceContainersByEnvironmentId( + eid + ) + ); + // clean up services + // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment services`) + await query( + sqlClientPool, + Sql.deleteServices(eid) + ); + // Here we clean up insights attached to the environment + // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment facts`) + await query( + sqlClientPool, + factSql.deleteFactsForEnvironment(eid) + ); + // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment problems`) + await query( + sqlClientPool, + problemSql.deleteProblemsForEnvironment(eid) + ); + + // @TODO: environment_storage, environment_backup + } catch (e) { + logger.error(`error cleaning up linked environment tables: ${e}`) + } + + try { + // export a dump of the project, environment data, and associated task and deployment history before the environment is deleted + await HistoryRetentionEnforcer().saveEnvironmentHistoryBeforeDeletion(projectData, environmentData) + } catch (e) { + logger.error(`error running save environment history: ${e}`) + } // purge all history for this environment, including logs and files from s3 try { - await HistoryRetentionEnforcer().cleanupAllDeployments(projectData, environmentData) // remove all deployments and associated files + // remove all deployments 
and associated files + await HistoryRetentionEnforcer().cleanupAllDeployments(projectData, environmentData) } catch (e) { logger.error(`error running deployment retention enforcer: ${e}`) } try { - await HistoryRetentionEnforcer().cleanupAllTasks(projectData, environmentData) // remove all tasks and associated files + // remove all tasks and associated files + await HistoryRetentionEnforcer().cleanupAllTasks(projectData, environmentData) } catch (e) { logger.error(`error running task retention enforcer: ${e}`) } @@ -67,18 +98,6 @@ export const Helpers = (sqlClientPool: Pool) => { Sql.deleteEnvironment(name, pid) ); - // Here we clean up insights attached to the environment - - await query( - sqlClientPool, - factSql.deleteFactsForEnvironment(eid) - ); - - await query( - sqlClientPool, - problemSql.deleteProblemsForEnvironment(eid) - ); - }, getEnvironmentsDeploytarget: async (eid) => { const rows = await query( diff --git a/services/api/src/resources/environment/sql.ts b/services/api/src/resources/environment/sql.ts index 684bea49e4..639635c323 100644 --- a/services/api/src/resources/environment/sql.ts +++ b/services/api/src/resources/environment/sql.ts @@ -172,6 +172,15 @@ export const Sql = { .where('service_id', '=', id) .delete() .toString(), + deleteServiceContainersByEnvironmentId: (eid: number) => + knex('environment_service_container') + .whereIn('service_id', function() { + this.select('id') + .from('environment_service') + .where('environment', eid); + }) + .delete() + .toString(), // add a new service container insertServiceContainer: ( serviceId: number, @@ -183,4 +192,10 @@ export const Sql = { name, }) .toString(), + // delete all environments from environment table that match project id + deleteEnvironmentsByProjectID: (projectId: number) => + knex('environment') + .where('project', '=', projectId) + .delete() + .toString(), }; diff --git a/services/api/src/resources/retentionpolicy/history.ts b/services/api/src/resources/retentionpolicy/history.ts index 12c1bd0445..6397efc494 100644 --- a/services/api/src/resources/retentionpolicy/history.ts +++ b/services/api/src/resources/retentionpolicy/history.ts @@ -196,6 +196,29 @@ export const HistoryRetentionEnforcer = () => { await query(sqlClientPool, taskSql.deleteTaskHistoryForEnvironment(environmentData.id)); } } + const saveEnvironmentHistoryBeforeDeletion = async (projectData: any, environmentData: any) => { + // ENABLE_SAVED_HISTORY_EXPORT will save the deployment and task history if set to true + // this is a way to export a full copy of the environment data (id, name, created, deleted etc..), the project, and the task/deployment history + // this is a JSON payload that could later be consumed for historical purposes + // by default this feature is DISABLED. 
you should enable this feature if you want to save deleted environment history + // the deleted data ends up in the lagoon files bucket in a directory called history + const ENABLE_SAVED_HISTORY_EXPORT = process.env.ENABLE_SAVED_HISTORY_EXPORT || "false" + if (ENABLE_SAVED_HISTORY_EXPORT == "true" ) { + const taskHistory = await query(sqlClientPool, taskSql.selectTaskHistoryForEnvironment(environmentData.id)); + const deploymentHistory = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryForEnvironment(environmentData.id)); + const actionData = { + type: "retentionHistory", + eventType: "saveHistory", + data: { + environment: environmentData, + project: projectData, + taskHistory: taskHistory, + deploymentHistory: deploymentHistory + } + } + sendToLagoonActions("retentionHistory", actionData) + } + } return { cleanupDeployment, cleanupDeployments, @@ -203,5 +226,6 @@ export const HistoryRetentionEnforcer = () => { cleanupTasks, cleanupAllDeployments, cleanupAllTasks, + saveEnvironmentHistoryBeforeDeletion, }; }; \ No newline at end of file From 73f65db49aec37029a9baea74b9ead50f9dca013 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 10:59:14 +1000 Subject: [PATCH 06/15] chore: add other helpful seeding data --- docker-compose.yaml | 1 + .../01-populate-api-data-lagoon-demo.gql | 112 ++++++++++++++++++ 2 files changed, 113 insertions(+) diff --git a/docker-compose.yaml b/docker-compose.yaml index 2515a26257..20f599c733 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -99,6 +99,7 @@ services: - SIDECAR_HANDLER_HOST=apisidecarhandler - SSH_TOKEN_ENDPOINT=localhost - SSH_TOKEN_ENDPOINT_PORT=2020 + - ENABLE_SAVED_HISTORY_EXPORT=true depends_on: api-lagoon-migrations: condition: service_started diff --git a/local-dev/api-data-watcher-pusher/api-data/01-populate-api-data-lagoon-demo.gql b/local-dev/api-data-watcher-pusher/api-data/01-populate-api-data-lagoon-demo.gql index 80106d3e9f..f8530ab8fe 100644 --- a/local-dev/api-data-watcher-pusher/api-data/01-populate-api-data-lagoon-demo.gql +++ b/local-dev/api-data-watcher-pusher/api-data/01-populate-api-data-lagoon-demo.gql @@ -845,4 +845,116 @@ mutation PopulateApi { ) { id } + + UIProject1Environment2addServices1: addOrUpdateEnvironmentService( + input: { + environment: 4 + name: "cli" + type: "cli-persistent" + containers: [{name: "cli"}] + } + ){ + id + name + type + } + UIProject1Environment2addServices2: addOrUpdateEnvironmentService( + input: { + environment: 4 + name: "nginx" + type: "nginx-php-persistent" + containers: [{name: "nginx"},{name:"php"}] + } + ){ + id + name + type + } + UIProject1Environment2addServices3: addOrUpdateEnvironmentService( + input: { + environment: 4 + name: "mariadb" + type: "mariadb-single" + containers: [{name: "mariadb"}] + } + ){ + id + name + type + } + + UIProject1Environment3addServices1: addOrUpdateEnvironmentService( + input: { + environment: 5 + name: "cli" + type: "cli-persistent" + containers: [{name: "cli"}] + } + ){ + id + name + type + } + UIProject1Environment3addServices2: addOrUpdateEnvironmentService( + input: { + environment: 5 + name: "nginx" + type: "nginx-php-persistent" + containers: [{name: "nginx"},{name:"php"}] + } + ){ + id + name + type + } + UIProject1Environment3addServices3: addOrUpdateEnvironmentService( + input: { + environment: 5 + name: "mariadb" + type: "mariadb-single" + containers: [{name: "mariadb"}] + } + ){ + id + name + type + } + + UIProject1Environment4addServices1: addOrUpdateEnvironmentService( + input: { + environment: 6 + 
name: "cli" + type: "cli-persistent" + containers: [{name: "cli"}] + } + ){ + id + name + type + } + UIProject1Environment4addServices2: addOrUpdateEnvironmentService( + input: { + environment: 6 + name: "nginx" + type: "nginx-php-persistent" + containers: [{name: "nginx"},{name:"php"}] + } + ){ + id + name + type + } + UIProject1Environment4addServices3: addOrUpdateEnvironmentService( + input: { + environment: 6 + name: "mariadb" + type: "mariadb-single" + containers: [{name: "mariadb"}] + } + ){ + id + name + type + } + } From 271808b1dad0f48328cfdd10b498b00291037a4b Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 11:09:33 +1000 Subject: [PATCH 07/15] chore: fix task resolver --- services/api/src/resources/task/resolvers.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/services/api/src/resources/task/resolvers.ts b/services/api/src/resources/task/resolvers.ts index dcce916ad7..7a7f8d637c 100644 --- a/services/api/src/resources/task/resolvers.ts +++ b/services/api/src/resources/task/resolvers.ts @@ -324,14 +324,14 @@ export const addTask: ResolverFn = async ( export const deleteTask: ResolverFn = async ( root, { input: { id } }, - { sqlClientPool, hasPermission, userActivityLogger } + { sqlClientPool, hasPermission, userActivityLogger, adminScopes } ) => { const rows = await query(sqlClientPool, Sql.selectPermsForTask(id)); await hasPermission('task', 'delete', { project: R.path(['0', 'pid'], rows) }); - const task = await Helpers(sqlClientPool, hasPermission).getTaskByTaskInput({id: id}) + const task = await Helpers(sqlClientPool, hasPermission, adminScopes).getTaskByTaskInput({id: id}) if (!task) { throw new Error( From 41cf56be15f59e9091f03f19c3fb9dfe2201753c Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 11:09:49 +1000 Subject: [PATCH 08/15] chore: rename migration --- ...000_retention_policy.js => 20240708000000_retention_policy.js} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename services/api/database/migrations/{20240502000000_retention_policy.js => 20240708000000_retention_policy.js} (100%) diff --git a/services/api/database/migrations/20240502000000_retention_policy.js b/services/api/database/migrations/20240708000000_retention_policy.js similarity index 100% rename from services/api/database/migrations/20240502000000_retention_policy.js rename to services/api/database/migrations/20240708000000_retention_policy.js From a9e3c74e1c285afbd8545c7ab81bf31fe128c675 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 11:16:21 +1000 Subject: [PATCH 09/15] chore: update build image tag --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 702f361fe4..19e72977cb 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ BUILD_DEPLOY_IMAGE_TAG ?= edge # OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG and OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY # set this to a particular build image if required, defaults to nothing to consume what the chart provides -OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=retention-policy +OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=pr-243 OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY= # To build k3d with Calico instead of Flannel, set this to true. 
Note that the Calico install in lagoon-charts is always From ec3619cd3fb9c36ecda721481bdad9f21f69193c Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 8 Jul 2024 12:56:38 +1000 Subject: [PATCH 10/15] chore: update retention readme with note about history export data --- services/api/src/resources/retentionpolicy/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/api/src/resources/retentionpolicy/README.md b/services/api/src/resources/retentionpolicy/README.md index 355318c0cd..e974c5cb93 100644 --- a/services/api/src/resources/retentionpolicy/README.md +++ b/services/api/src/resources/retentionpolicy/README.md @@ -96,6 +96,9 @@ The configuration options for history are * taskType - can be one of `COUNT`, `DAYS`, `MONTHS` * taskHistory - depending on the type selected, will retain task history (logs, status, etc...) to this number accordingly +> Note: There is a variable `ENABLE_SAVED_HISTORY_EXPORT` that is `false` by default, but can be set to `true`. This variable will export data for any deleted environments to the s3 files bucket before the environment is deleted. This exports the current `project`, `environment`, and the associated environments `task` and `deployment` history at the time of deletion. The path of this file will be `history/${projectname}-${projectid}/${environmentname}-${environmentid}/history-${unixtimestamp}.json`. +> If a `history` based retention policy is run against an environment before it is deleted, the exported history snapshot will not contain data that was purged by a retention policy. + ### enforcement history policies are enforced on demand. For example, when a new task or deployment is triggered, a hook is called that will check if the environment needs to enforce the policy or not based on the policy configuration. 
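For illustration, the sketch below shows how an exported history snapshot could be read back out of the files bucket. The object path and the payload keys (`environment`, `project`, `taskHistory`, `deploymentHistory`) follow the `handleSaveHistory` action and its `S3SaveHistory` struct introduced earlier in this series; the endpoint, credentials, bucket, and object key shown are local-dev and example values only, and a real installation would substitute its own S3 configuration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// savedHistory loosely mirrors the S3SaveHistory payload written by the actions-handler.
type savedHistory struct {
	Environment       map[string]interface{}   `json:"environment"`
	Project           map[string]interface{}   `json:"project"`
	TaskHistory       []map[string]interface{} `json:"taskHistory"`
	DeploymentHistory []map[string]interface{} `json:"deploymentHistory"`
}

func main() {
	forcePath := true
	sess, err := session.NewSession(&aws.Config{
		Region:           aws.String("auto"),
		Endpoint:         aws.String("http://minio.127.0.0.1.nip.io:9000"), // local-dev minio endpoint
		Credentials:      credentials.NewStaticCredentials("minio", "minio123", ""),
		S3ForcePathStyle: &forcePath,
	})
	if err != nil {
		log.Fatal(err)
	}
	// hypothetical object key; list the "history/" prefix to discover real snapshots
	key := "history/my-project-1/my-environment-2/history-1720400000.json"
	obj, err := s3.New(sess).GetObject(&s3.GetObjectInput{
		Bucket: aws.String("lagoon-files"),
		Key:    aws.String(key),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Body.Close()

	var snapshot savedHistory
	if err := json.NewDecoder(obj.Body).Decode(&snapshot); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project %v, environment %v: %d deployments, %d tasks\n",
		snapshot.Project["name"], snapshot.Environment["name"],
		len(snapshot.DeploymentHistory), len(snapshot.TaskHistory))
}
```

Decoding into maps keeps the sketch independent of the machinery schema types; a consumer could equally decode into the `schema.Project` and `schema.Environment` types from uselagoon/machinery, which is what the actions-handler uses when writing the payload.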
From 22d2c7a1412cb9a616749327c38f34c4e51d1271 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 14 Oct 2024 15:54:54 +1100 Subject: [PATCH 11/15] refactor: use separate functions for different policy types --- ...populate-api-data-ci-local-control-k8s.gql | 86 +++-- .../20240708000000_retention_policy.js | 3 +- services/api/src/models/retentionpolicy.ts | 102 ------ services/api/src/resolvers.js | 63 ++-- .../src/resources/retentionpolicy/README.md | 50 ++- .../src/resources/retentionpolicy/helpers.ts | 5 + .../resources/retentionpolicy/resolvers.ts | 303 +++++++++++++----- .../api/src/resources/retentionpolicy/sql.ts | 5 + .../src/resources/retentionpolicy/types.ts | 50 ++- services/api/src/typeDefs.js | 238 ++++++++------ 10 files changed, 496 insertions(+), 409 deletions(-) delete mode 100644 services/api/src/models/retentionpolicy.ts diff --git a/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql b/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql index 51d5df5460..370a8cdb7f 100644 --- a/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql +++ b/local-dev/api-data-watcher-pusher/api-data/03-populate-api-data-ci-local-control-k8s.gql @@ -128,92 +128,78 @@ mutation PopulateApi { id } - RetPol1: createRetentionPolicy(input:{ + RetPol1: createHarborRetentionPolicy(input:{ name: "harbor-policy" - type: HARBOR - harbor: { - enabled: true - rules: [ - { - name: "all branches, excluding pullrequests" - pattern: "[^pr-]*/*" - latestPulled: 3 - }, - { - name: "pullrequests" - pattern: "pr-*" - latestPulled: 1 - } - ] - schedule: "3 * * * *" - } + enabled: true + rules: [ + { + name: "all branches, excluding pullrequests" + pattern: "[^pr-]*/*" + latestPulled: 3 + }, + { + name: "pullrequests" + pattern: "pr-*" + latestPulled: 1 + } + ] + schedule: "3 * * * *" }) { id name configuration { - ... on HarborRetentionPolicy { - enabled - rules { - name - pattern - latestPulled - } - schedule + enabled + rules { + name + pattern + latestPulled } + schedule } - type created updated } - RetPol2: createRetentionPolicy(input:{ + RetPol2: createHistoryRetentionPolicy(input:{ name: "history-policy" - type: HISTORY - history: { - enabled: true - deploymentHistory: 5 - deploymentType: COUNT - taskHistory: 10 - taskType: COUNT - } + enabled: true + deploymentHistory: 5 + deploymentType: COUNT + taskHistory: 10 + taskType: COUNT }) { id name configuration { - ... 
on HistoryRetentionPolicy { - enabled - deploymentHistory - deploymentType - taskHistory - taskType - } + enabled + deploymentHistory + deploymentType + taskHistory + taskType } - type created updated } - RetPolLink1: addRetentionPolicyLink(input:{ - id: 1 + RetPolLink1: addHarborRetentionPolicyLink(input:{ + name: "harbor-policy" scope: GLOBAL scopeName: "global", }) { id name - type source created updated } - RetPolLink2: addRetentionPolicyLink(input:{ - id: 2 + RetPolLink2: addHistoryRetentionPolicyLink(input:{ + name: "history-policy" scope: GLOBAL scopeName: "global", }) { id name - type source created updated diff --git a/services/api/database/migrations/20240708000000_retention_policy.js b/services/api/database/migrations/20240708000000_retention_policy.js index 0ff5ba450c..0b9e73ecdd 100644 --- a/services/api/database/migrations/20240708000000_retention_policy.js +++ b/services/api/database/migrations/20240708000000_retention_policy.js @@ -6,11 +6,12 @@ exports.up = async function(knex) { return knex.schema .createTable('retention_policy', function (table) { table.increments('id').notNullable().primary(); - table.string('name', 300).unique({indexName: 'name'}); + table.string('name', 300); table.enu('type',['harbor','history']).notNullable(); table.text('configuration'); table.timestamp('updated').notNullable().defaultTo(knex.fn.now()); table.timestamp('created').notNullable().defaultTo(knex.fn.now()); + table.unique(['name', 'type'], {indexName: 'retention_policy'}); }) .createTable('retention_policy_reference', function (table) { table.integer('retention_policy'); diff --git a/services/api/src/models/retentionpolicy.ts b/services/api/src/models/retentionpolicy.ts deleted file mode 100644 index 3e6c74eecf..0000000000 --- a/services/api/src/models/retentionpolicy.ts +++ /dev/null @@ -1,102 +0,0 @@ -import { logger } from "../loggers/logger" - -export interface HarborRetentionPolicy { - enabled: boolean - branchRetention: number - pullrequestRetention: number - schedule: string -} - -export interface HistoryRetentionPolicy { - enabled: boolean - deploymentHistory: number - taskHistory: number -} - -export const RetentionPolicy = () => { - const convertHarborRetentionPolicyToJSON = async ( - harbor: HarborRetentionPolicy - ): Promise => { - const c = JSON.stringify(harbor) - return c - }; - - const convertHistoryRetentionPolicyToJSON = async ( - history: HistoryRetentionPolicy - ): Promise => { - const c = JSON.stringify(history) - return c - }; - - const convertJSONToHarborRetentionPolicy = async ( - configuration: string - ): Promise => { - const c = JSON.parse(configuration) - if (typeof c.enabled != "boolean") { - throw new Error("enabled must be a boolean"); - } - if (typeof c.branchRetention != "number") { - throw new Error("branchRetention must be a number"); - } - if (typeof c.pullrequestRetention != "number") { - throw new Error("pullrequestRetention must be a number"); - } - if (typeof c.schedule != "string") { - throw new Error("schedule must be a string"); - } - return c - }; - - const convertJSONToHistoryRetentionPolicy = async ( - configuration: string - ): Promise => { - const c = JSON.parse(configuration) - if (typeof c.enabled != "boolean") { - throw new Error("enabled must be a boolean"); - } - if (typeof c.deploymentHistory != "number") { - throw new Error("deploymentHistory must be a number"); - } - if (typeof c.taskHistory != "number") { - throw new Error("taskHistory must be a number"); - } - return c - }; - - // run the configuration patches through the 
validation process - const returnValidatedConfiguration = async (type: string, patch: any): Promise => { - const c = JSON.stringify(patch[type]) - switch (type) { - case "harbor": - try { - await convertJSONToHarborRetentionPolicy(c) - return c - } catch (e) { - throw new Error( - `Provided configuration is not valid for type ${type}: ${e}` - ); - } - case "history": - try { - await convertJSONToHistoryRetentionPolicy(c) - return c - } catch (e) { - throw new Error( - `Provided configuration is not valid for type ${type}: ${e}` - ); - } - default: - throw new Error( - `Provided configuration is not valid for type ${type}` - ); - } - } - - return { - convertHarborRetentionPolicyToJSON, - convertHistoryRetentionPolicyToJSON, - convertJSONToHarborRetentionPolicy, - convertJSONToHistoryRetentionPolicy, - returnValidatedConfiguration - }; -}; \ No newline at end of file diff --git a/services/api/src/resolvers.js b/services/api/src/resolvers.js index 3b2ff29f6e..b15eabd174 100644 --- a/services/api/src/resolvers.js +++ b/services/api/src/resolvers.js @@ -279,14 +279,22 @@ const { } = require('./resources/backup/resolvers'); const { - createRetentionPolicy, - updateRetentionPolicy, - deleteRetentionPolicy, - getRetentionPoliciesByProjectId, - getRetentionPoliciesByOrganizationId, - listRetentionPolicies, - addRetentionPolicyLink, - removeRetentionPolicyLink, + createHarborRetentionPolicy, + updateHarborRetentionPolicy, + createHistoryRetentionPolicy, + updateHistoryRetentionPolicy, + deleteHarborRetentionPolicy, + deleteHistoryRetentionPolicy, + getHarborRetentionPoliciesByProjectId, + getHarborRetentionPoliciesByOrganizationId, + getHistoryRetentionPoliciesByProjectId, + getHistoryRetentionPoliciesByOrganizationId, + listHarborRetentionPolicies, + listHistoryRetentionPolicies, + addHarborRetentionPolicyLink, + addHistoryRetentionPolicyLink, + removeHarborRetentionPolicyLink, + removeHistoryRetentionPolicyLink, } = require('./resources/retentionpolicy/resolvers'); const { @@ -431,7 +439,8 @@ const resolvers = { groups: getGroupsByProjectId, privateKey: getPrivateKey, publicKey: getProjectDeployKey, - retentionPolicies: getRetentionPoliciesByProjectId, + harborRetentionPolicies: getHarborRetentionPoliciesByProjectId, + historyRetentionPolicies: getHistoryRetentionPoliciesByProjectId, }, GroupInterface: { __resolveType(group) { @@ -483,13 +492,15 @@ const resolvers = { owners: getOwnersByOrganizationId, deployTargets: getDeployTargetsByOrganizationId, notifications: getNotificationsByOrganizationId, - retentionPolicies: getRetentionPoliciesByOrganizationId + harborRetentionPolicies: getHarborRetentionPoliciesByOrganizationId, + historyRetentionPolicies: getHistoryRetentionPoliciesByOrganizationId }, OrgProject: { groups: getGroupsByOrganizationsProject, groupCount: getGroupCountByOrganizationProject, notifications: getNotificationsForOrganizationProjectId, - retentionPolicies: getRetentionPoliciesByProjectId, + harborRetentionPolicies: getHarborRetentionPoliciesByProjectId, + historyRetentionPolicies: getHistoryRetentionPoliciesByProjectId, }, OrgEnvironment: { project: getProjectById, @@ -537,18 +548,6 @@ const resolvers = { } } }, - RetentionPolicyConfiguration: { - __resolveType(obj) { - switch (obj.type) { - case 'harbor': - return 'HarborRetentionPolicy'; - case 'history': - return 'HistoryRetentionPolicy'; - default: - return null; - } - } - }, AdvancedTaskDefinition: { __resolveType (obj) { switch(obj.type) { @@ -631,7 +630,8 @@ const resolvers = { getEnvVariablesByProjectEnvironmentName, 
checkBulkImportProjectsAndGroupsToOrganization, allPlatformUsers: getAllPlatformUsers, - listRetentionPolicies + listHarborRetentionPolicies, + listHistoryRetentionPolicies }, Mutation: { addProblem, @@ -758,11 +758,16 @@ const resolvers = { deleteEnvironmentService, addPlatformRoleToUser, removePlatformRoleFromUser, - createRetentionPolicy, - updateRetentionPolicy, - deleteRetentionPolicy, - addRetentionPolicyLink, - removeRetentionPolicyLink + createHarborRetentionPolicy, + updateHarborRetentionPolicy, + deleteHarborRetentionPolicy, + addHarborRetentionPolicyLink, + removeHarborRetentionPolicyLink, + createHistoryRetentionPolicy, + updateHistoryRetentionPolicy, + deleteHistoryRetentionPolicy, + addHistoryRetentionPolicyLink, + removeHistoryRetentionPolicyLink, }, Subscription: { backupChanged: backupSubscriber, diff --git a/services/api/src/resources/retentionpolicy/README.md b/services/api/src/resources/retentionpolicy/README.md index e974c5cb93..cad9e5ff88 100644 --- a/services/api/src/resources/retentionpolicy/README.md +++ b/services/api/src/resources/retentionpolicy/README.md @@ -43,25 +43,22 @@ If the organization based policy is removed from the organization, then the enfo ``` mutation createHarborPolicy { - createRetentionPolicy(input:{ + createHarborRetentionPolicy(input:{ name: "custom-harbor-policy" - type: HARBOR - harbor: { - enabled: true - rules: [ - { - name: "all branches, excluding pullrequests" - pattern: "[^pr\\-]*/*" - latestPulled: 3 - }, - { - name: "pullrequests" - pattern: "pr-*" - latestPulled: 1 - } - ] - schedule: "3 3 * * 3" - } + enabled: true + rules: [ + { + name: "all branches, excluding pullrequests" + pattern: "[^pr\\-]*/*" + latestPulled: 3 + }, + { + name: "pullrequests" + pattern: "pr-*" + latestPulled: 1 + } + ] + schedule: "3 3 * * 3" }) { id name @@ -76,7 +73,6 @@ mutation createHarborPolicy { schedule } } - type created updated } @@ -107,16 +103,13 @@ history policies are enforced on demand. For example, when a new task or deploym ``` mutation createHistoryPolicy { - createRetentionPolicy(input:{ + createHistoryRetentionPolicy(input:{ name: "custom-history-policy" - type: HISTORY - history: { - enabled: true - deploymentHistory: 15 - deploymentType: DAYS - taskHistory: 3 - taskType: MONTHS - } + enabled: true + deploymentHistory: 15 + deploymentType: DAYS + taskHistory: 3 + taskType: MONTHS }) { id name @@ -129,7 +122,6 @@ mutation createHistoryPolicy { taskType } } - type created updated } diff --git a/services/api/src/resources/retentionpolicy/helpers.ts b/services/api/src/resources/retentionpolicy/helpers.ts index fa8d8efae9..78fea05e9a 100644 --- a/services/api/src/resources/retentionpolicy/helpers.ts +++ b/services/api/src/resources/retentionpolicy/helpers.ts @@ -18,6 +18,10 @@ export const Helpers = (sqlClientPool: Pool) => { const rows = await query(sqlClientPool, Sql.selectRetentionPolicyByName(name)); return R.prop(0, rows); }; + const getRetentionPolicyByNameAndType = async (name: string, type: string) => { + const rows = await query(sqlClientPool, Sql.selectRetentionPolicyByNameAndType(name, type)); + return R.prop(0, rows); + }; const getRetentionPolicyByTypeAndLink = async (type: string, sid: number, scope: string) => { const rows = await query(sqlClientPool, Sql.selectRetentionPoliciesByTypeAndLink(type, sid, scope)); return R.prop(0, rows); // ? 
R.prop(0, rows) : null; @@ -325,6 +329,7 @@ export const Helpers = (sqlClientPool: Pool) => { return { getRetentionPolicy, getRetentionPolicyByName, + getRetentionPolicyByNameAndType, getRetentionPoliciesByProjectWithType, getRetentionPoliciesByOrganizationWithType, getRetentionPoliciesByGlobalWithType, diff --git a/services/api/src/resources/retentionpolicy/resolvers.ts b/services/api/src/resources/retentionpolicy/resolvers.ts index cec63b151b..398aaab494 100644 --- a/services/api/src/resources/retentionpolicy/resolvers.ts +++ b/services/api/src/resources/retentionpolicy/resolvers.ts @@ -1,5 +1,4 @@ -import * as R from 'ramda'; import { ResolverFn } from '..'; import { logger } from '../../loggers/logger'; import { isPatchEmpty, query, knex } from '../../util/db'; @@ -9,30 +8,10 @@ import { Helpers as organizationHelpers } from '../organization/helpers'; import { Helpers as projectHelpers } from '../project/helpers'; import { Sql } from './sql'; -export const createRetentionPolicy: ResolverFn = async ( - _root, - { input }, - { sqlClientPool, hasPermission, userActivityLogger } -) => { +const createRetentionPolicy = async (sqlClientPool, hasPermission, userActivityLogger, input, type) => { await hasPermission('retention_policy', 'add'); - if (input.id) { - const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) - if (retpol) { - throw new Error( - `Retention policy with ID ${input.id} already exists` - ); - } - } - - // @ts-ignore - if (!input.type) { - throw new Error( - 'Must provide type' - ); - } - - const retpol = await Helpers(sqlClientPool).getRetentionPolicyByName(input.name) + const retpol = await Helpers(sqlClientPool).getRetentionPolicyByNameAndType(input.name, type) if (retpol) { throw new Error( `Retention policy with name ${input.name} already exists` @@ -40,8 +19,22 @@ export const createRetentionPolicy: ResolverFn = async ( } // convert the type to the configuration json on import after passing through the validator + let event try { - input.configuration = await RetentionPolicy().returnValidatedConfiguration(input.type, input) + switch (type) { + case "harbor": + event = 'api:createHarborRetentionPolicy' + input.configuration = await RetentionPolicy().returnValidatedHarborConfiguration(input) + break; + case "history": + event = 'api:createHistoryRetentionPolicy' + input.configuration = await RetentionPolicy().returnValidatedHistoryConfiguration(input) + break; + default: + throw new Error( + `No matching type` + ); + } } catch (e) { throw new Error( `${e}` @@ -51,14 +44,15 @@ export const createRetentionPolicy: ResolverFn = async ( const { insertId } = await query( sqlClientPool, Sql.createRetentionPolicy({ + type: type, ...input, })); const row = await Helpers(sqlClientPool).getRetentionPolicy(insertId); - userActivityLogger(`User created a retention policy`, { + userActivityLogger(`User created a ${type} retention policy`, { project: '', - event: 'api:createRetentionPolicy', + event: event, payload: { patch: { name: input.name, @@ -68,23 +62,33 @@ export const createRetentionPolicy: ResolverFn = async ( } }); - return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; - // return row; +} + +export const createHarborRetentionPolicy: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + return await createRetentionPolicy(sqlClientPool, hasPermission, userActivityLogger, input, 'harbor'); }; -export const updateRetentionPolicy: ResolverFn = async ( - root, +export const 
createHistoryRetentionPolicy: ResolverFn = async ( + _root, { input }, { sqlClientPool, hasPermission, userActivityLogger } ) => { + return await createRetentionPolicy(sqlClientPool, hasPermission, userActivityLogger, input, 'history'); +}; + +const updateRetentionPolicy = async (sqlClientPool, hasPermission, userActivityLogger, input, type) => { await hasPermission('retention_policy', 'update'); if (isPatchEmpty(input)) { throw new Error('input.patch requires at least 1 attribute'); } - const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + const retpol = await Helpers(sqlClientPool).getRetentionPolicyByNameAndType(input.name, type) if (!retpol) { throw new Error( `Retention policy does not exist` @@ -95,28 +99,36 @@ export const updateRetentionPolicy: ResolverFn = async ( name: input.patch.name } - if (!input.patch[retpol.type]) { - throw new Error( - `Missing configuration for type ${retpol.type}, patch not provided` - ); - } - // convert the type to the configuration json on import after passing through the validator + let event try { - patch["configuration"] = await RetentionPolicy().returnValidatedConfiguration(retpol.type, input.patch) + switch (type) { + case "harbor": + event = 'api:updateHarborRetentionPolicy' + patch["configuration"] = await RetentionPolicy().returnValidatedHarborConfiguration(input.patch) + break; + case "history": + event = 'api:updateHistoryRetentionPolicy' + patch["configuration"] = await RetentionPolicy().returnValidatedHistoryConfiguration(input.patch) + break; + default: + throw new Error( + `No matching type` + ); + } } catch (e) { throw new Error( `${e}` ); } - await Helpers(sqlClientPool).updateRetentionPolicy(input.id, patch); + await Helpers(sqlClientPool).updateRetentionPolicy(retpol.id, patch); - const row = await Helpers(sqlClientPool).getRetentionPolicy(input.id); + const row = await Helpers(sqlClientPool).getRetentionPolicy(retpol.id); - userActivityLogger(`User updated retention policy`, { + userActivityLogger(`User updated ${type} retention policy`, { project: '', - event: 'api:updateRetentionPolicy', + event: event, payload: { patch: patch, data: row @@ -127,54 +139,89 @@ export const updateRetentionPolicy: ResolverFn = async ( // if a policy is updated, and the configuration is not the same as before the update // then run postRetentionPolicyUpdateHook to make sure that the policy enforcer does // any policy updates for any impacted projects - const policyEnabled = input.patch[retpol.type].enabled + const policyEnabled = input.patch.enabled await Helpers(sqlClientPool).postRetentionPolicyUpdateHook(retpol.type, retpol.id, null, !policyEnabled) } return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; - // return row; +} + +export const updateHarborRetentionPolicy: ResolverFn = async ( + root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + return await updateRetentionPolicy(sqlClientPool, hasPermission, userActivityLogger, input, 'harbor'); }; -export const deleteRetentionPolicy: ResolverFn = async ( - _root, - { id: rid }, +export const updateHistoryRetentionPolicy: ResolverFn = async ( + root, + { input }, { sqlClientPool, hasPermission, userActivityLogger } ) => { + return await updateRetentionPolicy(sqlClientPool, hasPermission, userActivityLogger, input, 'history'); +}; + +const deleteRetentionPolicy = async (sqlClientPool, hasPermission, userActivityLogger, name, type) => { await hasPermission('retention_policy', 'delete'); - const retpol = await 
Helpers(sqlClientPool).getRetentionPolicy(rid) + const retpol = await Helpers(sqlClientPool).getRetentionPolicyByNameAndType(name, type) if (!retpol) { throw new Error( `Retention policy does not exist` ); } - await Helpers(sqlClientPool).deleteRetentionPolicy(rid); + let event + switch (type) { + case "harbor": + event = 'api:deleteHarborRetentionPolicy' + break; + case "history": + event = 'api:deleteHistoryRetentionPolicy' + break; + default: + throw new Error( + `No matching type` + ); + } + + await Helpers(sqlClientPool).deleteRetentionPolicy(retpol.id); - userActivityLogger(`User deleted a retention policy '${retpol.name}'`, { + userActivityLogger(`User deleted a ${type} retention policy '${retpol.name}'`, { project: '', - event: 'api:deleteRetentionPolicy', + event: event, payload: { input: { - retentionPolicy: rid + retentionPolicy: retpol.id } } }); return 'success'; +} + +export const deleteHarborRetentionPolicy: ResolverFn = async ( + _root, + { name }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + return await deleteRetentionPolicy(sqlClientPool, hasPermission, userActivityLogger, name, 'harbor'); }; -export const listRetentionPolicies: ResolverFn = async ( - root, - { type, name }, - { sqlClientPool, hasPermission } +export const deleteHistoryRetentionPolicy: ResolverFn = async ( + _root, + { name }, + { sqlClientPool, hasPermission, userActivityLogger } ) => { + return await deleteRetentionPolicy(sqlClientPool, hasPermission, userActivityLogger, name, 'history'); +}; + +const listRetentionPolicies = async (sqlClientPool, hasPermission, name, type) => { await hasPermission('retention_policy', 'viewAll'); let queryBuilder = knex('retention_policy'); - if (type) { - queryBuilder = queryBuilder.and.where('type', type); - } + queryBuilder = queryBuilder.and.where('type', type); if (name) { queryBuilder = queryBuilder.where('name', name); @@ -182,19 +229,39 @@ export const listRetentionPolicies: ResolverFn = async ( const rows = await query(sqlClientPool, queryBuilder.toString()); return rows.map(row => ({ ...row, source: null, configuration: {type: row.type, ...JSON.parse(row.configuration)} })); -}; +} +export const listHarborRetentionPolicies: ResolverFn = async ( + root, + { name }, + { sqlClientPool, hasPermission } +) => { + return await listRetentionPolicies(sqlClientPool, hasPermission, name, 'harbor') +}; -export const addRetentionPolicyLink: ResolverFn = async ( - _root, - { input }, - { sqlClientPool, hasPermission, userActivityLogger } +export const listHistoryRetentionPolicies: ResolverFn = async ( + root, + { name }, + { sqlClientPool, hasPermission } ) => { + return await listRetentionPolicies(sqlClientPool, hasPermission, name, 'history') +}; +const addRetentionPolicyLink = async (sqlClientPool, hasPermission, userActivityLogger, input, type) => { let scopeId = 0 + let event, prefix + switch (type) { + case "harbor": + prefix = 'api:addHarbor' + break; + case "history": + prefix = 'api:addHistory' + break; + } switch (input.scope) { case "global": await hasPermission('retention_policy', 'addGlobal'); + event = `${prefix}RetentionPolicyGlobal` break; case "organization": const organization = await organizationHelpers(sqlClientPool).getOrganizationByName(input.scopeName) @@ -205,6 +272,7 @@ export const addRetentionPolicyLink: ResolverFn = async ( } await hasPermission('retention_policy', 'addOrganization'); scopeId = organization.id + event = `${prefix}RetentionPolicyOrganization` break; case "project": const project = await 
projectHelpers(sqlClientPool).getProjectByProjectInput({name: input.scopeName}) @@ -215,6 +283,7 @@ export const addRetentionPolicyLink: ResolverFn = async ( } await hasPermission('retention_policy', 'addProject'); scopeId = project.id + event = `${prefix}RetentionPolicyProject` break; default: throw new Error( @@ -222,7 +291,7 @@ export const addRetentionPolicyLink: ResolverFn = async ( ); } - const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + const retpol = await Helpers(sqlClientPool).getRetentionPolicyByNameAndType(input.name, type) if (!retpol) { throw new Error( `Retention policy does not exist` @@ -239,7 +308,7 @@ export const addRetentionPolicyLink: ResolverFn = async ( await query( sqlClientPool, Sql.addRetentionPolicyLink( - input.id, + retpol.id, input.scope, scopeId, ) @@ -250,9 +319,9 @@ export const addRetentionPolicyLink: ResolverFn = async ( // any policy updates for any impacted projects await Helpers(sqlClientPool).postRetentionPolicyLinkHook(scopeId, input.scope, retpol.type, retpol.id, false) - userActivityLogger(`User added a retention policy '${retpol.name}' to ${input.scope}`, { + userActivityLogger(`User added a ${type} retention policy '${retpol.name}' to ${input.scope}`, { project: '', - event: 'api:addRetentionPolicyOrganization', + event: event, payload: { input: { retentionPolicy: retpol.id, @@ -262,19 +331,41 @@ export const addRetentionPolicyLink: ResolverFn = async ( } }); - const row = await Helpers(sqlClientPool).getRetentionPolicy(input.id) + const row = await Helpers(sqlClientPool).getRetentionPolicy(retpol.id) return { ...row, configuration: {type: row.type, ...JSON.parse(row.configuration)} }; +} + +export const addHarborRetentionPolicyLink: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + return await addRetentionPolicyLink(sqlClientPool, hasPermission, userActivityLogger, input, 'harbor') }; -export const removeRetentionPolicyLink: ResolverFn = async ( +export const addHistoryRetentionPolicyLink: ResolverFn = async ( _root, { input }, { sqlClientPool, hasPermission, userActivityLogger } ) => { + return await addRetentionPolicyLink(sqlClientPool, hasPermission, userActivityLogger, input, 'history') +}; + +const removeRetentionPolicyLink = async (sqlClientPool, hasPermission, userActivityLogger, input, type) => { let scopeId = 0 + let event, prefix + switch (type) { + case "harbor": + prefix = 'api:removeHarbor' + break; + case "history": + prefix = 'api:removeHistory' + break; + } switch (input.scope) { case "global": await hasPermission('retention_policy', 'addGlobal'); + event = `${prefix}RetentionPolicyGlobal` break; case "organization": const organization = await organizationHelpers(sqlClientPool).getOrganizationByName(input.scopeName) @@ -285,6 +376,7 @@ export const removeRetentionPolicyLink: ResolverFn = async ( } await hasPermission('retention_policy', 'addOrganization'); scopeId = organization.id + event = `${prefix}RetentionPolicyOrganization` break; case "project": const project = await projectHelpers(sqlClientPool).getProjectByProjectInput({name: input.scopeName}) @@ -295,6 +387,7 @@ export const removeRetentionPolicyLink: ResolverFn = async ( } await hasPermission('retention_policy', 'addProject'); scopeId = project.id + event = `${prefix}RetentionPolicyProject` break; default: throw new Error( @@ -302,14 +395,14 @@ export const removeRetentionPolicyLink: ResolverFn = async ( ); } - const retpol = await Helpers(sqlClientPool).getRetentionPolicy(input.id); + 
const retpol = await Helpers(sqlClientPool).getRetentionPolicyByNameAndType(input.name, type); if (!retpol) { throw new Error( `Retention policy does not exist` ); } - const retpoltypes = await Helpers(sqlClientPool).getRetentionPoliciesByTypePolicyIDAndLink(retpol.type, input.id, scopeId, input.scope); + const retpoltypes = await Helpers(sqlClientPool).getRetentionPoliciesByTypePolicyIDAndLink(retpol.type, retpol.id, scopeId, input.scope); if (retpoltypes.length == 0) { throw new Error( `No matching retention policy attached to this ${input.scope}` @@ -326,7 +419,7 @@ export const removeRetentionPolicyLink: ResolverFn = async ( await query( sqlClientPool, Sql.deleteRetentionPolicyLink( - input.id, + retpol.id, input.scope, scopeId, ) @@ -346,9 +439,9 @@ export const removeRetentionPolicyLink: ResolverFn = async ( await Helpers(sqlClientPool).postRetentionPolicyUpdateHook(retpol.type, retpol.id, preDeleteProjectIds, true) } - userActivityLogger(`User removed a retention policy '${retpol.name}' from organization`, { + userActivityLogger(`User removed a ${type} retention policy '${retpol.name}' from organization`, { project: '', - event: 'api:removeRetentionPolicyOrganization', + event: event, payload: { input: { retentionPolicy: retpol.id, @@ -359,10 +452,42 @@ export const removeRetentionPolicyLink: ResolverFn = async ( }); return "success" +} + +export const removeHarborRetentionPolicyLink: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + return await removeRetentionPolicyLink(sqlClientPool, hasPermission, userActivityLogger, input, 'harbor') }; -// This is only called by the project resolver, so there is no need to do any permission checks -export const getRetentionPoliciesByProjectId: ResolverFn = async ( +export const removeHistoryRetentionPolicyLink: ResolverFn = async ( + _root, + { input }, + { sqlClientPool, hasPermission, userActivityLogger } +) => { + return await removeRetentionPolicyLink(sqlClientPool, hasPermission, userActivityLogger, input, 'history') +}; + +// This is only called by the project resolver, so there is no need to do any permission checks as they're already done by the project +export const getHarborRetentionPoliciesByProjectId: ResolverFn = async ( + project, + args, + { sqlClientPool } +) => { + + let pid = args.project; + if (project) { + pid = project.id; + } + let rows = [] + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('harbor', 'project', project.id); + return rows; +}; + +// This is only called by the project resolver, so there is no need to do any permission checks as they're already done by the project +export const getHistoryRetentionPoliciesByProjectId: ResolverFn = async ( project, args, { sqlClientPool } @@ -373,12 +498,28 @@ export const getRetentionPoliciesByProjectId: ResolverFn = async ( pid = project.id; } let rows = [] - rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "project", project.id); + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('history', 'project', project.id); + return rows; +}; + +// This is only called by the organization resolver, so there is no need to do any permission checks as they're already done by the organization +export const getHarborRetentionPoliciesByOrganizationId: ResolverFn = async ( + organization, + args, + { sqlClientPool } +) => { + + let oid = args.organization; + if (organization) { + oid = organization.id; + } + let rows = [] + rows = 
await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('harbor', 'organization', oid); return rows; }; -// This is only called by the organization resolver, so there is no need to do any permission checks -export const getRetentionPoliciesByOrganizationId: ResolverFn = async ( +// This is only called by the organization resolver, so there is no need to do any permission checks as they're already done by the organization +export const getHistoryRetentionPoliciesByOrganizationId: ResolverFn = async ( organization, args, { sqlClientPool } @@ -389,6 +530,6 @@ export const getRetentionPoliciesByOrganizationId: ResolverFn = async ( oid = organization.id; } let rows = [] - rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "organization", oid); + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('history', 'organization', oid); return rows; }; \ No newline at end of file diff --git a/services/api/src/resources/retentionpolicy/sql.ts b/services/api/src/resources/retentionpolicy/sql.ts index 58ae419caf..84cd64613c 100644 --- a/services/api/src/resources/retentionpolicy/sql.ts +++ b/services/api/src/resources/retentionpolicy/sql.ts @@ -19,6 +19,11 @@ export const Sql = { knex('retention_policy') .where('name', '=', name) .toString(), + selectRetentionPolicyByNameAndType: (name: string, type: string) => + knex('retention_policy') + .where('name', '=', name) + .where('type', '=', type) + .toString(), selectRetentionPoliciesByType: (type: string) => knex('retention_policy') .where('type', '=', type) diff --git a/services/api/src/resources/retentionpolicy/types.ts b/services/api/src/resources/retentionpolicy/types.ts index 1494d9b692..1f3f79d23c 100644 --- a/services/api/src/resources/retentionpolicy/types.ts +++ b/services/api/src/resources/retentionpolicy/types.ts @@ -101,32 +101,29 @@ export const RetentionPolicy = () => { return c }; + // run the configuration patches through the validation process + const returnValidatedHarborConfiguration = async (patch: any): Promise => { + const c = JSON.stringify(patch) + try { + await convertJSONToHarborRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided harbor configuration is not valid: ${e}` + ); + } + } + // run the configuration patches through the validation process - const returnValidatedConfiguration = async (type: string, patch: any): Promise => { - const c = JSON.stringify(patch[type]) - switch (type) { - case "harbor": - try { - await convertJSONToHarborRetentionPolicy(c) - return c - } catch (e) { - throw new Error( - `Provided configuration is not valid for type ${type}: ${e}` - ); - } - case "history": - try { - await convertJSONToHistoryRetentionPolicy(c) - return c - } catch (e) { - throw new Error( - `Provided configuration is not valid for type ${type}: ${e}` - ); - } - default: - throw new Error( - `Provided configuration is not valid for type ${type}` - ); + const returnValidatedHistoryConfiguration = async (patch: any): Promise => { + const c = JSON.stringify(patch) + try { + await convertJSONToHistoryRetentionPolicy(c) + return c + } catch (e) { + throw new Error( + `Provided history configuration is not valid: ${e}` + ); } } @@ -135,6 +132,7 @@ export const RetentionPolicy = () => { convertHistoryRetentionPolicyToJSON, convertJSONToHarborRetentionPolicy, convertJSONToHistoryRetentionPolicy, - returnValidatedConfiguration + returnValidatedHarborConfiguration, + returnValidatedHistoryConfiguration }; }; \ No newline at end of file 
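Before the schema changes that follow, a minimal TypeScript sketch of how the split validators above are intended to be called. This is illustrative only and not part of the patch: the import path, the field names (taken from the README examples in this PR), and the lowercase enum values are assumptions, not confirmed API.

```
// Illustrative sketch only (not part of the patch). Assumes the RetentionPolicy
// factory is exported from the retentionpolicy types module as in this PR, and
// that the converters accept the field names shown in the README examples.
import { RetentionPolicy } from './types';

const validateExamplePolicies = async () => {
  // Harbor: the whole input is validated and returned as a JSON string,
  // ready to be stored in the retention_policy `configuration` column.
  const harborConfiguration = await RetentionPolicy().returnValidatedHarborConfiguration({
    enabled: true,
    rules: [{ name: 'pullrequests', pattern: 'pr-*', latestPulled: 1 }],
    schedule: '3 3 * * 3',
  });

  // History: enum values are assumed to arrive in their lowercase internal
  // form ('count', 'days', 'months') after the GraphQL enum mapping.
  const historyConfiguration = await RetentionPolicy().returnValidatedHistoryConfiguration({
    enabled: true,
    deploymentHistory: 15,
    deploymentType: 'days',
    taskHistory: 3,
    taskType: 'months',
  });

  return { harborConfiguration, historyConfiguration };
};
```

Either validator throws if the stringified configuration fails its converter, which is what lets the create/update resolvers surface a single "configuration is not valid" error per policy type.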
diff --git a/services/api/src/typeDefs.js b/services/api/src/typeDefs.js index 7472262fd3..5eb2c5db49 100644 --- a/services/api/src/typeDefs.js +++ b/services/api/src/typeDefs.js @@ -817,10 +817,15 @@ const typeDefs = gql` buildImage: String sharedBaasBucket: Boolean """ - retentionPolicies are the available retention policies to a project, this will also include inherited policies from an organization + harborRetentionPolicies are the available harbor retention policies to a project, this will also include inherited policies from an organization if the project is associated to an organization, and the organization has any retention policies """ - retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] + harborRetentionPolicies: [HarborRetentionPolicy] + """ + historyRetentionPolicies are the available history retention policies to a project, this will also include inherited policies from an organization + if the project is associated to an organization, and the organization has any retention policies + """ + historyRetentionPolicies: [HistoryRetentionPolicy] } """ @@ -1112,9 +1117,13 @@ const typeDefs = gql` notifications(type: NotificationType): [Notification] created: String """ - retentionPolicies are the available retention policies to an organization + harborRetentionPolicies are the available harbor retention policies to an organization + """ + harborRetentionPolicies: [HarborRetentionPolicy] + """ + historyRetentionPolicies are the available history retention policies to an organization """ - retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] + historyRetentionPolicies: [HistoryRetentionPolicy] } input AddOrganizationInput { @@ -1161,10 +1170,15 @@ const typeDefs = gql` groupCount: Int notifications: [OrganizationNotification] """ - retentionPolicies are the available retention policies to a project, this will also include inherited policies from an organization + harborRetentionPolicies are the available harbor retention policies to a project, this will also include inherited policies from an organization if the project is associated to an organization, and the organization has any retention policies """ - retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] + harborRetentionPolicies: [HarborRetentionPolicy] + """ + historyRetentionPolicies are the available history retention policies to a project, this will also include inherited policies from an organization + if the project is associated to an organization, and the organization has any retention policies + """ + historyRetentionPolicies: [HistoryRetentionPolicy] } """ @@ -1463,7 +1477,8 @@ const typeDefs = gql` getEnvVariablesByProjectEnvironmentName(input: EnvVariableByProjectEnvironmentNameInput!): [EnvKeyValue] checkBulkImportProjectsAndGroupsToOrganization(input: AddProjectToOrganizationInput!): ProjectGroupsToOrganization allPlatformUsers(id: String, email: String, gitlabId: Int, role: PlatformRole): [User] - listRetentionPolicies(type: RetentionPolicyType, name: String): [RetentionPolicy] + listHarborRetentionPolicies(name: String): [HarborRetentionPolicy] + listHistoryRetentionPolicies(name: String): [HistoryRetentionPolicy] } type ProjectGroupsToOrganization { @@ -2355,22 +2370,15 @@ const typeDefs = gql` name: String } - """ - RetentionPolicyType is the types of retention policies supported in Lagoon - """ - enum RetentionPolicyType { - HARBOR - HISTORY - } - """ HarborRetentionPolicy is the type for harbor retention policies """ - type HarborRetentionPolicy { + type 
HarborRetentionPolicyConfiguration { enabled: Boolean rules: [HarborRetentionRule] schedule: String } + type HarborRetentionRule { name: String """ @@ -2383,32 +2391,9 @@ const typeDefs = gql` } """ - HarborRetentionPolicyInput is the input for a HarborRetentionPolicy - """ - input HarborRetentionPolicyInput { - enabled: Boolean! - rules: [HarborRetentionRuleInput!] - schedule: String! - } - input HarborRetentionRuleInput { - name: String! - pattern: String! - latestPulled: Int! - } - - """ - HistoryRetentionType is the types of retention policies supported in Lagoon - """ - enum HistoryRetentionType { - COUNT - DAYS - MONTHS - } - - """ - HistoryRetentionPolicy is the type for history retention policies + HistoryRetentionPolicyConfiguration is the type for history retention policies """ - type HistoryRetentionPolicy { + type HistoryRetentionPolicyConfiguration { enabled: Boolean deploymentHistory: Int """ @@ -2429,93 +2414,143 @@ const typeDefs = gql` } """ - HistoryRetentionPolicyInput is the input for a HistoryRetentionPolicy + HistoryRetentionType is the types of retention policies supported in Lagoon """ - input HistoryRetentionPolicyInput { - enabled: Boolean! - deploymentHistory: Int! - deploymentType: HistoryRetentionType! - taskHistory: Int! - taskType: HistoryRetentionType! + enum HistoryRetentionType { + COUNT + DAYS + MONTHS } """ - RetentionPolicyConfiguration is a union type of different retention policies supported in Lagoon + HarborRetentionPolicy is the return type for harbor retention policies in Lagoon """ - union RetentionPolicyConfiguration = HarborRetentionPolicy | HistoryRetentionPolicy + type HarborRetentionPolicy { + id: Int + name: String + configuration: HarborRetentionPolicyConfiguration + created: String + updated: String + """ + source is where the retention policy source is coming from, this field is only populated when a project or organization + lists the available retention polices, and is used to indicate if a project is consuiming a retention policy from the project directly + or from an organization itself + """ + source: String + } """ - RetentionPolicy is the return type for retention policies in Lagoon + HistoryRetentionPolicy is the return type for history retention policies in Lagoon """ - type RetentionPolicy { + type HistoryRetentionPolicy { id: Int name: String - type: String - """ - configuration is the return type of union based retention policy configurations, the type of retention policy - influences the return type needed here - """ - configuration: RetentionPolicyConfiguration + configuration: HistoryRetentionPolicyConfiguration created: String updated: String """ source is where the retention policy source is coming from, this field is only populated when a project or organization lists the available retention polices, and is used to indicate if a project is consuiming a retention policy from the project directly - or from the organization itself + or from an organization itself """ source: String } """ - AddRetentionPolicyInput is used as the input for updating a retention policy, this is a union type - Currently only the 'harbor' type is supported as an input, if other retention policies are added in the future - They will be subfields of this input, the RetentionPolicyType must match the subfield input type + AddHarborRetentionPolicyInput is used as the input for creating a harbor retention policy """ - input AddRetentionPolicyInput { + input AddHarborRetentionPolicyInput { id: Int name: String! - type: RetentionPolicyType! 
- harbor: HarborRetentionPolicyInput - history: HistoryRetentionPolicyInput + enabled: Boolean! + rules: [HarborRetentionRuleInput!] + schedule: String! + } + + input HarborRetentionRuleInput { + name: String! + pattern: String! + latestPulled: Int! } """ - UpdateRetentionPolicyPatchInput is used as the input for updating a retention policy, this is a union type - Currently only the 'harbor' type is supported as a patch input, if other retention policies are added in the future - They will be subfields of this patch input + AddHistoryRetentionPolicyInput is used as the input for creating a history retention policy """ - input UpdateRetentionPolicyPatchInput { + input AddHistoryRetentionPolicyInput { + id: Int + name: String! + enabled: Boolean! + deploymentHistory: Int! + deploymentType: HistoryRetentionType! + taskHistory: Int! + taskType: HistoryRetentionType! + } + + """ + UpdateHarborRetentionPolicyInput is used as the input for updating a harbor retention policy + """ + input UpdateHarborRetentionPolicyInput { + name: String! + patch: UpdateHarborRetentionPolicyPatchInput + } + + """ + UpdateHarborRetentionPolicyPatchInput is used as the patch for updating a harbor retention policy + """ + input UpdateHarborRetentionPolicyPatchInput { name: String - harbor: HarborRetentionPolicyInput - history: HistoryRetentionPolicyInput + enabled: Boolean! + rules: [HarborRetentionRuleInput!] + schedule: String! } """ - UpdateRetentionPolicyInput is used as the input for updating a retention policy + UpdateHistoryRetentionPolicyInput is used as the input for updating a history retention policy """ - input UpdateRetentionPolicyInput { - id: Int! - patch: UpdateRetentionPolicyPatchInput + input UpdateHistoryRetentionPolicyInput { + name: String! + patch: UpdateHistoryRetentionPolicyPatchInput } """ - RetentionPolicyScope is the types of retention policies scopes in Lagoon + UpdateHistoryRetentionPolicyPatchInput is used as the patch for updating a history retention policy """ - enum RetentionPolicyScope { - GLOBAL - ORGANIZATION - PROJECT + input UpdateHistoryRetentionPolicyPatchInput { + name: String + enabled: Boolean! + deploymentHistory: Int! + deploymentType: HistoryRetentionType! + taskHistory: Int! + taskType: HistoryRetentionType! } """ AddRetentionPolicyLinkInput is used as the input for associating a retention policy with a scope """ input AddRetentionPolicyLinkInput { - id: Int! + name: String! + scope: RetentionPolicyScope! + scopeName: String + } + + """ + RemoveRetentionPolicyLinkInput is used as the input for removing a harbor retention policy with a scope + """ + input RemoveRetentionPolicyLinkInput { + name: String! scope: RetentionPolicyScope! 
scopeName: String } + """ + RetentionPolicyScope is the types of retention policies scopes in Lagoon + """ + enum RetentionPolicyScope { + GLOBAL + ORGANIZATION + PROJECT + } + type Mutation { """ Add Environment or update if it is already existing @@ -2735,25 +2770,46 @@ const typeDefs = gql` addPlatformRoleToUser(user: UserInput!, role: PlatformRole!): User removePlatformRoleFromUser(user: UserInput!, role: PlatformRole!): User """ - Create a retention policy + Create a harbor retention policy + """ + createHarborRetentionPolicy(input: AddHarborRetentionPolicyInput!): HarborRetentionPolicy + """ + Update a harbor retention policy + """ + updateHarborRetentionPolicy(input: UpdateHarborRetentionPolicyInput!): HarborRetentionPolicy + """ + Delete a harbor retention policy + """ + deleteHarborRetentionPolicy(name: String!): String + """ + Add an existing harbor retention policy to a resource type + """ + addHarborRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): HarborRetentionPolicy + """ + Remove an existing harbor retention policy from a resource type + """ + removeHarborRetentionPolicyLink(input: RemoveRetentionPolicyLinkInput!): String + + """ + Create an environment history retention policy """ - createRetentionPolicy(input: AddRetentionPolicyInput!): RetentionPolicy + createHistoryRetentionPolicy(input: AddHistoryRetentionPolicyInput!): HistoryRetentionPolicy """ - Update a retention policy + Update an environment history retention policy """ - updateRetentionPolicy(input: UpdateRetentionPolicyInput!): RetentionPolicy + updateHistoryRetentionPolicy(input: UpdateHistoryRetentionPolicyInput!): HistoryRetentionPolicy """ - Delete a retention policy + Delete an environment history retention policy """ - deleteRetentionPolicy(id: Int!): String + deleteHistoryRetentionPolicy(name: String!): String """ Add an existing retention policy to a resource type """ - addRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): RetentionPolicy + addHistoryRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): HistoryRetentionPolicy """ - Remove an existing retention policy from a resource type + Remove an existing history retention policy from a resource type """ - removeRetentionPolicyLink(input: AddRetentionPolicyLinkInput!): String + removeHistoryRetentionPolicyLink(input: RemoveRetentionPolicyLinkInput!): String } type Subscription { From 041b613fea66b3fb458a8d4a12e5982b64901c6f Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Fri, 18 Oct 2024 13:04:01 +1100 Subject: [PATCH 12/15] refactor: use union type for listing policies --- services/api/src/resolvers.js | 37 ++++++---- .../resources/retentionpolicy/resolvers.ts | 70 ++++--------------- services/api/src/typeDefs.js | 40 +++++------ 3 files changed, 53 insertions(+), 94 deletions(-) diff --git a/services/api/src/resolvers.js b/services/api/src/resolvers.js index b15eabd174..dbfc73a901 100644 --- a/services/api/src/resolvers.js +++ b/services/api/src/resolvers.js @@ -285,12 +285,9 @@ const { updateHistoryRetentionPolicy, deleteHarborRetentionPolicy, deleteHistoryRetentionPolicy, - getHarborRetentionPoliciesByProjectId, - getHarborRetentionPoliciesByOrganizationId, - getHistoryRetentionPoliciesByProjectId, - getHistoryRetentionPoliciesByOrganizationId, - listHarborRetentionPolicies, - listHistoryRetentionPolicies, + getRetentionPoliciesByProjectId, + getRetentionPoliciesByOrganizationId, + listAllRetentionPolicies, addHarborRetentionPolicyLink, addHistoryRetentionPolicyLink, removeHarborRetentionPolicyLink, @@ -417,6 +414,10 @@ 
const resolvers = { DAYS: 'days', MONTHS: 'months', }, + RetentionPolicyType: { + HARBOR: 'harbor', + HISTORY: 'history', + }, Openshift: { projectUser: getProjectUser, token: getToken, @@ -439,8 +440,7 @@ const resolvers = { groups: getGroupsByProjectId, privateKey: getPrivateKey, publicKey: getProjectDeployKey, - harborRetentionPolicies: getHarborRetentionPoliciesByProjectId, - historyRetentionPolicies: getHistoryRetentionPoliciesByProjectId, + retentionPolicies: getRetentionPoliciesByProjectId, }, GroupInterface: { __resolveType(group) { @@ -492,15 +492,13 @@ const resolvers = { owners: getOwnersByOrganizationId, deployTargets: getDeployTargetsByOrganizationId, notifications: getNotificationsByOrganizationId, - harborRetentionPolicies: getHarborRetentionPoliciesByOrganizationId, - historyRetentionPolicies: getHistoryRetentionPoliciesByOrganizationId + retentionPolicies: getRetentionPoliciesByOrganizationId, }, OrgProject: { groups: getGroupsByOrganizationsProject, groupCount: getGroupCountByOrganizationProject, notifications: getNotificationsForOrganizationProjectId, - harborRetentionPolicies: getHarborRetentionPoliciesByProjectId, - historyRetentionPolicies: getHistoryRetentionPoliciesByProjectId, + retentionPolicies: getRetentionPoliciesByProjectId, }, OrgEnvironment: { project: getProjectById, @@ -548,6 +546,18 @@ const resolvers = { } } }, + RetentionPolicy: { + __resolveType(obj) { + switch (obj.type) { + case 'harbor': + return 'HarborRetentionPolicy'; + case 'history': + return 'HistoryRetentionPolicy'; + default: + return null; + } + } + }, AdvancedTaskDefinition: { __resolveType (obj) { switch(obj.type) { @@ -630,8 +640,7 @@ const resolvers = { getEnvVariablesByProjectEnvironmentName, checkBulkImportProjectsAndGroupsToOrganization, allPlatformUsers: getAllPlatformUsers, - listHarborRetentionPolicies, - listHistoryRetentionPolicies + listAllRetentionPolicies }, Mutation: { addProblem, diff --git a/services/api/src/resources/retentionpolicy/resolvers.ts b/services/api/src/resources/retentionpolicy/resolvers.ts index 398aaab494..c40623959c 100644 --- a/services/api/src/resources/retentionpolicy/resolvers.ts +++ b/services/api/src/resources/retentionpolicy/resolvers.ts @@ -217,11 +217,18 @@ export const deleteHistoryRetentionPolicy: ResolverFn = async ( return await deleteRetentionPolicy(sqlClientPool, hasPermission, userActivityLogger, name, 'history'); }; -const listRetentionPolicies = async (sqlClientPool, hasPermission, name, type) => { +export const listAllRetentionPolicies: ResolverFn = async ( + root, + { name, type }, + { sqlClientPool, hasPermission } +) => { await hasPermission('retention_policy', 'viewAll'); let queryBuilder = knex('retention_policy'); - queryBuilder = queryBuilder.and.where('type', type); + + if (type) { + queryBuilder = queryBuilder.and.where('type', type); + } if (name) { queryBuilder = queryBuilder.where('name', name); @@ -229,22 +236,6 @@ const listRetentionPolicies = async (sqlClientPool, hasPermission, name, type) = const rows = await query(sqlClientPool, queryBuilder.toString()); return rows.map(row => ({ ...row, source: null, configuration: {type: row.type, ...JSON.parse(row.configuration)} })); -} - -export const listHarborRetentionPolicies: ResolverFn = async ( - root, - { name }, - { sqlClientPool, hasPermission } -) => { - return await listRetentionPolicies(sqlClientPool, hasPermission, name, 'harbor') -}; - -export const listHistoryRetentionPolicies: ResolverFn = async ( - root, - { name }, - { sqlClientPool, hasPermission } -) => { - 
return await listRetentionPolicies(sqlClientPool, hasPermission, name, 'history') }; const addRetentionPolicyLink = async (sqlClientPool, hasPermission, userActivityLogger, input, type) => { @@ -470,24 +461,7 @@ export const removeHistoryRetentionPolicyLink: ResolverFn = async ( return await removeRetentionPolicyLink(sqlClientPool, hasPermission, userActivityLogger, input, 'history') }; -// This is only called by the project resolver, so there is no need to do any permission checks as they're already done by the project -export const getHarborRetentionPoliciesByProjectId: ResolverFn = async ( - project, - args, - { sqlClientPool } -) => { - - let pid = args.project; - if (project) { - pid = project.id; - } - let rows = [] - rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('harbor', 'project', project.id); - return rows; -}; - -// This is only called by the project resolver, so there is no need to do any permission checks as they're already done by the project -export const getHistoryRetentionPoliciesByProjectId: ResolverFn = async ( +export const getRetentionPoliciesByProjectId: ResolverFn = async ( project, args, { sqlClientPool } @@ -498,12 +472,12 @@ export const getHistoryRetentionPoliciesByProjectId: ResolverFn = async ( pid = project.id; } let rows = [] - rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('history', 'project', project.id); + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "project", project.id); return rows; }; -// This is only called by the organization resolver, so there is no need to do any permission checks as they're already done by the organization -export const getHarborRetentionPoliciesByOrganizationId: ResolverFn = async ( +// This is only called by the organization resolver, so there is no need to do any permission checks +export const getRetentionPoliciesByOrganizationId: ResolverFn = async ( organization, args, { sqlClientPool } @@ -514,22 +488,6 @@ export const getHarborRetentionPoliciesByOrganizationId: ResolverFn = async ( oid = organization.id; } let rows = [] - rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('harbor', 'organization', oid); + rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink(args.type, "organization", oid); return rows; }; - -// This is only called by the organization resolver, so there is no need to do any permission checks as they're already done by the organization -export const getHistoryRetentionPoliciesByOrganizationId: ResolverFn = async ( - organization, - args, - { sqlClientPool } -) => { - - let oid = args.organization; - if (organization) { - oid = organization.id; - } - let rows = [] - rows = await Helpers(sqlClientPool).getRetentionPoliciesByScopeWithTypeAndLink('history', 'organization', oid); - return rows; -}; \ No newline at end of file diff --git a/services/api/src/typeDefs.js b/services/api/src/typeDefs.js index 5eb2c5db49..a86f0c48c7 100644 --- a/services/api/src/typeDefs.js +++ b/services/api/src/typeDefs.js @@ -817,15 +817,10 @@ const typeDefs = gql` buildImage: String sharedBaasBucket: Boolean """ - harborRetentionPolicies are the available harbor retention policies to a project, this will also include inherited policies from an organization + retentionPolicies are the available retention policies to a project, this will also include inherited policies from an organization if the project is associated to an organization, and the organization has any 
retention policies """ - harborRetentionPolicies: [HarborRetentionPolicy] - """ - historyRetentionPolicies are the available history retention policies to a project, this will also include inherited policies from an organization - if the project is associated to an organization, and the organization has any retention policies - """ - historyRetentionPolicies: [HistoryRetentionPolicy] + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } """ @@ -1117,13 +1112,9 @@ const typeDefs = gql` notifications(type: NotificationType): [Notification] created: String """ - harborRetentionPolicies are the available harbor retention policies to an organization + retentionPolicies are the available retention policies to an organization """ - harborRetentionPolicies: [HarborRetentionPolicy] - """ - historyRetentionPolicies are the available history retention policies to an organization - """ - historyRetentionPolicies: [HistoryRetentionPolicy] + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } input AddOrganizationInput { @@ -1170,15 +1161,10 @@ const typeDefs = gql` groupCount: Int notifications: [OrganizationNotification] """ - harborRetentionPolicies are the available harbor retention policies to a project, this will also include inherited policies from an organization + retentionPolicies are the available retention policies to a project, this will also include inherited policies from an organization if the project is associated to an organization, and the organization has any retention policies """ - harborRetentionPolicies: [HarborRetentionPolicy] - """ - historyRetentionPolicies are the available history retention policies to a project, this will also include inherited policies from an organization - if the project is associated to an organization, and the organization has any retention policies - """ - historyRetentionPolicies: [HistoryRetentionPolicy] + retentionPolicies(type: RetentionPolicyType): [RetentionPolicy] } """ @@ -1477,8 +1463,7 @@ const typeDefs = gql` getEnvVariablesByProjectEnvironmentName(input: EnvVariableByProjectEnvironmentNameInput!): [EnvKeyValue] checkBulkImportProjectsAndGroupsToOrganization(input: AddProjectToOrganizationInput!): ProjectGroupsToOrganization allPlatformUsers(id: String, email: String, gitlabId: Int, role: PlatformRole): [User] - listHarborRetentionPolicies(name: String): [HarborRetentionPolicy] - listHistoryRetentionPolicies(name: String): [HistoryRetentionPolicy] + listAllRetentionPolicies(name: String, type: RetentionPolicyType): [RetentionPolicy] } type ProjectGroupsToOrganization { @@ -2371,7 +2356,7 @@ const typeDefs = gql` } """ - HarborRetentionPolicy is the type for harbor retention policies + HarborRetentionPolicyConfiguration is the type for harbor retention policies configuration """ type HarborRetentionPolicyConfiguration { enabled: Boolean @@ -2391,7 +2376,7 @@ const typeDefs = gql` } """ - HistoryRetentionPolicyConfiguration is the type for history retention policies + HistoryRetentionPolicyConfiguration is the type for history retention policies configuration """ type HistoryRetentionPolicyConfiguration { enabled: Boolean @@ -2456,6 +2441,13 @@ const typeDefs = gql` source: String } + union RetentionPolicy = HarborRetentionPolicy | HistoryRetentionPolicy + + enum RetentionPolicyType { + HARBOR + HISTORY + } + """ AddHarborRetentionPolicyInput is used as the input for creating a harbor retention policy """ From 8cf82976103fdffb61618bc207f771869ddb2e8e Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Tue, 22 Oct 
2024 11:16:03 +1100 Subject: [PATCH 13/15] refactor: save environment history enabled by default, and also save backup and storage history --- .../src/resources/retentionpolicy/history.ts | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/services/api/src/resources/retentionpolicy/history.ts b/services/api/src/resources/retentionpolicy/history.ts index 6397efc494..1ef90b2c05 100644 --- a/services/api/src/resources/retentionpolicy/history.ts +++ b/services/api/src/resources/retentionpolicy/history.ts @@ -4,6 +4,8 @@ import { Helpers } from './helpers'; import { sqlClientPool } from '../../clients/sqlClient'; import { Sql as deploymentSql } from '../deployment/sql'; import { Sql as taskSql } from '../task/sql'; +import { Sql as backupSql } from '../backup/sql'; +import { Sql as environmentSql } from '../environment/sql'; import { sendToLagoonActions, // @ts-ignore @@ -198,22 +200,27 @@ export const HistoryRetentionEnforcer = () => { } const saveEnvironmentHistoryBeforeDeletion = async (projectData: any, environmentData: any) => { // ENABLE_SAVED_HISTORY_EXPORT will save the deployment and task history if set to true - // this is a way to export a full copy of the environment data (id, name, created, deleted etc..), the project, and the task/deployment history + // this is a way to export a full copy of the environment data (id, name, created, deleted etc..), the project, and the task/deployment/backup/storage history // this is a JSON payload that could later be consumed for historical purposes - // by default this feature is DISABLED. you should enable this feature if you want to save deleted environment history + // by default this feature is ENABLED // the deleted data ends up in the lagoon files bucket in a directory called history - const ENABLE_SAVED_HISTORY_EXPORT = process.env.ENABLE_SAVED_HISTORY_EXPORT || "false" - if (ENABLE_SAVED_HISTORY_EXPORT == "true" ) { + // the format of the path is history/{projectname}-{projectid}/{environmentname}-{environmentid}/history-{deletedunixtimestamp}.json + const ENABLE_SAVED_HISTORY_EXPORT = process.env.ENABLE_SAVED_HISTORY_EXPORT || "true" + if (ENABLE_SAVED_HISTORY_EXPORT == "true") { const taskHistory = await query(sqlClientPool, taskSql.selectTaskHistoryForEnvironment(environmentData.id)); const deploymentHistory = await query(sqlClientPool, deploymentSql.selectDeploymentHistoryForEnvironment(environmentData.id)); + const backupHistory = await query(sqlClientPool, backupSql.selectBackupsByEnvironmentId(environmentData.id)); + const environmentStorage = await query(sqlClientPool, environmentSql.selectEnvironmentStorageByEnvironmentId(environmentData.id)); const actionData = { type: "retentionHistory", eventType: "saveHistory", data: { environment: environmentData, project: projectData, - taskHistory: taskHistory, - deploymentHistory: deploymentHistory + tasks: taskHistory, + deployments: deploymentHistory, + backups: backupHistory, + storage: environmentStorage, } } sendToLagoonActions("retentionHistory", actionData) From 5332cbc8af48e24b58436d3223533092596fa1a4 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Tue, 22 Oct 2024 11:16:20 +1100 Subject: [PATCH 14/15] chore: update delete environment and project helpers to clear more stale data when removed --- services/api/src/resources/backup/sql.ts | 20 +++++- .../api/src/resources/environment/helpers.ts | 64 +++++++++++++------ services/api/src/resources/environment/sql.ts | 10 +++ services/api/src/resources/project/helpers.ts | 18 ++++++ 4 files 
changed, 91 insertions(+), 21 deletions(-) diff --git a/services/api/src/resources/backup/sql.ts b/services/api/src/resources/backup/sql.ts index 223c254a24..d28af324c2 100644 --- a/services/api/src/resources/backup/sql.ts +++ b/services/api/src/resources/backup/sql.ts @@ -13,6 +13,12 @@ export const Sql = { knex('env_vars') .where('project', projectId) .toString(), + selectBackupsByEnvironmentId: (environmentId: number) => + knex('environment_backup') + .where('environment', '=', environmentId) + .orderBy('created', 'desc') + .orderBy('id', 'desc') + .toString(), insertBackup: ({ id, environment, @@ -123,5 +129,17 @@ export const Sql = { 'environment.id' ) .where('environment_backup.backup_id', backupId) - .toString() + .toString(), + // delete all environments backups from backup table that match environment id + deleteBackupsByEnvironmentId: (environmentId: number) => + knex('environment_backup') + .where('environment', '=', environmentId) + .delete() + .toString(), + // delete all environments backups from backup table that match environment ids + deleteBackupsByEnvironmentIds: (environmentIds: number[]) => + knex('environment_backup') + .whereIn('environment', environmentIds) + .delete() + .toString(), }; diff --git a/services/api/src/resources/environment/helpers.ts b/services/api/src/resources/environment/helpers.ts index 874cbfb939..1645e88da5 100644 --- a/services/api/src/resources/environment/helpers.ts +++ b/services/api/src/resources/environment/helpers.ts @@ -5,6 +5,7 @@ import { query } from '../../util/db'; import { Sql } from './sql'; import { Sql as problemSql } from '../problem/sql'; import { Sql as factSql } from '../fact/sql'; +// import { Sql as backupSql } from '../backup/sql'; import { Helpers as projectHelpers } from '../project/helpers'; import { HistoryRetentionEnforcer } from '../retentionpolicy/history'; import { logger } from '../../loggers/logger'; @@ -34,6 +35,29 @@ export const Helpers = (sqlClientPool: Pool) => { deleteEnvironment: async (name: string, eid: number, pid: number) => { const environmentData = await Helpers(sqlClientPool).getEnvironmentById(eid); const projectData = await projectHelpers(sqlClientPool).getProjectById(pid); + + // attempt to run any retention policy processes before the environment is deleted + try { + // export a dump of the project, environment data, and associated task and deployment history before the environment is deleted + await HistoryRetentionEnforcer().saveEnvironmentHistoryBeforeDeletion(projectData, environmentData) + } catch (e) { + logger.error(`error running save environment history: ${e}`) + } + // purge all history for this environment, including logs and files from s3 + try { + // remove all deployments and associated files + await HistoryRetentionEnforcer().cleanupAllDeployments(projectData, environmentData) + } catch (e) { + logger.error(`error running deployment retention enforcer: ${e}`) + } + try { + // remove all tasks and associated files + await HistoryRetentionEnforcer().cleanupAllTasks(projectData, environmentData) + } catch (e) { + logger.error(`error running task retention enforcer: ${e}`) + } + + // then proceed to purge related data try { // clean up environment variables // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment variables`) @@ -67,30 +91,30 @@ export const Helpers = (sqlClientPool: Pool) => { problemSql.deleteProblemsForEnvironment(eid) ); - // @TODO: environment_storage, environment_backup + // delete the environment backups rows + // 
logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment backups`) + // @TODO: this could be done here, but it would mean that to recover all the backup ids of a deleted environment + // in the event that an environment is "accidentally deleted" it would require accessing the bucket + // to retrieve them from the saved history export JSON dump + // this is disabled for now, but when a project is deleted, all of the backups for any environments of that project + // will have the table cleaned out to keep the database leaner + // await query( + // sqlClientPool, + // backupSql.deleteBackupsByEnvironmentId(eid) + // ); + // clean up storage data + // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid} environment storage`) + // @TODO: this could be done here, but amazee.io might still use this data for environments that are deleted + // this is disabled for now, but when a project is deleted, all of the storages for any environments of that project + // will have the table cleaned out to keep the database leaner + // await query( + // sqlClientPool, + // Sql.deleteEnvironmentStorageByEnvironmentId(eid) + // ); } catch (e) { logger.error(`error cleaning up linked environment tables: ${e}`) } - try { - // export a dump of the project, environment data, and associated task and deployment history before the environment is deleted - await HistoryRetentionEnforcer().saveEnvironmentHistoryBeforeDeletion(projectData, environmentData) - } catch (e) { - logger.error(`error running save environment history: ${e}`) - } - // purge all history for this environment, including logs and files from s3 - try { - // remove all deployments and associated files - await HistoryRetentionEnforcer().cleanupAllDeployments(projectData, environmentData) - } catch (e) { - logger.error(`error running deployment retention enforcer: ${e}`) - } - try { - // remove all tasks and associated files - await HistoryRetentionEnforcer().cleanupAllTasks(projectData, environmentData) - } catch (e) { - logger.error(`error running task retention enforcer: ${e}`) - } // delete the environment // logger.debug(`deleting environment ${name}/id:${eid}/project:${pid}`) await query( diff --git a/services/api/src/resources/environment/sql.ts b/services/api/src/resources/environment/sql.ts index 639635c323..ca343d175f 100644 --- a/services/api/src/resources/environment/sql.ts +++ b/services/api/src/resources/environment/sql.ts @@ -198,4 +198,14 @@ export const Sql = { .where('project', '=', projectId) .delete() .toString(), + deleteEnvironmentStorageByEnvironmentId: (id: number) => + knex('environment_storage') + .where('environment', '=', id) + .delete() + .toString(), + deleteEnvironmentStorageByEnvironmentIds: (ids: number[]) => + knex('environment_storage') + .whereIn('environment', ids) + .delete() + .toString(), }; diff --git a/services/api/src/resources/project/helpers.ts b/services/api/src/resources/project/helpers.ts index 03c9cac153..ae7144c9ea 100644 --- a/services/api/src/resources/project/helpers.ts +++ b/services/api/src/resources/project/helpers.ts @@ -4,6 +4,7 @@ import { asyncPipe } from '@lagoon/commons/dist/util/func'; import { query } from '../../util/db'; import { Sql } from './sql'; import { Sql as environmentSql } from '../environment/sql'; +import { Sql as backupSql } from '../backup/sql'; // import { logger } from '../../loggers/logger'; export const Helpers = (sqlClientPool: Pool) => { @@ -193,6 +194,23 @@ export const Helpers = (sqlClientPool: Pool) => { sqlClientPool, 
Sql.deleteDeployTargetConfigs(id) ); + // logger.debug(`deleting project ${id} environment leftover backups rows`) + // clean up backups table so backups for environments don't remain in limbo once the project is deleted + // @INFO: this step is to clear out any backup rows from the database for a project that is being deleted + // at the moment this only happens when the project is deleted + // but there is a commented section in the environment helpers where when an environment + // is deleted, the deletion of the data for the environment could handle this step instead + const projectEnvironmentIds = await query(sqlClientPool, environmentSql.selectEnvironmentsByProjectID(id, true)); + await query( + sqlClientPool, + backupSql.deleteBackupsByEnvironmentIds(projectEnvironmentIds) + ); + // same for environment storage + // logger.debug(`deleting project ${id} environment leftover storage rows`) + await query( + sqlClientPool, + environmentSql.deleteEnvironmentStorageByEnvironmentIds(projectEnvironmentIds) + ); // logger.debug(`deleting project ${id} leftover environment rows`) // clean up environments table so environments don't remain in limbo once the project is deleted await query( From 9c35fd8724e6fe7ae6e26e9cd476784f97aa8a43 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Tue, 19 Nov 2024 15:41:34 +1100 Subject: [PATCH 15/15] chore: update remote-controller build image for v1beta2 api controller --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 19e72977cb..a91079269e 100644 --- a/Makefile +++ b/Makefile @@ -54,7 +54,7 @@ BUILD_DEPLOY_IMAGE_TAG ?= edge # OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG and OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY # set this to a particular build image if required, defaults to nothing to consume what the chart provides -OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=pr-243 +OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=pr-267 OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY= # To build k3d with Calico instead of Flannel, set this to true. Note that the Calico install in lagoon-charts is always
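As a closing reference for the cleanup helpers introduced in PATCH 14, a small sketch of the statements the new knex builders produce. This is a hedged illustration, not part of the patch: it uses a standalone knex instance with an assumed MySQL dialect, whereas the API code builds these through its own preconfigured knex from util/db.

```
// Illustrative sketch only (not part of the patch). A standalone knex instance
// with a mysql dialect is assumed here purely to show the DELETE statements the
// new cleanup helpers build when a project and its environments are removed.
import knex from 'knex';

const k = knex({ client: 'mysql' });

const environmentIds = [101, 102]; // hypothetical environment ids for a deleted project

// Equivalent of backupSql.deleteBackupsByEnvironmentIds(environmentIds);
// expected to render something like:
//   delete from `environment_backup` where `environment` in (101, 102)
const deleteBackups = k('environment_backup')
  .whereIn('environment', environmentIds)
  .delete()
  .toString();

// Equivalent of environmentSql.deleteEnvironmentStorageByEnvironmentIds(environmentIds);
// expected to render something like:
//   delete from `environment_storage` where `environment` in (101, 102)
const deleteStorage = k('environment_storage')
  .whereIn('environment', environmentIds)
  .delete()
  .toString();

console.log(deleteBackups);
console.log(deleteStorage);
```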