diff --git a/.env.defaults b/.env.defaults
index 4e5a213b8c..300b8e6dc3 100644
--- a/.env.defaults
+++ b/.env.defaults
@@ -1,11 +1,4 @@
# These credentials are no secret; they're just for easy
# development. Don't use these values for production deployments!
JWTSECRET=super-secret-string
-JWTAUDIENCE=api.dev
-AWS_ACCESS_KEY_ID=XXXXXXXXXXXXXXXXXXXX
-AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxx
-AWS_BUCKET=aws-bucket
-HARBOR_REGISTRY_STORAGE_AMAZON_BUCKET=bucket-name
-HARBOR_REGISTRY_STORAGE_AMAZON_REGION=bucket-region
-REGISTRY_STORAGE_S3_ACCESSKEY=AWS-ID
-REGISTRY_STORAGE_S3_SECRETKEY=AWS-Secret
\ No newline at end of file
+JWTAUDIENCE=api.dev
\ No newline at end of file
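
These defaults are plain KEY=value lines. For ad-hoc local use outside the stack (a sketch; nothing in this diff requires it), the usual dotenv-style sourcing works:

    # Export everything the file defines into the current shell
    set -a
    . ./.env.defaults
    set +a
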
diff --git a/.gitignore b/.gitignore
index 58a6ae60b6..c0e063a91c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,10 +7,12 @@ deploytest
startVagrant.sh
local-dev/oc
local-dev/helm
-minishift
-minikube
+local-dev/minishift
+local-dev/minikube
+local-dev/k3d
+local-dev/jq
k3d
-kubectl
+local-dev/kubectl
**/v8-*
node_modules/
build/*
@@ -24,3 +26,7 @@ docs/_build
yarn-debug.log*
yarn-error.log*
site
+kubeconfig.*
+kindconfig.*
+kind
+lagoon-charts.*
diff --git a/.lagoon.harbor-secrets.yaml b/.lagoon.harbor-secrets.yaml
deleted file mode 100644
index 9c3df25bf5..0000000000
--- a/.lagoon.harbor-secrets.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-secret-environment-template
-parameters:
- - name: HARBOR_CORE_SECRET
- description: The secret used to connect to harbor's core service
- generate: expression
- from: "[a-zA-Z0-9]{16}"
- - name: HARBOR_JOBSERVICE_SECRET
- description: The secret used to connect to harbor's jobservice service
- generate: expression
- from: "[a-zA-Z0-9]{16}"
- - name: HARBOR_REGISTRY_SECRET
- description: The secret used to connect to harbor's registry service
- generate: expression
- from: "[a-zA-Z0-9]{16}"
- - name: HARBOR_ADMIN_PASSWORD
- description: Harbor's admin password
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: CLAIR_DB_PASSWORD
- description: The password clair should use to talk to the postgres db
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
-objects:
-- kind: Secret
- apiVersion: v1
- metadata:
- name: harbor-core-secret
- stringData:
- HARBOR_CORE_SECRET: ${HARBOR_CORE_SECRET}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: harbor-jobservice-secret
- stringData:
- HARBOR_JOBSERVICE_SECRET: ${HARBOR_JOBSERVICE_SECRET}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: harborregistry-secret
- stringData:
- HARBOR_REGISTRY_SECRET: ${HARBOR_REGISTRY_SECRET}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: harbor-admin-password
- stringData:
- HARBOR_ADMIN_PASSWORD: ${HARBOR_ADMIN_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: clair-db-password
- stringData:
- CLAIR_DB_PASSWORD: ${CLAIR_DB_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: postgresql-password
- stringData:
- POSTGRESQL_PASSWORD: ${CLAIR_DB_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: notary-db-url
- stringData:
- NOTARY_DB_URL: postgres://postgres:${CLAIR_DB_PASSWORD}@harbor-database:5432/notaryserver?sslmode=disable
-- apiVersion: v1
- kind: ConfigMap
- metadata:
- name: harborclair
- labels:
- service: harborclair
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- type: Opaque
- data:
- config.yaml: |
- clair:
- database:
- type: pgsql
- options:
- source: "postgres://postgres:${CLAIR_DB_PASSWORD}@harbor-database:5432/postgres?sslmode=disable"
- # Number of elements kept in the cache
- # Values unlikely to change (e.g. namespaces) are cached in order to save prevent needless roundtrips to the database.
- cachesize: 16384
- api:
- # API server port
- port: 6060
- healthport: 6061
- # Deadline before an API request will respond with a 503
- timeout: 300s
- updater:
- interval: 1h
- redis: "redis://harbor-redis:6379/4"
- database: "postgres://postgres:${CLAIR_DB_PASSWORD}@harbor-database:5432/postgres?sslmode=disable"
diff --git a/.lagoon.keycloak-secrets.yaml b/.lagoon.keycloak-secrets.yaml
deleted file mode 100644
index 9d15e2603f..0000000000
--- a/.lagoon.keycloak-secrets.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-secret-environment-template
-parameters:
- - name: KEYCLOAK_LAGOON_ADMIN_PASSWORD
- description: super admin password of keycloak
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: KEYCLOAK_ADMIN_PASSWORD
- description: admin user password of keycloak
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: KEYCLOAK_AUTH_SERVER_CLIENT_SECRET
- description: client secret of the auth server client
- generate: expression
- from: "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
- - name: KEYCLOAK_API_CLIENT_SECRET
- description: client secret of the api client
- generate: expression
- from: "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
-objects:
-- kind: Secret
- apiVersion: v1
- metadata:
- name: keycloak-lagoon-admin-password
- stringData:
- KEYCLOAK_LAGOON_ADMIN_PASSWORD: ${KEYCLOAK_LAGOON_ADMIN_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: keycloak-admin-password
- stringData:
- KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: keycloak-auth-server-client-secret
- stringData:
- KEYCLOAK_AUTH_SERVER_CLIENT_SECRET: ${KEYCLOAK_AUTH_SERVER_CLIENT_SECRET}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: keycloak-api-client-secret
- stringData:
- KEYCLOAK_API_CLIENT_SECRET: ${KEYCLOAK_API_CLIENT_SECRET}
diff --git a/.lagoon.logs-db-secrets.yaml b/.lagoon.logs-db-secrets.yaml
deleted file mode 100644
index ce9c3a2a30..0000000000
--- a/.lagoon.logs-db-secrets.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-secret-environment-template
-parameters:
- - name: LOGSDB_ADMIN_PASSWORD
- description: admin password of logs-db
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: LOGSDB_KIBANASERVER_PASSWORD
- description: kibana password of logs-db
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
-objects:
-- kind: Secret
- apiVersion: v1
- metadata:
- name: logs-db-admin-password
- stringData:
- LOGSDB_ADMIN_PASSWORD: ${LOGSDB_ADMIN_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: logs-db-kibanaserver-password
- stringData:
- LOGSDB_KIBANASERVER_PASSWORD: ${LOGSDB_KIBANASERVER_PASSWORD}
\ No newline at end of file
diff --git a/.lagoon.secrets.yaml b/.lagoon.secrets.yaml
deleted file mode 100644
index 6a411006cb..0000000000
--- a/.lagoon.secrets.yaml
+++ /dev/null
@@ -1,111 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-secret-environment-template
-parameters:
- - name: JWTSECRET
- description: JSON Web Token generation secret
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: RABBITMQ_PASSWORD
- description: Password to connect to rabbitmq to
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: LOGSTASH_USERNAME
- description: Username to for incoming logstash http connections
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: LOGSTASH_PASSWORD
- description: Password to for incoming logstash http connections
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: OPENDISTRO_SECURITY_COOKIE_PASSWORD
- description: Password to for opendistro-security cookies
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: API_DB_PASSWORD
- description: Password used for connecting to the api-db
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: KEYCLOAK_DB_PASSWORD
- description: Password used for connecting to the keycloak-db
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: API_REDIS_PASSWORD
- description: Password used for connecting to the api-redis
- generate: expression
- from: "[a-zA-Z0-9]{32}"
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
-objects:
-- kind: Secret
- apiVersion: v1
- metadata:
- name: jwtsecret
- stringData:
- JWTSECRET: ${JWTSECRET}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: rabbitmq-password
- stringData:
- RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: rabbitmq-username
- stringData:
- RABBITMQ_USERNAME: lagoon
-- kind: Secret
- apiVersion: v1
- metadata:
- name: api-db-password
- stringData:
- API_DB_PASSWORD: ${API_DB_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: keycloak-db-password
- stringData:
- KEYCLOAK_DB_PASSWORD: ${KEYCLOAK_DB_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: logstash-username
- stringData:
- LOGSTASH_USERNAME: ${LOGSTASH_USERNAME}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: logstash-password
- stringData:
- LOGSTASH_PASSWORD: ${LOGSTASH_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: opendistro-security-cookie-password
- stringData:
- OPENDISTRO_SECURITY_COOKIE_PASSWORD: ${OPENDISTRO_SECURITY_COOKIE_PASSWORD}
-- kind: Secret
- apiVersion: v1
- metadata:
- name: api-redis-password
- stringData:
- API_REDIS_PASSWORD: ${API_REDIS_PASSWORD}
diff --git a/.lagoon.yml b/.lagoon.yml
deleted file mode 100644
index b5fbfed2f0..0000000000
--- a/.lagoon.yml
+++ /dev/null
@@ -1,59 +0,0 @@
-docker-compose-yaml: docker-compose.yaml
-
-additional-yaml:
- secrets:
- path: .lagoon.secrets.yaml
- command: create
- ignore_error: true
-
- logs-db-secrets:
- path: .lagoon.logs-db-secrets.yaml
- command: create
- ignore_error: true
-
- keycloak-secrets:
- path: .lagoon.keycloak-secrets.yaml
- command: create
- ignore_error: true
-
- harbor-secrets:
- path: .lagoon.harbor-secrets.yaml
- command: create
- ignore_error: true
-tasks:
-# pre-rollout:
-# - run:
-# name: 'disable shard allocation on elasticsearch and run synced flush' # see https://www.elastic.co/guide/en/elasticsearch/reference/current/restart-upgrade.html
-# command: |
-# es-curl PUT _cluster/settings -d '{"persistent":{"cluster.routing.allocation.enable":"none"}}'
-# es-curl POST _flush/synced
-# service: logs-db
-
- post-rollout:
- - run:
- name: update database schema
- command: /rerun_initdb.sh
- service: api-db
- - run:
- name: 'enable shard allocation on elasticsearch'
- command: es-curl PUT _cluster/settings -d '{"persistent":{"cluster.routing.allocation.enable":null}}'
- service: logs-db
-
-environments:
- master:
- types:
- logs-db: elasticsearch-cluster
- logs-collector: custom
- templates:
- logs-db: services/logs-db/.lagoon.cluster.yml
- logs-forwarder: services/logs-forwarder/.lagoon.multi.yml
- rollouts:
- logs-db: statefulset
- logs-forwarder: statefulset
- develop:
- types:
- logs-db: elasticsearch-cluster
- templates:
- logs-db: services/logs-db/.lagoon.cluster.yml
- rollouts:
- logs-db: statefulset
\ No newline at end of file
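
For context on the deleted secret templates above: each `additional-yaml` entry pointed at an OpenShift template whose `generate: expression` parameters produced random values at processing time. A minimal sketch of processing one such template by hand with `oc` (the parameter values are illustrative, not taken from this repo):

    # Render the template, letting OpenShift generate the random secrets,
    # then create the resulting objects; the trailing `|| true` mirrors
    # the `ignore_error: true` behaviour above.
    oc process -f .lagoon.secrets.yaml \
      -p SAFE_BRANCH=master -p SAFE_PROJECT=lagoon \
      -p BRANCH=master -p PROJECT=lagoon \
      -p LAGOON_GIT_SHA=abc123 \
      -p OPENSHIFT_PROJECT=lagoon-master \
      | oc create -f - || true
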
diff --git a/Jenkinsfile b/Jenkinsfile
index 72e040c407..0e737c2216 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1,201 +1,124 @@
-node {
-
- openshift_version = 'v3.11.0'
- minishift_version = '1.34.1'
- kubernetes_versions = [
- // ["kubernetes": "v1.15", "k3s": "v0.9.1", "kubectl": "v1.15.4"],
- // ["kubernetes": "v1.16", "k3s": "v1.0.1", "kubectl": "v1.16.3"],
- ["kubernetes": "v1.17", "k3s": "v1.17.0-k3s.1", "kubectl": "v1.17.0"]
- ]
-
- env.MINISHIFT_HOME = "/data/jenkins/.minishift"
-
- withEnv(['AWS_BUCKET=jobs.amazeeio.services', 'AWS_DEFAULT_REGION=us-east-2']) {
- withCredentials([
- usernamePassword(credentialsId: 'aws-s3-lagoon', usernameVariable: 'AWS_ACCESS_KEY_ID', passwordVariable: 'AWS_SECRET_ACCESS_KEY'),
- string(credentialsId: 'SKIP_IMAGE_PUBLISH', variable: 'SKIP_IMAGE_PUBLISH')
- ]) {
- try {
- env.CI_BUILD_TAG = env.BUILD_TAG.replaceAll('%2f','').replaceAll("[^A-Za-z0-9]+", "").toLowerCase()
- env.SAFEBRANCH_NAME = env.BRANCH_NAME.replaceAll('%2f','-').replaceAll("[^A-Za-z0-9]+", "-").toLowerCase()
- env.SYNC_MAKE_OUTPUT = 'target'
- // make/tests will synchronise (buffer) output by default to avoid interspersed
- // lines from multiple jobs run in parallel. However this means that output for
- // each make target is not written until the command completes.
- //
- // See `man -P 'less +/-O' make` for more information about this option.
- //
- // Uncomment the line below to disable output synchronisation.
- env.SYNC_MAKE_OUTPUT = 'none'
-
- stage ('env') {
- sh "env"
- }
-
- deleteDir()
-
- stage ('Checkout') {
- def checkout = checkout scm
- env.GIT_COMMIT = checkout["GIT_COMMIT"]
- }
-
- // in order to have the newest images from upstream (with all the security updates) we clean our local docker cache on tag deployments
- // we don't do this all the time to still profit from image layer caching
- // but we want this on tag deployments in order to ensure that we publish images always with the newest possible images.
- if (env.TAG_NAME) {
- stage ('clean docker image cache') {
- sh script: "docker image prune -af", label: "Pruning images"
- }
- }
-
- stage ('check PR labels') {
- if (env.BRANCH_NAME ==~ /PR-\d+/) {
- pullRequest.labels.each{
- echo "This PR has labels: $it"
- }
- }
- }
-
- stage ('build images') {
- sh script: "make -O${SYNC_MAKE_OUTPUT} -j6 build", label: "Building images"
- }
-
- try {
- parallel (
- '1 tests': {
- kubernetes_versions.each { kubernetes_version ->
- stage ("kubernetes ${kubernetes_version['kubernetes']} tests") {
- try {
- sh script: "make k3d/clean K3S_VERSION=${kubernetes_version['k3s']} KUBECTL_VERSION=${kubernetes_version['kubectl']}", label: "Removing any previous k3d versions"
- sh script: "make k3d K3S_VERSION=${kubernetes_version['k3s']} KUBECTL_VERSION=${kubernetes_version['kubectl']}", label: "Making k3d"
- sh script: "make -O${SYNC_MAKE_OUTPUT} k8s-tests -j2", label: "Making kubernetes tests"
- } catch (e) {
- echo "Something went wrong, trying to cleanup"
- cleanup()
- throw e
- }
- }
- }
- stage ('minishift tests') {
- try {
- if (env.CHANGE_ID && pullRequest.labels.contains("skip-openshift-tests")) {
- sh script: 'echo "PR identified as not needing Openshift testing."', label: "Skipping Openshift testing stage"
- } else {
- sh 'make minishift/cleanall || echo'
- sh script: "make minishift MINISHIFT_CPUS=\$(nproc --ignore 3) MINISHIFT_MEMORY=24GB MINISHIFT_DISK_SIZE=70GB MINISHIFT_VERSION=${minishift_version} OPENSHIFT_VERSION=${openshift_version}", label: "Making openshift"
- sh script: "make -O${SYNC_MAKE_OUTPUT} push-minishift -j5", label: "Pushing built images into openshift"
- sh script: "make -O${SYNC_MAKE_OUTPUT} openshift-tests -j2", label: "Making openshift tests"
- }
- } catch (e) {
- echo "Something went wrong, trying to cleanup"
- cleanup()
- throw e
- }
- }
- stage ('cleanup') {
- cleanup()
- }
- },
- '2 start services': {
- stage ('start services') {
- try {
- notifySlack()
- sh "make kill"
- sh "make up"
- sh "make logs"
- } catch (e) {
- echo "Something went wrong, trying to cleanup"
- cleanup()
- throw e
- }
- }
- },
- '3 push images to amazeeiolagoon': {
- stage ('push images to amazeeiolagoon/*') {
- withCredentials([string(credentialsId: 'amazeeiojenkins-dockerhub-password', variable: 'PASSWORD')]) {
- try {
- if (env.SKIP_IMAGE_PUBLISH != 'true') {
- sh script: 'docker login -u amazeeiojenkins -p $PASSWORD', label: "Docker login"
- sh script: "make -O${SYNC_MAKE_OUTPUT} -j4 publish-amazeeiolagoon-baseimages publish-amazeeiolagoon-serviceimages publish-amazeeiolagoon-taskimages BRANCH_NAME=${SAFEBRANCH_NAME}", label: "Publishing built images"
- } else {
- sh script: 'echo "skipped because of SKIP_IMAGE_PUBLISH env variable"', label: "Skipping image publishing"
- }
- if (env.BRANCH_NAME == 'main' ) {
- withCredentials([string(credentialsId: 'vshn-gitlab-helmfile-ci-trigger', variable: 'TOKEN')]) {
- sh script: "curl -X POST -F token=$TOKEN -F ref=master https://git.vshn.net/api/v4/projects/1263/trigger/pipeline", label: "Trigger lagoon-core helmfile sync on amazeeio-test6"
- }
- }
- } catch (e) {
- echo "Something went wrong, trying to cleanup"
- cleanup()
- throw e
- }
- }
- }
- }
- )
- } catch (e) {
- echo "Something went wrong, trying to cleanup"
- cleanup()
- throw e
- }
+pipeline {
+ agent any
+ environment {
+ // configure build params
+ CI_BUILD_TAG = env.BUILD_TAG.replaceAll('%2f','').replaceAll('[^A-Za-z0-9]+', '').toLowerCase()
+ SAFEBRANCH_NAME = env.BRANCH_NAME.replaceAll('%2f','-').replaceAll('[^A-Za-z0-9]+', '-').toLowerCase()
+ NPROC = "${sh(script:'getconf _NPROCESSORS_ONLN', returnStdout: true).trim()}"
+ }
- if (env.TAG_NAME && env.SKIP_IMAGE_PUBLISH != 'true') {
- stage ('publish-amazeeio') {
- withCredentials([string(credentialsId: 'amazeeiojenkins-dockerhub-password', variable: 'PASSWORD')]) {
- sh script: 'docker login -u amazeeiojenkins -p $PASSWORD', label: "Docker login"
- sh script: "make -O${SYNC_MAKE_OUTPUT} -j4 publish-amazeeio-baseimages", label: "Publishing built images"
- }
- }
+ stages {
+ stage ('notify started') {
+ steps {
+ notifySlack('STARTED')
+ }
+ }
+ stage ('env') {
+ steps {
+ sh 'env'
+ }
+ }
+    // In order to have the newest images from upstream (with all the security
+    // updates), we clean our local docker cache on tag deployments.
+    // We don't do this all the time so we can still profit from image layer
+    // caching, but we do want it on tag deployments to ensure that published
+    // images are always built from the newest possible upstream images.
+ stage ('clean docker image cache') {
+ when {
+ buildingTag()
+ }
+ steps {
+ sh script: "docker image prune -af", label: "Pruning images"
+ }
+ }
+ stage ('build images') {
+ steps {
+ sh script: "make -O -j$NPROC build", label: "Building images"
+ }
+ }
+ stage ('show trivy scan results') {
+ steps {
+ sh script: "cat scan.txt", label: "Display scan results"
+ }
+ }
+ stage ('push images to testlagoon/*') {
+ when {
+ not {
+ environment name: 'SKIP_IMAGE_PUBLISH', value: 'true'
}
-
- if (env.BRANCH_NAME == 'master' && env.SKIP_IMAGE_PUBLISH != 'true') {
- stage ('save-images-s3') {
- sh script: "make -O${SYNC_MAKE_OUTPUT} -j8 s3-save", label: "Saving images to AWS S3"
- }
+ }
+ environment {
+ PASSWORD = credentials('amazeeiojenkins-dockerhub-password')
+ }
+ steps {
+ sh script: 'docker login -u amazeeiojenkins -p $PASSWORD', label: "Docker login"
+ sh script: "make -O -j$NPROC publish-testlagoon-baseimages publish-testlagoon-serviceimages publish-testlagoon-taskimages BRANCH_NAME=${SAFEBRANCH_NAME}", label: "Publishing built images"
+ }
+ }
+ stage ('run test suite') {
+ steps {
+ sh script: "make -j$NPROC kind/test BRANCH_NAME=${SAFEBRANCH_NAME}", label: "Running tests on kind cluster"
+ }
+ }
+ stage ('push images to testlagoon/* with :latest tag') {
+ when {
+ branch 'main'
+ }
+ environment {
+ PASSWORD = credentials('amazeeiojenkins-dockerhub-password')
+ }
+ steps {
+ sh script: 'docker login -u amazeeiojenkins -p $PASSWORD', label: "Docker login"
+ sh script: "make -O -j$NPROC publish-testlagoon-baseimages publish-testlagoon-serviceimages publish-testlagoon-taskimages BRANCH_NAME=latest", label: "Publishing built images with :latest tag"
+ }
+ }
+ stage ('deploy to test environment') {
+ when {
+ branch 'main'
+ }
+ environment {
+ TOKEN = credentials('vshn-gitlab-helmfile-ci-trigger')
+ }
+ steps {
+ sh script: "curl -X POST -F token=$TOKEN -F ref=master https://git.vshn.net/api/v4/projects/1263/trigger/pipeline", label: "Trigger lagoon-core helmfile sync on amazeeio-test6"
+ }
+ }
+ stage ('push images to uselagoon/*') {
+ when {
+ buildingTag()
+ not {
+ environment name: 'SKIP_IMAGE_PUBLISH', value: 'true'
}
-
- } catch (e) {
- currentBuild.result = 'FAILURE'
- throw e
- } finally {
- notifySlack(currentBuild.result)
+ }
+ environment {
+ PASSWORD = credentials('amazeeiojenkins-dockerhub-password')
+ }
+ steps {
+ sh script: 'docker login -u amazeeiojenkins -p $PASSWORD', label: "Docker login"
+ sh script: "make -O -j$NPROC publish-uselagoon-baseimages publish-uselagoon-serviceimages publish-uselagoon-taskimages", label: "Publishing built images to uselagoon"
}
}
}
-}
-
-def cleanup() {
- try {
- sh "make minishift/cleanall"
- sh "make k3d/cleanall"
- sh "make down || true"
- sh "make kill"
- sh "make down"
- sh "make clean"
- } catch (error) {
- echo "cleanup failed, ignoring this."
+ post {
+ always {
+ sh "make clean kind/clean"
+ }
+ success {
+ notifySlack('SUCCESS')
+ deleteDir()
+ }
+ failure {
+ notifySlack('FAILURE')
+ }
+ aborted {
+ notifySlack('ABORTED')
+ }
}
}
-def notifySlack(String buildStatus = 'STARTED') {
- // Build status of null means success.
- buildStatus = buildStatus ?: 'SUCCESS'
-
- def color
-
- if (buildStatus == 'STARTED') {
- color = '#68A1D1'
- } else if (buildStatus == 'SUCCESS') {
- color = '#BDFFC3'
- } else if (buildStatus == 'UNSTABLE') {
- color = '#FFFE89'
- } else {
- color = '#FF9FA1'
- }
-
- def msg = "${buildStatus}: `${env.JOB_NAME}` #${env.BUILD_NUMBER}:\n${env.BUILD_URL}"
-
- slackSend(color: color, message: msg)
+def notifySlack(String status) {
+ slackSend(
+ color: ([STARTED: '#68A1D1', SUCCESS: '#BDFFC3', FAILURE: '#FF9FA1', ABORTED: '#949393'][status]),
+ message: "${status}: `${env.JOB_NAME}` #${env.BUILD_NUMBER}:\n${env.BUILD_URL}")
}
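
The `environment` block above derives registry-safe identifiers from Jenkins' BUILD_TAG and BRANCH_NAME. A rough shell equivalent of that Groovy sanitisation, for reproducing it outside Jenkins (assumes BUILD_TAG and BRANCH_NAME are set the way Jenkins sets them):

    # Strip URL-encoded slashes, squash runs of non-alphanumerics, lowercase
    CI_BUILD_TAG=$(echo "$BUILD_TAG" | sed -e 's/%2f//g' -e 's/[^A-Za-z0-9]\{1,\}//g' | tr '[:upper:]' '[:lower:]')
    # Same, but replace runs of non-alphanumerics with a single dash
    SAFEBRANCH_NAME=$(echo "$BRANCH_NAME" | sed -e 's/%2f/-/g' -e 's/[^A-Za-z0-9]\{1,\}/-/g' | tr '[:upper:]' '[:lower:]')
    NPROC=$(getconf _NPROCESSORS_ONLN)
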
diff --git a/Jenkinsfile.end2end b/Jenkinsfile.end2end
deleted file mode 100644
index 1a612a4aa9..0000000000
--- a/Jenkinsfile.end2end
+++ /dev/null
@@ -1,21 +0,0 @@
-node {
-
- stage ('Checkout') {
- deleteDir()
- checkout scm
- }
-
- ansiColor('xterm') {
- stage ('run test') {
- try {
- sh "make end2end-tests -j1"
- } catch (e) {
- echo "Something went wrong, trying to cleanup"
- sh "make end2end-tests/clean"
- throw e
- }
- sh "make end2end-tests/clean"
- }
- }
-
-}
\ No newline at end of file
diff --git a/Lagoon_OG.png b/Lagoon_OG.png
new file mode 100644
index 0000000000..1c4f0de0c7
Binary files /dev/null and b/Lagoon_OG.png differ
diff --git a/Makefile b/Makefile
index 583f1f9792..cd4868b274 100644
--- a/Makefile
+++ b/Makefile
@@ -58,6 +58,9 @@ DOCKER_BUILD_PARAMS := --quiet
# CI systems to define an Environment variable CI_BUILD_TAG which uniquely identifies each build.
# If it's not set we assume that we are running local and just call it lagoon.
CI_BUILD_TAG ?= lagoon
+# UPSTREAM_REPO is the repo where the upstream images are found (usually uselagoon, but testlagoon can be substituted)
+UPSTREAM_REPO ?= uselagoon
+UPSTREAM_TAG ?= latest
# Local environment
ARCH := $(shell uname | tr '[:upper:]' '[:lower:]')
@@ -73,8 +76,8 @@ MINISHIFT_DISK_SIZE := 30GB
# Version and Hash of the minikube cli that should be downloaded
K3S_VERSION := v1.17.0-k3s.1
-KUBECTL_VERSION := v1.17.0
-HELM_VERSION := v3.0.3
+KUBECTL_VERSION := v1.20.2
+HELM_VERSION := v3.5.0
MINIKUBE_VERSION := 1.5.2
MINIKUBE_PROFILE := $(CI_BUILD_TAG)-minikube
MINIKUBE_CPUS := $(nproc --ignore 2)
@@ -86,8 +89,12 @@ K3D_VERSION := 1.4.0
K3D_NAME := k3s-$(shell echo $(CI_BUILD_TAG) | sed -E 's/.*(.{31})$$/\1/')
# Name of the Branch we are currently in
-BRANCH_NAME :=
-DEFAULT_ALPINE_VERSION := 3.11
+BRANCH_NAME := $(shell git rev-parse --abbrev-ref HEAD)
+SAFE_BRANCH_NAME := $(shell echo $(BRANCH_NAME) | sed -E 's:/:_:g')
+
+# Init the files that are used to hold the image tag cross-reference table and the Trivy scan results
+$(shell >build.txt)
+$(shell >scan.txt)
#######
####### Functions
@@ -95,32 +102,15 @@ DEFAULT_ALPINE_VERSION := 3.11
# Builds a docker image. Expects as arguments: name of the image, location of Dockerfile, path of
# Docker Build Context
-docker_build = docker build $(DOCKER_BUILD_PARAMS) --build-arg LAGOON_VERSION=$(LAGOON_VERSION) --build-arg IMAGE_REPO=$(CI_BUILD_TAG) --build-arg ALPINE_VERSION=$(DEFAULT_ALPINE_VERSION) -t $(CI_BUILD_TAG)/$(1) -f $(2) $(3)
-
-# Build a Python docker image. Expects as arguments:
-# 1. Python version
-# 2. Location of Dockerfile
-# 3. Path of Docker Build context
-docker_build_python = docker build $(DOCKER_BUILD_PARAMS) --build-arg LAGOON_VERSION=$(LAGOON_VERSION) --build-arg IMAGE_REPO=$(CI_BUILD_TAG) --build-arg PYTHON_VERSION=$(1) --build-arg ALPINE_VERSION=$(2) -t $(CI_BUILD_TAG)/python:$(3) -f $(4) $(5)
-
-docker_build_elastic = docker build $(DOCKER_BUILD_PARAMS) --build-arg LAGOON_VERSION=$(LAGOON_VERSION) --build-arg IMAGE_REPO=$(CI_BUILD_TAG) -t $(CI_BUILD_TAG)/$(2):$(1) -f $(3) $(4)
-
-# Build a PHP docker image. Expects as arguments:
-# 1. PHP version
-# 2. PHP version and type of image (ie 7.3-fpm, 7.3-cli etc)
-# 3. Location of Dockerfile
-# 4. Path of Docker Build Context
-docker_build_php = docker build $(DOCKER_BUILD_PARAMS) --build-arg LAGOON_VERSION=$(LAGOON_VERSION) --build-arg IMAGE_REPO=$(CI_BUILD_TAG) --build-arg PHP_VERSION=$(1) --build-arg PHP_IMAGE_VERSION=$(1) --build-arg ALPINE_VERSION=$(2) -t $(CI_BUILD_TAG)/php:$(3) -f $(4) $(5)
+docker_build = docker build $(DOCKER_BUILD_PARAMS) --build-arg LAGOON_VERSION=$(LAGOON_VERSION) --build-arg IMAGE_REPO=$(CI_BUILD_TAG) --build-arg UPSTREAM_REPO=$(UPSTREAM_REPO) --build-arg UPSTREAM_TAG=$(UPSTREAM_TAG) -t $(CI_BUILD_TAG)/$(1) -f $(2) $(3)
-docker_build_node = docker build $(DOCKER_BUILD_PARAMS) --build-arg LAGOON_VERSION=$(LAGOON_VERSION) --build-arg IMAGE_REPO=$(CI_BUILD_TAG) --build-arg NODE_VERSION=$(1) --build-arg ALPINE_VERSION=$(2) -t $(CI_BUILD_TAG)/node:$(3) -f $(4) $(5)
+scan_image = docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v $(HOME)/Library/Caches:/root/.cache/ aquasec/trivy --timeout 5m0s $(CI_BUILD_TAG)/$(1) >> scan.txt
-docker_build_solr = docker build $(DOCKER_BUILD_PARAMS) --build-arg LAGOON_VERSION=$(LAGOON_VERSION) --build-arg IMAGE_REPO=$(CI_BUILD_TAG) --build-arg SOLR_MAJ_MIN_VERSION=$(1) -t $(CI_BUILD_TAG)/solr:$(2) -f $(3) $(4)
+# Tags an image with the `testlagoon` repository and pushes it
+docker_publish_testlagoon = docker tag $(CI_BUILD_TAG)/$(1) testlagoon/$(2) && docker push testlagoon/$(2) | cat
-# Tags an image with the `amazeeio` repository and pushes it
-docker_publish_amazeeio = docker tag $(CI_BUILD_TAG)/$(1) amazeeio/$(2) && docker push amazeeio/$(2) | cat
-
-# Tags an image with the `amazeeiolagoon` repository and pushes it
-docker_publish_amazeeiolagoon = docker tag $(CI_BUILD_TAG)/$(1) amazeeiolagoon/$(2) && docker push amazeeiolagoon/$(2) | cat
+# Tags an image with the `uselagoon` repository and pushes it
+docker_publish_uselagoon = docker tag $(CI_BUILD_TAG)/$(1) uselagoon/$(2) && docker push uselagoon/$(2) | cat
#######
@@ -130,29 +120,13 @@ docker_publish_amazeeiolagoon = docker tag $(CI_BUILD_TAG)/$(1) amazeeiolagoon/$
images := oc \
kubectl \
- mariadb \
- mariadb-drupal \
- postgres \
- postgres-ckan \
- postgres-drupal \
oc-build-deploy-dind \
kubectl-build-deploy-dind \
- commons \
- nginx \
- nginx-drupal \
- varnish \
- varnish-drupal \
- varnish-persistent \
- varnish-persistent-drupal \
- redis \
- redis-persistent \
rabbitmq \
rabbitmq-cluster \
- mongo \
athenapdf-service \
curator \
- docker-host \
- toolbox
+ docker-host
# base-images is a variable that will be constantly filled with all base image there are
base-images += $(images)
@@ -167,6 +141,8 @@ $(build-images):
$(eval image = $(subst build/,,$@))
# Call the docker build
$(call docker_build,$(image),images/$(image)/Dockerfile,images/$(image))
+# Scan the created image with Trivy
+ $(call scan_image,$(image),)
# Touch an empty file which make itself is using to understand when the image has been last build
touch $@
@@ -176,217 +152,17 @@ $(build-images):
# if the parent has been built
# 2. Dockerfiles of the Images itself, will cause make to rebuild the images if something has
# changed on the Dockerfiles
-build/mariadb: build/commons images/mariadb/Dockerfile
-build/mariadb-drupal: build/mariadb images/mariadb-drupal/Dockerfile
-build/postgres: build/commons images/postgres/Dockerfile
-build/postgres-ckan: build/postgres images/postgres-ckan/Dockerfile
-build/postgres-drupal: build/postgres images/postgres-drupal/Dockerfile
-build/commons: images/commons/Dockerfile
-build/nginx: build/commons images/nginx/Dockerfile
-build/nginx-drupal: build/nginx images/nginx-drupal/Dockerfile
-build/varnish: build/commons images/varnish/Dockerfile
-build/varnish-drupal: build/varnish images/varnish-drupal/Dockerfile
-build/varnish-persistent: build/varnish images/varnish/Dockerfile
-build/varnish-persistent-drupal: build/varnish-persistent images/varnish-drupal/Dockerfile
-build/redis: build/commons images/redis/Dockerfile
-build/redis-persistent: build/redis images/redis-persistent/Dockerfile
-build/rabbitmq: build/commons images/rabbitmq/Dockerfile
+build/rabbitmq: images/rabbitmq/Dockerfile
build/rabbitmq-cluster: build/rabbitmq images/rabbitmq-cluster/Dockerfile
-build/mongo: build/commons images/mongo/Dockerfile
-build/docker-host: build/commons images/docker-host/Dockerfile
-build/oc: build/commons images/oc/Dockerfile
-build/kubectl: build/commons images/kubectl/Dockerfile
-build/curator: build/commons images/curator/Dockerfile
+build/docker-host: images/docker-host/Dockerfile
+build/oc: images/oc/Dockerfile
+build/kubectl: images/kubectl/Dockerfile
+build/curator: images/curator/Dockerfile
build/oc-build-deploy-dind: build/oc images/oc-build-deploy-dind
-build/athenapdf-service: build/commons images/athenapdf-service/Dockerfile
-build/toolbox: build/commons images/toolbox/Dockerfile
+build/athenapdf-service: images/athenapdf-service/Dockerfile
build/kubectl-build-deploy-dind: build/kubectl images/kubectl-build-deploy-dind
-#######
-####### Elastic Images
-#######
-
-elasticimages := elasticsearch__6 \
- elasticsearch__7 \
- kibana__6 \
- kibana__7 \
- logstash__6 \
- logstash__7
-
-build-elasticimages = $(foreach image,$(elasticimages),build/$(image))
-
-# Define the make recipe for all base images
-$(build-elasticimages): build/commons
- $(eval clean = $(subst build/,,$@))
- $(eval tool = $(word 1,$(subst __, ,$(clean))))
- $(eval version = $(word 2,$(subst __, ,$(clean))))
-# Call the docker build
- $(call docker_build_elastic,$(version),$(tool),images/$(tool)/Dockerfile$(version),images/$(tool))
-# Touch an empty file which make itself is using to understand when the image has been last build
- touch $@
-
-base-images-with-versions += $(elasticimages)
-s3-images += $(elasticimages)
-
-build/elasticsearch__6 build/elasticsearch__7 build/kibana__6 build/kibana__7 build/logstash__6 build/logstash__7: images/commons
-
-#######
-####### Python Images
-#######
-####### Python Images are alpine linux based Python images.
-
-pythonimages := python__2.7 \
- python__3.7 \
- python__2.7-ckan \
- python__2.7-ckandatapusher
-
-build-pythonimages = $(foreach image,$(pythonimages),build/$(image))
-
-# Define the make recipe for all base images
-$(build-pythonimages): build/commons
- $(eval clean = $(subst build/python__,,$@))
- $(eval version = $(word 1,$(subst -, ,$(clean))))
- $(eval type = $(word 2,$(subst -, ,$(clean))))
- $(eval alpine_version = $(DEFAULT_ALPINE_VERSION))
-# this fills variables only if $type is existing, if not they are just empty
- $(eval type_dash = $(if $(type),-$(type)))
-# Call the docker build
- $(call docker_build_python,$(version),$(alpine_version),$(version)$(type_dash),images/python$(type_dash)/Dockerfile,images/python$(type_dash))
-# Touch an empty file which make itself is using to understand when the image has been last build
- touch $@
-
-base-images-with-versions += $(pythonimages)
-s3-images += $(pythonimages)
-
-build/python__2.7 build/python__3.7: images/commons
-build/python__2.7-ckan: build/python__2.7
-build/python__2.7-ckandatapusher: build/python__2.7
-
-
-#######
-####### PHP Images
-#######
-####### PHP Images are alpine linux based PHP images.
-
-phpimages := php__7.2-fpm \
- php__7.3-fpm \
- php__7.4-fpm \
- php__7.2-cli \
- php__7.3-cli \
- php__7.4-cli \
- php__7.2-cli-drupal \
- php__7.3-cli-drupal \
- php__7.4-cli-drupal
-
-
-build-phpimages = $(foreach image,$(phpimages),build/$(image))
-
-# Define the make recipe for all base images
-$(build-phpimages): build/commons
- $(eval clean = $(subst build/php__,,$@))
- $(eval version = $(word 1,$(subst -, ,$(clean))))
- $(eval type = $(word 2,$(subst -, ,$(clean))))
- $(eval subtype = $(word 3,$(subst -, ,$(clean))))
- $(eval alpine_version := $(shell case $(version) in (5.6) echo "3.8" ;; (7.0) echo "3.7" ;; (7.1) echo "3.10" ;; (*) echo $(DEFAULT_ALPINE_VERSION) ;; esac ))
-# this fills variables only if $type is existing, if not they are just empty
- $(eval type_dash = $(if $(type),-$(type)))
- $(eval type_slash = $(if $(type),/$(type)))
-# if there is a subtype, add it. If not, just keep what we already had
- $(eval type_dash = $(if $(subtype),-$(type)-$(subtype),$(type_dash)))
- $(eval type_slash = $(if $(subtype),/$(type)-$(subtype),$(type_slash)))
-
-# Call the docker build
- $(call docker_build_php,$(version),$(alpine_version),$(version)$(type_dash),images/php$(type_slash)/Dockerfile,images/php$(type_slash))
-# Touch an empty file which make itself is using to understand when the image has been last build
- touch $@
-
-base-images-with-versions += $(phpimages)
-s3-images += $(phpimages)
-
-build/php__7.2-fpm build/php__7.3-fpm build/php__7.4-fpm: images/commons
-build/php__7.2-cli: build/php__7.2-fpm
-build/php__7.3-cli: build/php__7.3-fpm
-build/php__7.4-cli: build/php__7.4-fpm
-build/php__7.2-cli-drupal: build/php__7.2-cli
-build/php__7.3-cli-drupal: build/php__7.3-cli
-build/php__7.4-cli-drupal: build/php__7.4-cli
-
-#######
-####### Solr Images
-#######
-####### Solr Images are alpine linux based Solr images.
-
-solrimages := solr__5.5 \
- solr__6.6 \
- solr__7.7 \
- solr__5.5-drupal \
- solr__6.6-drupal \
- solr__7.7-drupal \
- solr__5.5-ckan \
- solr__6.6-ckan
-
-
-build-solrimages = $(foreach image,$(solrimages),build/$(image))
-
-# Define the make recipe for all base images
-$(build-solrimages): build/commons
- $(eval clean = $(subst build/solr__,,$@))
- $(eval version = $(word 1,$(subst -, ,$(clean))))
- $(eval type = $(word 2,$(subst -, ,$(clean))))
-# this fills variables only if $type is existing, if not they are just empty
- $(eval type_dash = $(if $(type),-$(type)))
-# Call the docker build
- $(call docker_build_solr,$(version),$(version)$(type_dash),images/solr$(type_dash)/Dockerfile,images/solr$(type_dash))
-# Touch an empty file which make itself is using to understand when the image has been last build
- touch $@
-
-base-images-with-versions += $(solrimages)
-s3-images += $(solrimages)
-
-build/solr__5.5 build/solr__6.6 build/solr__7.7: images/commons
-build/solr__5.5-drupal: build/solr__5.5
-build/solr__6.6-drupal: build/solr__6.6
-build/solr__7.7-drupal: build/solr__7.7
-build/solr__5.5-ckan: build/solr__5.5
-build/solr__6.6-ckan: build/solr__6.6
-
-#######
-####### Node Images
-#######
-####### Node Images are alpine linux based Node images.
-
-nodeimages := node__14 \
- node__12 \
- node__10 \
- node__14-builder \
- node__12-builder \
- node__10-builder \
-
-build-nodeimages = $(foreach image,$(nodeimages),build/$(image))
-
-# Define the make recipe for all base images
-$(build-nodeimages): build/commons
- $(eval clean = $(subst build/node__,,$@))
- $(eval version = $(word 1,$(subst -, ,$(clean))))
- $(eval type = $(word 2,$(subst -, ,$(clean))))
- $(eval alpine_version := $(shell case $(version) in (6) echo "" ;; (9) echo "" ;; (*) echo $(DEFAULT_ALPINE_VERSION) ;; esac ))
-# this fills variables only if $type is existing, if not they are just empty
- $(eval type_dash = $(if $(type),-$(type)))
- $(eval type_slash = $(if $(type),/$(type)))
-# Call the docker build
- $(call docker_build_node,$(version),$(alpine_version),$(version)$(type_dash),images/node$(type_slash)/Dockerfile,images/node$(type_slash))
-# Touch an empty file which make itself is using to understand when the image has been last build
- touch $@
-
-base-images-with-versions += $(nodeimages)
-s3-images += $(nodeimages)
-
-build/node__10 build/node__12 build/node__14: images/commons images/node/Dockerfile
-build/node__14-builder: build/node__14 images/node/builder/Dockerfile
-build/node__12-builder: build/node__12 images/node/builder/Dockerfile
-build/node__10-builder: build/node__10 images/node/builder/Dockerfile
-
#######
####### Service Images
#######
@@ -396,9 +172,10 @@ build/node__10-builder: build/node__10 images/node/builder/Dockerfile
# Yarn Workspace Image which builds the Yarn Workspace within a single image. This image will be
# used by all microservices based on Node.js to not build similar node packages again
build-images += yarn-workspace-builder
-build/yarn-workspace-builder: build/node__10-builder images/yarn-workspace-builder/Dockerfile
+build/yarn-workspace-builder: images/yarn-workspace-builder/Dockerfile
$(eval image = $(subst build/,,$@))
$(call docker_build,$(image),images/$(image)/Dockerfile,.)
+ $(call scan_image,$(image),)
touch $@
#######
@@ -422,46 +199,35 @@ build-taskimages = $(foreach image,$(taskimages),build/task-$(image))
$(build-taskimages):
$(eval image = $(subst build/task-,,$@))
$(call docker_build,task-$(image),taskimages/$(image)/Dockerfile,taskimages/$(image))
+ $(call scan_image,task-$(image),)
touch $@
# Variables of service images we manage and build
-services := api \
- auth-server \
- logs2email \
- logs2slack \
- logs2rocketchat \
- logs2microsoftteams \
- controllerhandler \
- webhook-handler \
- webhooks2tasks \
- backup-handler \
- broker \
- broker-single \
- logs-concentrator \
- logs-dispatcher \
- logs-tee \
- logs-forwarder \
- logs-db \
- logs-db-ui \
- logs-db-curator \
- logs2logs-db \
- auto-idler \
- storage-calculator \
- api-db \
- drush-alias \
- keycloak \
- keycloak-db \
- ui \
- harbor-core \
- harbor-database \
- harbor-jobservice \
- harbor-nginx \
- harbor-portal \
- harbor-redis \
- harborregistry \
- harborregistryctl \
- harbor-trivy \
- api-redis
+services := api \
+ api-db \
+ api-redis \
+ auth-server \
+ auto-idler \
+ backup-handler \
+ broker \
+ broker-single \
+ controllerhandler \
+ drush-alias \
+ keycloak \
+ keycloak-db \
+ logs-concentrator \
+ logs-db-curator \
+ logs-dispatcher \
+ logs-tee \
+ logs2email \
+ logs2microsoftteams \
+ logs2rocketchat \
+ logs2slack \
+ storage-calculator \
+ ui \
+ webhook-handler \
+ webhooks2tasks
+
service-images += $(services)
@@ -471,44 +237,50 @@ build-services = $(foreach image,$(services),build/$(image))
$(build-services):
$(eval image = $(subst build/,,$@))
$(call docker_build,$(image),services/$(image)/Dockerfile,services/$(image))
+ $(call scan_image,$(image),)
touch $@
# Dependencies of Service Images
build/auth-server build/logs2email build/logs2slack build/logs2rocketchat build/logs2microsoftteams build/backup-handler build/controllerhandler build/webhook-handler build/webhooks2tasks build/api build/ui: build/yarn-workspace-builder
-build/logs2logs-db: build/logstash__7
-build/logs-db: build/elasticsearch__7
-build/logs-db-ui: build/kibana__7
-build/logs-db-curator: build/curator
+build/api-db: services/api-db/Dockerfile
+build/api-redis: services/api-redis/Dockerfile
build/auto-idler: build/oc
-build/storage-calculator: build/oc
-build/api-db build/keycloak-db: build/mariadb
-build/broker: build/rabbitmq-cluster build/broker-single
build/broker-single: build/rabbitmq
-build/drush-alias: build/nginx
-build/keycloak: build/commons
-build/harbor-database: build/postgres
-build/harbor-trivy build/local-minio: build/harbor-database services/harbor-redis/Dockerfile
-build/harborregistry: services/harbor-jobservice/Dockerfile
-build/harborregistryctl: build/harborregistry
-build/harbor-nginx: build/harborregistryctl services/harbor-core/Dockerfile services/harbor-portal/Dockerfile
-build/tests: build/python__2.7
+build/broker: build/rabbitmq-cluster build/broker-single
+build/drush-alias: services/drush-alias/Dockerfile
+build/keycloak-db: services/keycloak-db/Dockerfile
+build/keycloak: services/keycloak/Dockerfile
+build/logs-concentrator: services/logs-concentrator/Dockerfile
+build/logs-db-curator: build/curator
+build/logs-dispatcher: services/logs-dispatcher/Dockerfile
+build/logs-tee: services/logs-tee/Dockerfile
+build/storage-calculator: build/oc
+build/tests-controller-kubernetes: build/tests
build/tests-kubernetes: build/tests
build/tests-openshift: build/tests
-build/toolbox: build/mariadb
-build/api-redis: build/redis
-
+build/tests: tests/Dockerfile
+build/local-minio:
# Auth SSH needs the context of the root folder, so we have it individually
-build/ssh: build/commons
+build/ssh: services/ssh/Dockerfile
$(eval image = $(subst build/,,$@))
$(call docker_build,$(image),services/$(image)/Dockerfile,.)
+ $(call scan_image,$(image),)
touch $@
service-images += ssh
+build/local-git: local-dev/git/Dockerfile
+build/local-api-data-watcher-pusher: local-dev/api-data-watcher-pusher/Dockerfile
+build/local-registry: local-dev/registry/Dockerfile
+build/local-dbaas-provider: local-dev/dbaas-provider/Dockerfile
+build/local-mongodb-dbaas-provider: local-dev/mongodb-dbaas-provider/Dockerfile
+
# Images for local helpers that exist in another folder than the service images
localdevimages := local-git \
local-api-data-watcher-pusher \
- local-registry\
- local-dbaas-provider
+ local-registry \
+ local-dbaas-provider \
+ local-mongodb-dbaas-provider
+
service-images += $(localdevimages)
build-localdevimages = $(foreach image,$(localdevimages),build/$(image))
@@ -516,12 +288,14 @@ $(build-localdevimages):
$(eval folder = $(subst build/local-,,$@))
$(eval image = $(subst build/,,$@))
$(call docker_build,$(image),local-dev/$(folder)/Dockerfile,local-dev/$(folder))
+ $(call scan_image,$(image),)
touch $@
# Image with ansible test
build/tests:
$(eval image = $(subst build/,,$@))
$(call docker_build,$(image),$(image)/Dockerfile,$(image))
+ $(call scan_image,$(image),)
touch $@
service-images += tests
@@ -534,7 +308,7 @@ s3-images += $(service-images)
# Builds all Images
.PHONY: build
-build: $(foreach image,$(base-images) $(base-images-with-versions) $(service-images) $(task-images),build/$(image))
+build: $(foreach image,$(base-images) $(service-images) $(task-images),build/$(image))
# Outputs a list of all Images we manage
.PHONY: build-list
build-list:
@@ -543,10 +317,15 @@ build-list:
done
# Define list of all tests
-all-k8s-tests-list:= features-kubernetes \
- nginx \
- drupal \
- active-standby-kubernetes
+all-k8s-tests-list:= nginx \
+ drupal-php72 \
+ drupal-php73 \
+ drupal-php74 \
+ python \
+ active-standby-kubernetes \
+ features-kubernetes \
+ node-mongodb
+
all-k8s-tests = $(foreach image,$(all-k8s-tests-list),k8s-tests/$(image))
# Run all k8s tests
@@ -557,15 +336,15 @@ k8s-tests: $(all-k8s-tests)
$(all-k8s-tests): k3d k8s-test-services-up
$(MAKE) push-local-registry -j6
$(eval testname = $(subst k8s-tests/,,$@))
- IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility run --rm \
- tests-kubernetes ansible-playbook --skip-tags="skip-on-kubernetes" \
+ IMAGE_REPO=$(CI_BUILD_TAG) UPSTREAM_REPO=$(UPSTREAM_REPO) UPSTREAM_TAG=$(UPSTREAM_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility run --rm \
+ tests-kubernetes ansible-playbook --skip-tags="skip-on-kubernetes,skip-on-jenkins" --extra-vars="@/ansible/tests/vars/test_vars.yaml" \
/ansible/tests/$(testname).yaml \
--extra-vars \
"$$(cat $$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)') | \
jq -rcsR '{kubeconfig: .}')"
# push command of our base images into the local registry
-push-local-registry-images = $(foreach image,$(base-images) $(base-images-with-versions) $(task-images),[push-local-registry]-$(image))
+push-local-registry-images = $(foreach image,$(base-images) $(task-images),[push-local-registry]-$(image))
# tag and push all images
.PHONY: push-local-registry
push-local-registry: $(push-local-registry-images)
@@ -586,14 +365,17 @@ $(push-local-registry-images):
# Define list of all tests
all-openshift-tests-list:= features-openshift \
node \
- drupal \
+ drupal-php72 \
+ drupal-php73 \
+ drupal-php74 \
drupal-postgres \
github \
gitlab \
bitbucket \
nginx \
elasticsearch \
- active-standby-openshift
+ active-standby-openshift \
+ node-mongodb
all-openshift-tests = $(foreach image,$(all-openshift-tests-list),openshift-tests/$(image))
.PHONY: openshift-tests
@@ -610,13 +392,13 @@ wait-for-keycloak:
grep -m 1 "Config of Keycloak done." <(docker-compose -p $(CI_BUILD_TAG) --compatibility logs -f keycloak 2>&1)
# Define a list of which Lagoon Services are needed for running any deployment testing
-main-test-services = broker logs2email logs2slack logs2rocketchat logs2microsoftteams api api-db keycloak keycloak-db ssh auth-server local-git local-api-data-watcher-pusher harbor-core harbor-database harbor-jobservice harbor-portal harbor-nginx harbor-redis harborregistry harborregistryctl harbor-trivy local-minio
+main-test-services = broker logs2email logs2slack logs2rocketchat logs2microsoftteams api api-db api-redis keycloak keycloak-db ssh auth-server local-git local-api-data-watcher-pusher local-minio
# Define a list of which Lagoon Services are needed for openshift testing
-openshift-test-services = openshiftremove openshiftbuilddeploy openshiftbuilddeploymonitor openshiftmisc tests-openshift
+openshift-test-services = openshiftremove openshiftbuilddeploy openshiftbuilddeploymonitor openshiftmisc tests-openshift local-dbaas-provider local-mongodb-dbaas-provider
# Define a list of which Lagoon Services are needed for kubernetes testing
-k8s-test-services = controllerhandler tests-kubernetes local-registry local-dbaas-provider drush-alias
+k8s-test-services = controllerhandler tests-kubernetes local-registry local-dbaas-provider local-mongodb-dbaas-provider drush-alias
# List of Lagoon Services needed for webhook endpoint testing
webhooks-test-services = webhook-handler webhooks2tasks backup-handler
@@ -628,11 +410,10 @@ drupal-test-services = drush-alias
webhook-tests = github gitlab bitbucket
# All Tests that use API endpoints
-api-tests = node features-openshift features-kubernetes nginx elasticsearch active-standby-openshift active-standby-kubernetes
+api-tests = node features-openshift features-kubernetes nginx elasticsearch active-standby-openshift active-standby-kubernetes node-mongodb
# All drupal tests
-drupal-tests = drupal drupal-postgres
-drupal-dependencies = build/varnish-drupal build/solr__5.5-drupal build/nginx-drupal build/redis build/php__7.2-cli-drupal build/php__7.3-cli-drupal build/php__7.4-cli-drupal build/postgres-drupal build/mariadb-drupal
+drupal-tests = drupal-php72 drupal-php73 drupal-php74 drupal-postgres
# These targets are used as dependencies to bring up containers in the right order.
.PHONY: main-test-services-up
@@ -674,7 +455,7 @@ $(openshift-run-api-tests): minishift build/oc-build-deploy-dind openshift-test-
openshift-run-drupal-tests = $(foreach image,$(drupal-tests),openshift-tests/$(image))
.PHONY: $(openshift-run-drupal-tests)
-$(openshift-run-drupal-tests): minishift build/oc-build-deploy-dind $(drupal-dependencies) openshift-test-services-up drupaltest-services-up push-minishift
+$(openshift-run-drupal-tests): minishift build/oc-build-deploy-dind openshift-test-services-up drupaltest-services-up push-minishift
$(eval testname = $(subst openshift-tests/,,$@))
IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility run --rm tests-openshift ansible-playbook /ansible/tests/$(testname).yaml
@@ -684,24 +465,8 @@ $(openshift-run-webhook-tests): minishift build/oc-build-deploy-dind openshift-t
$(eval testname = $(subst openshift-tests/,,$@))
IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility run --rm tests-openshift ansible-playbook /ansible/tests/$(testname).yaml
-end2end-all-tests = $(foreach image,$(all-tests-list),end2end-tests/$(image))
-
-.PHONY: end2end-tests
-end2end-tests: $(end2end-all-tests)
-
-.PHONY: start-end2end-ansible
-start-end2end-ansible: build/tests
- docker-compose -f docker-compose.yaml -f docker-compose.end2end.yaml -p end2end --compatibility up -d tests
-
-$(end2end-all-tests): start-end2end-ansible
- $(eval testname = $(subst end2end-tests/,,$@))
- docker exec -i $$(docker-compose -f docker-compose.yaml -f docker-compose.end2end.yaml -p end2end ps -q tests) ansible-playbook /ansible/tests/$(testname).yaml
-
-end2end-tests/clean:
- docker-compose -f docker-compose.yaml -f docker-compose.end2end.yaml -p end2end --compatibility down -v
-
# push command of our base images into minishift
-push-minishift-images = $(foreach image,$(base-images) $(base-images-with-versions),[push-minishift]-$(image))
+push-minishift-images = $(foreach image,$(base-images),[push-minishift]-$(image))
# tag and push all images
.PHONY: push-minishift
push-minishift: minishift/login-docker-registry $(push-minishift-images)
@@ -726,129 +491,112 @@ lagoon-kickstart: $(foreach image,$(deployment-test-services-rest),build/$(image
curl -X POST -H "Content-Type: application/json" --data 'mutation { deployEnvironmentBranch(input: { project: { name: "lagoon" }, branchName: "master" } )}' http://localhost:3000/graphql
make logs
-# Start only the local Harbor for testing purposes
-local-harbor: build/harbor-core build/harbor-database build/harbor-jobservice build/harbor-portal build/harbor-nginx build/harbor-redis build/harborregistry build/harborregistryctl build/harbor-trivy build/local-minio
- IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d harbor-core harbor-database harbor-jobservice harbor-portal harbor-nginx harbor-redis harborregistry harborregistryctl harbor-trivy local-minio
+#######
+####### Publishing Images
+#######
+####### All main & PR images are pushed to the testlagoon repository
+#######
-# Publish command to amazeeio docker hub, this should probably only be done during a master deployments
-publish-amazeeio-baseimages = $(foreach image,$(base-images),[publish-amazeeio-baseimages]-$(image))
-publish-amazeeio-baseimages-with-versions = $(foreach image,$(base-images-with-versions),[publish-amazeeio-baseimages-with-versions]-$(image))
+# Publish command to testlagoon docker hub, done on any main branch or PR
+publish-testlagoon-baseimages = $(foreach image,$(base-images),[publish-testlagoon-baseimages]-$(image))
# tag and push all images
-.PHONY: publish-amazeeio-baseimages
-publish-amazeeio-baseimages: $(publish-amazeeio-baseimages) $(publish-amazeeio-baseimages-with-versions)
+.PHONY: publish-testlagoon-baseimages
+publish-testlagoon-baseimages: $(publish-testlagoon-baseimages)
# tag and push of each image
-.PHONY: $(publish-amazeeio-baseimages)
-$(publish-amazeeio-baseimages):
-# Calling docker_publish for image, but remove the prefix '[publish-amazeeio-baseimages]-' first
- $(eval image = $(subst [publish-amazeeio-baseimages]-,,$@))
-# Publish images as :latest
- $(call docker_publish_amazeeio,$(image),$(image):latest)
+.PHONY: $(publish-testlagoon-baseimages)
+$(publish-testlagoon-baseimages):
+# Calling docker_publish for image, but remove the prefix '[publish-testlagoon-baseimages]-' first
+ $(eval image = $(subst [publish-testlagoon-baseimages]-,,$@))
# Publish images with version tag
- $(call docker_publish_amazeeio,$(image),$(image):$(LAGOON_VERSION))
+ $(call docker_publish_testlagoon,$(image),$(image):$(BRANCH_NAME))
-# tag and push of base image with version
-.PHONY: $(publish-amazeeio-baseimages-with-versions)
-$(publish-amazeeio-baseimages-with-versions):
-# Calling docker_publish for image, but remove the prefix '[publish-amazeeio-baseimages-with-versions]-' first
- $(eval image = $(subst [publish-amazeeio-baseimages-with-versions]-,,$@))
-# The underline is a placeholder for a colon, replace that
- $(eval image = $(subst __,:,$(image)))
-# These images already use a tag to differentiate between different versions of the service itself (like node:9 and node:10)
-# We push a version without the `-latest` suffix
- $(call docker_publish_amazeeio,$(image),$(image))
-# Plus a version with the `-latest` suffix, this makes it easier for people with automated testing
- $(call docker_publish_amazeeio,$(image),$(image)-latest)
-# We add the Lagoon Version just as a dash
- $(call docker_publish_amazeeio,$(image),$(image)-$(LAGOON_VERSION))
+# Publish command to testlagoon docker hub, done on any main branch or PR
+publish-testlagoon-serviceimages = $(foreach image,$(service-images),[publish-testlagoon-serviceimages]-$(image))
+# tag and push all images
+.PHONY: publish-testlagoon-serviceimages
+publish-testlagoon-serviceimages: $(publish-testlagoon-serviceimages)
+# tag and push of each image
+.PHONY: $(publish-testlagoon-serviceimages)
+$(publish-testlagoon-serviceimages):
+# Calling docker_publish for image, but remove the prefix '[publish-testlagoon-serviceimages]-' first
+ $(eval image = $(subst [publish-testlagoon-serviceimages]-,,$@))
+# Publish images with version tag
+ $(call docker_publish_testlagoon,$(image),$(image):$(BRANCH_NAME))
# Publish command to amazeeio docker hub; this should probably only be done during master deployments
-publish-amazeeiolagoon-baseimages = $(foreach image,$(base-images),[publish-amazeeiolagoon-baseimages]-$(image))
-publish-amazeeiolagoon-baseimages-with-versions = $(foreach image,$(base-images-with-versions),[publish-amazeeiolagoon-baseimages-with-versions]-$(image))
+publish-testlagoon-taskimages = $(foreach image,$(task-images),[publish-testlagoon-taskimages]-$(image))
# tag and push all images
-.PHONY: publish-amazeeiolagoon-baseimages
-publish-amazeeiolagoon-baseimages: $(publish-amazeeiolagoon-baseimages) $(publish-amazeeiolagoon-baseimages-with-versions)
-
+.PHONY: publish-testlagoon-taskimages
+publish-testlagoon-taskimages: $(publish-testlagoon-taskimages)
# tag and push of each image
-.PHONY: $(publish-amazeeiolagoon-baseimages)
-$(publish-amazeeiolagoon-baseimages):
-# Calling docker_publish for image, but remove the prefix '[publish-amazeeiolagoon-baseimages]-' first
- $(eval image = $(subst [publish-amazeeiolagoon-baseimages]-,,$@))
+.PHONY: $(publish-testlagoon-taskimages)
+$(publish-testlagoon-taskimages):
+# Calling docker_publish for image, but remove the prefix '[publish-testlagoon-taskimages]-' first
+ $(eval image = $(subst [publish-testlagoon-taskimages]-,,$@))
# Publish images with version tag
- $(call docker_publish_amazeeiolagoon,$(image),$(image):$(BRANCH_NAME))
+ $(call docker_publish_testlagoon,$(image),$(image):$(BRANCH_NAME))
-# tag and push of base image with version
-.PHONY: $(publish-amazeeiolagoon-baseimages-with-versions)
-$(publish-amazeeiolagoon-baseimages-with-versions):
-# Calling docker_publish for image, but remove the prefix '[publish-amazeeiolagoon-baseimages-with-versions]-' first
- $(eval image = $(subst [publish-amazeeiolagoon-baseimages-with-versions]-,,$@))
-# The underline is a placeholder for a colon, replace that
- $(eval image = $(subst __,:,$(image)))
-# We add the Lagoon Version just as a dash
- $(call docker_publish_amazeeiolagoon,$(image),$(image)-$(BRANCH_NAME))
+#######
+####### All tagged releases are pushed to the uselagoon repository with new semantic tags
+#######
+# Publish command to uselagoon docker hub, only done on tags
+publish-uselagoon-baseimages = $(foreach image,$(base-images),[publish-uselagoon-baseimages]-$(image))
-# Publish command to amazeeio docker hub, this should probably only be done during a master deployments
-publish-amazeeiolagoon-serviceimages = $(foreach image,$(service-images),[publish-amazeeiolagoon-serviceimages]-$(image))
# tag and push all images
-.PHONY: publish-amazeeiolagoon-serviceimages
-publish-amazeeiolagoon-serviceimages: $(publish-amazeeiolagoon-serviceimages)
-
+.PHONY: publish-uselagoon-baseimages
+publish-uselagoon-baseimages: $(publish-uselagoon-baseimages)
# tag and push of each image
-.PHONY: $(publish-amazeeiolagoon-serviceimages)
-$(publish-amazeeiolagoon-serviceimages):
-# Calling docker_publish for image, but remove the prefix '[publish-amazeeiolagoon-serviceimages]-' first
- $(eval image = $(subst [publish-amazeeiolagoon-serviceimages]-,,$@))
+.PHONY: $(publish-uselagoon-baseimages)
+$(publish-uselagoon-baseimages):
+# Call docker_publish for the image, removing the prefix '[publish-uselagoon-baseimages]-' first
+ $(eval image = $(subst [publish-uselagoon-baseimages]-,,$@))
+# Publish images as :latest
+ $(call docker_publish_uselagoon,$(image),$(image):latest)
# Publish images with version tag
- $(call docker_publish_amazeeiolagoon,$(image),$(image):$(BRANCH_NAME))
+ $(call docker_publish_uselagoon,$(image),$(image):$(LAGOON_VERSION))
# Publish command to amazeeio docker hub, this should probably only be done during master deployments
-publish-amazeeiolagoon-taskimages = $(foreach image,$(task-images),[publish-amazeeiolagoon-taskimages]-$(image))
+publish-uselagoon-serviceimages = $(foreach image,$(service-images),[publish-uselagoon-serviceimages]-$(image))
# tag and push all images
-.PHONY: publish-amazeeiolagoon-taskimages
-publish-amazeeiolagoon-taskimages: $(publish-amazeeiolagoon-taskimages)
-
+.PHONY: publish-uselagoon-serviceimages
+publish-uselagoon-serviceimages: $(publish-uselagoon-serviceimages)
# tag and push of each image
-.PHONY: $(publish-amazeeiolagoon-taskimages)
-$(publish-amazeeiolagoon-taskimages):
-# Calling docker_publish for image, but remove the prefix '[publish-amazeeiolagoon-taskimages]-' first
- $(eval image = $(subst [publish-amazeeiolagoon-taskimages]-,,$@))
+.PHONY: $(publish-uselagoon-serviceimages)
+$(publish-uselagoon-serviceimages):
+# Call docker_publish for the image, removing the prefix '[publish-uselagoon-serviceimages]-' first
+ $(eval image = $(subst [publish-uselagoon-serviceimages]-,,$@))
+# Publish images as :latest
+ $(call docker_publish_uselagoon,$(image),$(image):latest)
# Publish images with version tag
- $(call docker_publish_amazeeiolagoon,$(image),$(image):$(BRANCH_NAME))
+ $(call docker_publish_uselagoon,$(image),$(image):$(LAGOON_VERSION))
-s3-save = $(foreach image,$(s3-images),[s3-save]-$(image))
-# save all images to s3
-.PHONY: s3-save
-s3-save: $(s3-save)
-# tag and push of each image
-.PHONY: $(s3-save)
-$(s3-save):
-# remove the prefix '[s3-save]-' first
- $(eval image = $(subst [s3-save]-,,$@))
- $(eval image = $(subst __,:,$(image)))
- docker save $(CI_BUILD_TAG)/$(image) $$(docker history -q $(CI_BUILD_TAG)/$(image) | grep -v missing) | gzip -9 | aws s3 cp - s3://lagoon-images/$(image).tar.gz
-
-s3-load = $(foreach image,$(s3-images),[s3-load]-$(image))
-# save all images to s3
-.PHONY: s3-load
-s3-load: $(s3-load)
+# Publish command to uselagoon docker hub, only done on tags
+publish-uselagoon-taskimages = $(foreach image,$(task-images),[publish-uselagoon-taskimages]-$(image))
+# tag and push all images
+.PHONY: publish-uselagoon-taskimages
+publish-uselagoon-taskimages: $(publish-uselagoon-taskimages)
+
# tag and push of each image
-.PHONY: $(s3-load)
-$(s3-load):
-# remove the prefix '[s3-load]-' first
- $(eval image = $(subst [s3-load]-,,$@))
- $(eval image = $(subst __,:,$(image)))
- curl -s https://s3.us-east-2.amazonaws.com/lagoon-images/$(image).tar.gz | gunzip -c | docker load
+.PHONY: $(publish-uselagoon-taskimages)
+$(publish-uselagoon-taskimages):
+# Call docker_publish for the image, removing the prefix '[publish-uselagoon-taskimages]-' first
+ $(eval image = $(subst [publish-uselagoon-taskimages]-,,$@))
+# Publish images as :latest
+ $(call docker_publish_uselagoon,$(image),$(image):latest)
+# Publish images with version tag
+ $(call docker_publish_uselagoon,$(image),$(image):$(LAGOON_VERSION))
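+# As a concrete illustration (an assumption: docker_publish_uselagoon is a macro
+# defined earlier in this Makefile and is expected to wrap the usual docker tag
+# and docker push pair), the per-image target '[publish-uselagoon-taskimages]-foo'
+# for a hypothetical task image 'foo' would effectively run:
+#   docker tag $(CI_BUILD_TAG)/foo uselagoon/foo:latest
+#   docker push uselagoon/foo:latest
+#   docker tag $(CI_BUILD_TAG)/foo uselagoon/foo:$(LAGOON_VERSION)
+#   docker push uselagoon/foo:$(LAGOON_VERSION)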
# Clean all build touches, which will cause make to rebuild the Docker Images (Layer caching is
# still active, so this is a very safe command)
@@ -870,8 +618,6 @@ else
IMAGE_REPO=$(CI_BUILD_TAG) \
docker-compose -p $(CI_BUILD_TAG) --compatibility up -d
endif
- grep -m 1 ".opendistro_security index does not exist yet" <(docker-compose -p $(CI_BUILD_TAG) logs -f logs-db 2>&1)
- while ! docker exec "$$(docker-compose -p $(CI_BUILD_TAG) ps -q logs-db)" ./securityadmin_demo.sh; do sleep 5; done
$(MAKE) wait-for-keycloak
down:
@@ -928,7 +674,7 @@ minishift/login-docker-registry: minishift
openshift-lagoon-setup:
# Only use the minishift provided oc if we don't have one yet (allows system engineers to use their own oc)
if ! which oc; then eval $$(./local-dev/minishift/minishift --profile $(CI_BUILD_TAG) oc-env); fi; \
- oc -n default set env dc/router -e ROUTER_LOG_LEVEL=info -e ROUTER_SYSLOG_ADDRESS=router-logs.lagoon.svc:5140; \
+ oc -n default set env dc/router -e ROUTER_LOG_LEVEL=info -e ROUTER_SYSLOG_ADDRESS=router-logs.lagoon.svc:5141; \
oc new-project lagoon; \
oc adm pod-network make-projects-global lagoon; \
oc -n lagoon create serviceaccount openshiftbuilddeploy; \
@@ -941,16 +687,12 @@ openshift-lagoon-setup:
oc -n lagoon create serviceaccount docker-host; \
oc -n lagoon adm policy add-scc-to-user privileged -z docker-host; \
oc -n lagoon policy add-role-to-user edit -z docker-host; \
- oc -n lagoon create serviceaccount logs-collector; \
- oc -n lagoon adm policy add-cluster-role-to-user cluster-reader -z logs-collector; \
- oc -n lagoon adm policy add-scc-to-user hostaccess -z logs-collector; \
- oc -n lagoon adm policy add-scc-to-user privileged -z logs-collector; \
oc -n lagoon adm policy add-cluster-role-to-user daemonset-admin -z lagoon-deployer; \
oc -n lagoon create serviceaccount lagoon-deployer; \
oc -n lagoon policy add-role-to-user edit -z lagoon-deployer; \
oc -n lagoon create -f openshift-setup/clusterrole-daemonset-admin.yaml; \
oc -n lagoon adm policy add-cluster-role-to-user daemonset-admin -z lagoon-deployer; \
- bash -c "oc process -n lagoon -f services/docker-host/docker-host.yaml | oc -n lagoon apply -f -"; \
+ bash -c "oc process -n lagoon -f openshift-setup/docker-host.yaml | oc -n lagoon apply -f -"; \
oc -n lagoon create -f openshift-setup/dbaas-roles.yaml; \
oc -n dbaas-operator-system create -f openshift-setup/dbaas-operator.yaml; \
oc -n lagoon create -f openshift-setup/dbaas-providers.yaml; \
@@ -1022,21 +764,20 @@ ifeq ($(KUBECTL_VERSION), $(shell kubectl version --short --client 2>/dev/null |
ln -s $(shell command -v kubectl) ./local-dev/kubectl
else
$(info downloading kubectl version $(KUBECTL_VERSION) for $(ARCH))
- curl -Lo local-dev/kubectl https://storage.googleapis.com/kubernetes-release/release/$(KUBECTL_VERSION)/bin/$(ARCH)/amd64/kubectl
+ curl -sSLo local-dev/kubectl https://storage.googleapis.com/kubernetes-release/release/$(KUBECTL_VERSION)/bin/$(ARCH)/amd64/kubectl
chmod a+x local-dev/kubectl
endif
# Symlink the installed helm client if the correct version is already
# installed, otherwise downloads it.
-local-dev/helm/helm:
- @mkdir -p ./local-dev/helm
-ifeq ($(HELM_VERSION), $(shell helm version --short --client 2>/dev/null | sed -E 's/v([0-9.]+).*/\1/'))
+local-dev/helm:
+ifeq ($(HELM_VERSION), $(shell helm version --short --client 2>/dev/null | sed -nE 's/v([0-9.]+).*/\1/p'))
$(info linking local helm version $(HELM_VERSION))
ln -s $(shell command -v helm) ./local-dev/helm
else
$(info downloading helm version $(HELM_VERSION) for $(ARCH))
- curl -L https://get.helm.sh/helm-$(HELM_VERSION)-$(ARCH)-amd64.tar.gz | tar xzC local-dev/helm --strip-components=1
- chmod a+x local-dev/helm/helm
+ curl -sSL https://get.helm.sh/helm-$(HELM_VERSION)-$(ARCH)-amd64.tar.gz | tar -xzC local-dev --strip-components=1 $(ARCH)-amd64/helm
+ chmod a+x local-dev/helm
endif
ifeq ($(DOCKER_DRIVER), btrfs)
@@ -1046,7 +787,7 @@ else
K3D_BTRFS_VOLUME :=
endif
-k3d: local-dev/k3d local-dev/kubectl local-dev/helm/helm build/docker-host
+k3d: local-dev/k3d local-dev/kubectl local-dev/helm build/docker-host
$(MAKE) local-registry-up
$(MAKE) broker-up
$(info starting k3d with name $(K3D_NAME))
@@ -1061,33 +802,43 @@ endif
--image docker.io/rancher/k3s:$(K3S_VERSION) \
--volume $$PWD/local-dev/k3d-registries.yaml:/etc/rancher/k3s/registries.yaml \
$(K3D_BTRFS_VOLUME) \
- -x --no-deploy=traefik \
- --volume $$PWD/local-dev/k3d-nginx-ingress.yaml:/var/lib/rancher/k3s/server/manifests/k3d-nginx-ingress.yaml
+ -x --no-deploy=traefik
echo "$(K3D_NAME)" > $@
export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \
local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" apply -f $$PWD/local-dev/k3d-storageclass-bulk.yaml; \
docker tag $(CI_BUILD_TAG)/docker-host localhost:5000/lagoon/docker-host; \
docker push localhost:5000/lagoon/docker-host; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace nginx-ingress; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add stable https://charts.helm.sh/stable; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n nginx-ingress nginx stable/nginx-ingress; \
local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace k8up; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add appuio https://charts.appuio.ch; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n k8up k8up appuio/k8up; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add appuio https://charts.appuio.ch; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n k8up k8up appuio/k8up; \
local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace dioscuri; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dioscuri https://raw.githubusercontent.com/amazeeio/dioscuri/ingress/charts ; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dioscuri dioscuri dioscuri/dioscuri ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dioscuri https://raw.githubusercontent.com/amazeeio/dioscuri/main/charts ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dioscuri dioscuri dioscuri/dioscuri ; \
local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace dbaas-operator; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dbaas-operator https://raw.githubusercontent.com/amazeeio/dbaas-operator/master/charts ; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator dbaas-operator dbaas-operator/dbaas-operator ; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator mariadbprovider dbaas-operator/mariadbprovider -f local-dev/helm-values-mariadbprovider.yml ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add dbaas-operator https://raw.githubusercontent.com/amazeeio/dbaas-operator/main/charts ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator dbaas-operator dbaas-operator/dbaas-operator ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator mariadbprovider dbaas-operator/mariadbprovider -f local-dev/helm-values-mariadbprovider.yml ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n dbaas-operator mongodbprovider dbaas-operator/mongodbprovider -f local-dev/helm-values-mongodbprovider.yml ; \
+ local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace harbor; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add harbor https://helm.goharbor.io ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n harbor harbor harbor/harbor -f local-dev/helm-values-harbor.yml ; \
local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace lagoon-builddeploy; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add lagoon-builddeploy https://raw.githubusercontent.com/amazeeio/lagoon-kbd/main/charts ; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n lagoon-builddeploy lagoon-builddeploy lagoon-builddeploy/lagoon-builddeploy \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add lagoon-builddeploy https://raw.githubusercontent.com/amazeeio/lagoon-kbd/main/charts ; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n lagoon-builddeploy lagoon-builddeploy lagoon-builddeploy/lagoon-builddeploy \
--set vars.lagoonTargetName=ci-local-control-k8s \
--set vars.rabbitPassword=guest \
--set vars.rabbitUsername=guest \
--set vars.rabbitHostname=172.17.0.1:5672; \
local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' create namespace lagoon; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add lagoon https://uselagoon.github.io/lagoon-charts/; \
- local-dev/helm/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n lagoon lagoon-remote lagoon/lagoon-remote --set dockerHost.image.name=172.17.0.1:5000/lagoon/docker-host --set dockerHost.registry=172.17.0.1:5000; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' repo add lagoon https://uselagoon.github.io/lagoon-charts/; \
+ local-dev/helm --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --kube-context='$(K3D_NAME)' upgrade --install -n lagoon lagoon-remote lagoon/lagoon-remote \
+ --set dioscuri.enabled=false \
+ --set dockerHost.image.repository=172.17.0.1:5000/lagoon/docker-host \
+ --set dockerHost.image.tag=latest \
+ --set dockerHost.registry=172.17.0.1:5000; \
local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon rollout status deployment lagoon-remote-docker-host -w;
ifeq ($(ARCH), darwin)
export KUBECONFIG="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')"; \
@@ -1137,7 +888,7 @@ k8s-dashboard:
kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc2/aio/deploy/recommended.yaml; \
kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n kubernetes-dashboard rollout status deployment kubernetes-dashboard -w; \
echo -e "\nUse this token:"; \
- kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'; \
+ kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon describe secret $$(local-dev/kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' -n lagoon get secret | grep kubernetes-build-deploy | awk '{print $$1}') | grep token: | awk '{print $$2}'; \
open http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ ; \
kubectl --kubeconfig="$$(./local-dev/k3d get-kubeconfig --name='$(K3D_NAME)')" --context='$(K3D_NAME)' proxy
@@ -1167,14 +918,14 @@ k3d/cleanall: k3d/stopall
.PHONY: kubernetes-lagoon-setup
kubernetes-lagoon-setup:
kubectl create namespace lagoon; \
- local-dev/helm/helm repo add lagoon https://uselagoon.github.io/lagoon-charts/; \
- local-dev/helm/helm upgrade --install -n lagoon lagoon-remote lagoon/lagoon-remote; \
+ local-dev/helm repo add lagoon https://uselagoon.github.io/lagoon-charts/; \
+ local-dev/helm upgrade --install -n lagoon lagoon-remote lagoon/lagoon-remote; \
echo -e "\n\nAll Setup, use this token as described in the Lagoon Install Documentation:";
$(MAKE) kubernetes-get-kubernetesbuilddeploy-token
.PHONY: kubernetes-get-kubernetesbuilddeploy-token
kubernetes-get-kubernetesbuilddeploy-token:
- kubectl -n lagoon describe secret $$(kubectl -n lagoon get secret | grep kubernetesbuilddeploy | awk '{print $$1}') | grep token: | awk '{print $$2}'
+ kubectl -n lagoon describe secret $$(kubectl -n lagoon get secret | grep kubernetes-build-deploy | awk '{print $$1}') | grep token: | awk '{print $$2}'
.PHONY: rebuild-push-oc-build-deploy-dind
rebuild-push-oc-build-deploy-dind:
@@ -1190,3 +941,192 @@ ui-development: build/api build/api-db build/local-api-data-watcher-pusher build
.PHONY: api-development
api-development: build/api build/api-db build/local-api-data-watcher-pusher build/keycloak build/keycloak-db build/broker build/broker-single build/api-redis
IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db local-api-data-watcher-pusher keycloak keycloak-db broker api-redis
+
+## CI targets
+
+KIND_VERSION = v0.10.0
+GOJQ_VERSION = v0.11.2
+KIND_IMAGE = kindest/node:v1.20.2@sha256:8f7ea6e7642c0da54f04a7ee10431549c0257315b3a634f6ef2fecaaedb19bab
+TESTS = [api,features-kubernetes,nginx,drupal-php73,drupal-php74,drupal-postgres,python,gitlab,github,bitbucket,node-mongodb,elasticsearch]
+CHARTS_TREEISH = main
+
+local-dev/kind:
+ifeq ($(KIND_VERSION), $(shell kind version 2>/dev/null | sed -nE 's/kind (v[0-9.]+).*/\1/p'))
+ $(info linking local kind version $(KIND_VERSION))
+ ln -s $(shell command -v kind) ./local-dev/kind
+else
+ $(info downloading kind version $(KIND_VERSION) for $(ARCH))
+ curl -sSLo local-dev/kind https://github.com/kubernetes-sigs/kind/releases/download/$(KIND_VERSION)/kind-$(ARCH)-amd64
+ chmod a+x local-dev/kind
+endif
+
+local-dev/jq:
+ifeq ($(GOJQ_VERSION), $(shell jq -v 2>/dev/null | sed -nE 's/gojq ([0-9.]+).*/v\1/p'))
+ $(info linking local jq version $(GOJQ_VERSION))
+ ln -s $(shell command -v jq) ./local-dev/jq
+else
+ $(info downloading gojq version $(GOJQ_VERSION) for $(ARCH))
+ifeq ($(ARCH), darwin)
+ TMPDIR=$$(mktemp -d) \
+ && curl -sSL https://github.com/itchyny/gojq/releases/download/$(GOJQ_VERSION)/gojq_$(GOJQ_VERSION)_$(ARCH)_amd64.zip -o $$TMPDIR/gojq.zip \
+ && (cd $$TMPDIR && unzip gojq.zip) && cp $$TMPDIR/gojq_$(GOJQ_VERSION)_$(ARCH)_amd64/gojq ./local-dev/jq && rm -rf $$TMPDIR
+else
+ curl -sSL https://github.com/itchyny/gojq/releases/download/$(GOJQ_VERSION)/gojq_$(GOJQ_VERSION)_$(ARCH)_amd64.tar.gz | tar -xzC local-dev --strip-components=1 gojq_$(GOJQ_VERSION)_$(ARCH)_amd64/gojq
+ mv ./local-dev/{go,}jq
+endif
+ chmod a+x local-dev/jq
+endif
+
+.PHONY: helm/repos
+helm/repos: local-dev/helm
+ # install repo dependencies required by the charts
+ ./local-dev/helm repo add harbor https://helm.goharbor.io
+ ./local-dev/helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+ ./local-dev/helm repo add stable https://charts.helm.sh/stable
+ ./local-dev/helm repo add bitnami https://charts.bitnami.com/bitnami
+ ./local-dev/helm repo add amazeeio https://amazeeio.github.io/charts/
+ ./local-dev/helm repo add lagoon https://uselagoon.github.io/lagoon-charts/
+ ./local-dev/helm repo update
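+# A quick sanity check that the repositories resolve (helm search is a standard
+# helm subcommand):
+#   ./local-dev/helm search repo lagoon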
+
+# stand up a kind cluster configured appropriately for lagoon testing
+.PHONY: kind/cluster
+kind/cluster: local-dev/kind
+ ./local-dev/kind get clusters | grep -q "$(CI_BUILD_TAG)" && exit; \
+ docker network create kind || true \
+ && export KUBECONFIG=$$(mktemp) \
+ KINDCONFIG=$$(mktemp ./kindconfig.XXX) \
+ KIND_NODE_IP=$$(docker run --rm --network kind alpine ip -o addr show eth0 | sed -nE 's/.* ([0-9.]{7,})\/.*/\1/p') \
+ && chmod 644 $$KUBECONFIG \
+ && curl -sSLo $$KINDCONFIG.tpl https://raw.githubusercontent.com/uselagoon/lagoon-charts/$(CHARTS_TREEISH)/test-suite.kind-config.yaml.tpl \
+ && envsubst < $$KINDCONFIG.tpl > $$KINDCONFIG \
+ && echo ' [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]' >> $$KINDCONFIG \
+ && echo ' endpoint = ["https://imagecache.amazeeio.cloud", "https://index.docker.io/v1/"]' >> $$KINDCONFIG \
+ && echo 'nodes:' >> $$KINDCONFIG \
+ && echo '- role: control-plane' >> $$KINDCONFIG \
+ && echo ' image: $(KIND_IMAGE)' >> $$KINDCONFIG \
+ && echo ' extraMounts:' >> $$KINDCONFIG \
+ && echo ' - containerPath: /var/lib/kubelet/config.json' >> $$KINDCONFIG \
+ && echo ' hostPath: $(HOME)/.docker/config.json' >> $$KINDCONFIG \
+ && echo ' - containerPath: /lagoon/services' >> $$KINDCONFIG \
+ && echo ' hostPath: ./services' >> $$KINDCONFIG \
+ && echo ' readOnly: false' >> $$KINDCONFIG \
+ && echo ' - containerPath: /lagoon/node-packages' >> $$KINDCONFIG \
+ && echo ' hostPath: ./node-packages' >> $$KINDCONFIG \
+ && echo ' readOnly: false' >> $$KINDCONFIG \
+ && KIND_CLUSTER_NAME="$(CI_BUILD_TAG)" ./local-dev/kind create cluster --config=$$KINDCONFIG \
+ && cp $$KUBECONFIG "kubeconfig.kind.$(CI_BUILD_TAG)" \
+ && echo -e 'Interact with the cluster during the test run in Jenkins like so:\n' \
+ && echo "export KUBECONFIG=\$$(mktemp) && scp $$NODE_NAME:$$KUBECONFIG \$$KUBECONFIG && KIND_PORT=\$$(sed -nE 's/.+server:.+:([0-9]+)/\1/p' \$$KUBECONFIG) && ssh -fNL \$$KIND_PORT:127.0.0.1:\$$KIND_PORT $$NODE_NAME" \
+ && echo -e '\nOr running locally:\n' \
+ && echo -e './local-dev/kind export kubeconfig --name "$(CI_BUILD_TAG)"\n' \
+ && echo -e 'kubectl ...\n'
+ifeq ($(ARCH), darwin)
+ export KUBECONFIG="$$(pwd)/kubeconfig.kind.$(CI_BUILD_TAG)" && \
+ if ! ifconfig lo0 | grep $$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}') -q; then sudo ifconfig lo0 alias $$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}'); fi
+ docker rm --force $(CI_BUILD_TAG)-kind-proxy-32080 || true
+ docker run -d --name $(CI_BUILD_TAG)-kind-proxy-32080 \
+ --publish 32080:32080 \
+ --link $(CI_BUILD_TAG)-control-plane:target --network kind \
+ alpine/socat -dd \
+ tcp-listen:32080,fork,reuseaddr tcp-connect:target:32080
+endif
+
+KIND_SERVICES = api api-db api-redis auth-server broker controllerhandler docker-host drush-alias keycloak keycloak-db webhook-handler webhooks2tasks kubectl-build-deploy-dind local-api-data-watcher-pusher local-git ssh tests ui
+KIND_TESTS = local-api-data-watcher-pusher local-git tests
+KIND_TOOLS = kind helm kubectl jq
+
+# install lagoon charts and run lagoon test suites in a kind cluster
+.PHONY: kind/test
+kind/test: kind/cluster helm/repos $(addprefix local-dev/,$(KIND_TOOLS)) $(addprefix build/,$(KIND_SERVICES))
+ export CHARTSDIR=$$(mktemp -d ./lagoon-charts.XXX) \
+ && ln -sfn "$$CHARTSDIR" lagoon-charts.kind.lagoon \
+ && git clone https://github.com/uselagoon/lagoon-charts.git "$$CHARTSDIR" \
+ && cd "$$CHARTSDIR" \
+ && git checkout $(CHARTS_TREEISH) \
+ && export KUBECONFIG="$$(realpath ../kubeconfig.kind.$(CI_BUILD_TAG))" \
+ && export IMAGE_REGISTRY="registry.$$(../local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \
+ && $(MAKE) install-registry HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \
+ && cd .. && $(MAKE) -j6 kind/push-images && cd "$$CHARTSDIR" \
+ && $(MAKE) fill-test-ci-values TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) \
+ HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \
+ JQ=$$(realpath ../local-dev/jq) \
+ OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=$$IMAGE_REGISTRY/kubectl-build-deploy-dind:$(SAFE_BRANCH_NAME) \
+ IMAGE_REGISTRY=$$IMAGE_REGISTRY \
+ && sleep 30 \
+ && docker run --rm --network host --name ct-$(CI_BUILD_TAG) \
+ --volume "$$(pwd)/test-suite-run.ct.yaml:/etc/ct/ct.yaml" \
+ --volume "$$(pwd):/workdir" \
+ --volume "$$(realpath ../kubeconfig.kind.$(CI_BUILD_TAG)):/root/.kube/config" \
+ --workdir /workdir \
+ "quay.io/helmpack/chart-testing:v3.3.1" \
+ ct install
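+# The suite list can be narrowed by overriding TESTS on the command line, since
+# make variables passed as arguments take precedence over the default above, e.g.:
+#   make kind/test TESTS=[api,nginx]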
+
+LOCAL_DEV_SERVICES = api auth-server controllerhandler logs2email logs2microsoftteams logs2rocketchat logs2slack ui webhook-handler webhooks2tasks
+
+# kind/local-dev-patch will build the services in LOCAL_DEV_SERVICES on your machine, and then use kubectl patch to mount the folders into Kubernetes
+# the deployments should be restarted to trigger any updated code changes
+# `kubectl rollout undo deployment` can be used to rollback a deployment to before the annotated patch
+# ensure that the correct version of Node to build the services is set on your machine
+.PHONY: kind/local-dev-patch
+kind/local-dev-patch:
+ export KUBECONFIG="$$(pwd)/kubeconfig.kind.$(CI_BUILD_TAG)" && \
+ export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \
+ && for image in $(LOCAL_DEV_SERVICES); do \
+ echo "building $$image" \
+ && cd services/$$image && yarn install && yarn build && cd ../..; \
+ done \
+ && for image in $(LOCAL_DEV_SERVICES); do \
+ echo "patching lagoon-core-$$image" \
+ && ./local-dev/kubectl --namespace lagoon patch deployment lagoon-core-$$image --patch-file ./local-dev/kubectl-patches/$$image.yaml; \
+ done
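+# To roll a patched deployment back to its previous state, use the standard
+# kubectl rollout mechanism mentioned above ('api' here stands in for any
+# service name from LOCAL_DEV_SERVICES):
+#   ./local-dev/kubectl --namespace lagoon rollout undo deployment lagoon-core-api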
+
+# kind/dev can only be run once a cluster is up and running (run kind/test first) - it doesn't rebuild the cluster at all, just pushes the built images
+# into the image registry and reinstalls the lagoon-core helm chart.
+.PHONY: kind/dev
+kind/dev: $(addprefix build/,$(KIND_SERVICES))
+ export KUBECONFIG="$$(realpath ./kubeconfig.kind.$(CI_BUILD_TAG))" \
+ && export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \
+ && $(MAKE) kind/push-images && cd lagoon-charts.kind.lagoon \
+ && $(MAKE) install-lagoon-core IMAGE_TAG=$(SAFE_BRANCH_NAME) \
+ HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \
+ JQ=$$(realpath ../local-dev/jq) \
+ OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=$$IMAGE_REGISTRY/kubectl-build-deploy-dind:$(SAFE_BRANCH_NAME) \
+ IMAGE_REGISTRY=$$IMAGE_REGISTRY
+
+.PHONY: kind/push-images
+kind/push-images:
+ export KUBECONFIG="$$(pwd)/kubeconfig.kind.$(CI_BUILD_TAG)" && \
+ export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \
+ && docker login -u admin -p Harbor12345 $$IMAGE_REGISTRY \
+ && for image in $(KIND_SERVICES); do \
+ docker tag $(CI_BUILD_TAG)/$$image $$IMAGE_REGISTRY/$$image:$(SAFE_BRANCH_NAME) \
+ && docker push $$IMAGE_REGISTRY/$$image:$(SAFE_BRANCH_NAME); \
+ done
+
+## Use kind/retest to push only the local-dev and test images, and re-run the tests
+## It preserves the last built lagoon-core & lagoon-remote setup, reducing rebuild time
+.PHONY: kind/retest
+kind/retest:
+ export KUBECONFIG="$$(pwd)/kubeconfig.kind.$(CI_BUILD_TAG)" && \
+ export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \
+ && docker login -u admin -p Harbor12345 $$IMAGE_REGISTRY \
+ && for image in $(KIND_TESTS); do \
+ docker tag $(CI_BUILD_TAG)/$$image $$IMAGE_REGISTRY/$$image:$(SAFE_BRANCH_NAME) \
+ && docker push $$IMAGE_REGISTRY/$$image:$(SAFE_BRANCH_NAME); \
+ done \
+ && cd lagoon-charts.kind.lagoon \
+ && $(MAKE) install-tests TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) \
+ HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \
+ JQ=$$(realpath ../local-dev/jq) \
+ IMAGE_REGISTRY=$$IMAGE_REGISTRY \
+ && docker run --rm --network host --name ct-$(CI_BUILD_TAG) \
+ --volume "$$(pwd)/test-suite-run.ct.yaml:/etc/ct/ct.yaml" \
+ --volume "$$(pwd):/workdir" \
+ --volume "$$(realpath ../kubeconfig.kind.$(CI_BUILD_TAG)):/root/.kube/config" \
+ --workdir /workdir \
+ "quay.io/helmpack/chart-testing:v3.3.1" \
+ ct install
+
+.PHONY: kind/clean
+kind/clean: local-dev/kind
+ KIND_CLUSTER_NAME="$(CI_BUILD_TAG)" ./local-dev/kind delete cluster
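+# Taken together, a typical local iteration with the targets above looks like:
+#   make kind/test             # create the kind cluster, install charts, run all suites
+#   make kind/dev              # rebuild and push images, reinstall lagoon-core
+#   make kind/local-dev-patch  # mount locally built services into the cluster
+#   make kind/retest           # push only the test images and re-run the suites
+#   make kind/clean            # delete the kind cluster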
diff --git a/README.md b/README.md
index 4f7645b6fe..18e92042e9 100644
--- a/README.md
+++ b/README.md
@@ -1,31 +1,158 @@
-
+
-# amazee.io Lagoon - Docker Build and Deploy System for OpenShift & Kubernetes
+# Lagoon - the developer-focused application delivery platform for Kubernetes
+
+Lagoon solves what developers are dreaming about: A system that allows developers to locally develop their code and their services with Docker and run the exact same system in production. The same container images, the same service configurations and the same code.
+
+> Lagoon is an application delivery **platform**. Its primary focus is as a cloud-native tool for the deployment, management, security and operation of many applications. Lagoon greatly reduces the requirement on developers of those applications to have cloud-native experience or knowledge.
+
+Lagoon is fully open-source, built on open-source tools, built collaboratively with our users.
+
+## Installing Lagoon
+
+*Note that it is not necessary to install Lagoon onto your local machine if you are looking to maintain websites hosted on Lagoon.*
+
+Lagoon can be installed:
+
+- Locally (for evaluation, testing, debugging or development) using [Helm](https://helm.sh/) charts and [kind](https://kind.sigs.k8s.io/) - see the sketch below
+- Into your managed Kubernetes cloud provider of choice - it's running in production on [Amazon Elastic Kubernetes Service](https://aws.amazon.com/eks/), [Azure Kubernetes Service](https://azure.microsoft.com/en-au/services/kubernetes-service/), and [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/), but we are confident that it will also run on any other managed Kubernetes service.
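+
+As a minimal sketch, adding the chart repository and installing a Lagoon Remote into an existing cluster looks like this (the namespace and release names follow this repository's Makefile; a production install needs more values than shown):
+
+```bash
+helm repo add lagoon https://uselagoon.github.io/lagoon-charts/
+kubectl create namespace lagoon
+helm upgrade --install -n lagoon lagoon-remote lagoon/lagoon-remote
+```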
+
+For more information on developing or contributing to Lagoon, head to https://docs.lagoon.sh/lagoon/contributing-to-lagoon
+
+For more information on installing and administering Lagoon, head to https://docs.lagoon.sh/lagoon/administering-lagoon
+
+## Lagoon architecture
+
+Lagoon comprises two main components: Lagoon Core and Lagoon Remote. It's also built on several other third-party services, Operators and Controllers. In a full production setting, we recommend installing Lagoon Core and Remote into different Kubernetes Clusters. A single Lagoon Core installation is capable of serving multiple Remotes, but they can also be installed into the same cluster if preferred.
+
+To enhance security, Lagoon Core does not need administrator-level access to the Kubernetes clusters that are running Lagoon Remote. All inter-cluster communication happens only via RabbitMQ. This is hosted in Lagoon Core, and consumed (and published back to) by Lagoon Remote. This allows Lagoon Remotes to be managed by different teams, in different locations - even behind firewalls or inaccessible from the internet.
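+
+In practice this separation means Core and Remote can be installed from their own Helm charts into entirely different clusters, for example (a sketch only; the kube-context names are illustrative assumptions):
+
+```bash
+helm repo add lagoon https://uselagoon.github.io/lagoon-charts/
+helm upgrade --install -n lagoon-core --kube-context core-cluster lagoon-core lagoon/lagoon-core
+helm upgrade --install -n lagoon --kube-context remote-cluster lagoon-remote lagoon/lagoon-remote
+```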
+
+Lagoon services are mostly built in Node.js. More recent development occurs in Go, and most of the automation and scripting components are in Bash.
+
+### Lagoon Core
+
+All the services that handle the API, authentication and external communication are installed here. Installation is via a [Helm Chart](https://github.com/uselagoon/lagoon-charts/tree/main/charts/lagoon-core)
+- API
+ - [api](https://github.com/amazeeio/lagoon/tree/main/services/api) (the GraphQL API that powers Lagoon)
+ - [api-db](https://github.com/amazeeio/lagoon/tree/main/services/api-db) (the MariaDB storage for the API)
+ - [api-redis](https://github.com/amazeeio/lagoon/tree/main/services/api-redis) (the cache layer for API queries)
+- Authentication
+ - [keycloak](https://github.com/amazeeio/lagoon/tree/main/services/keycloak) (the main authentication application)
+ - [keycloak-db](https://github.com/amazeeio/lagoon/tree/main/services/keycloak-db) (the MariaDB storage for Keycloak)
+ - [auth-server](https://github.com/amazeeio/lagoon/tree/main/services/auth-server) (generates authentication tokens for Lagoon services)
+ - [ssh](https://github.com/amazeeio/lagoon/tree/main/services/ssh) (provides developers with ssh access to the sites hosted on Lagoon)
+- Messaging
+ - [broker](https://github.com/amazeeio/lagoon/tree/main/services/broker) (the RabbitMQ message service used to communicate with Lagoon Remote)
+ - [webhooks2tasks](https://github.com/amazeeio/lagoon/tree/main/services/webhooks2tasks) (the service that converts incoming webhooks to API updates)
+ - [controllerhandler](https://github.com/amazeeio/lagoon/tree/main/services/controllerhandler) (the service that relays build progress from the controllers)
+- Webhooks
+ - [webhook-handler](https://github.com/amazeeio/lagoon/tree/main/services/webhook-handler) (the external service that Git Repositories and Registries communicate with)
+- Notifications
+ - [logs2email](https://github.com/amazeeio/lagoon/tree/main/services/logs2email) (the service that pushes build notifications to a nominated email address)
+ - [logs2slack](https://github.com/amazeeio/lagoon/tree/main/services/logs2slack) (the service that pushes build notifications to a nominated Slack (or Discord) channel)
+ - [logs2rocketchat](https://github.com/amazeeio/lagoon/tree/main/services/logs2rocketchat) (the service that pushes build notifications to a nominated Rocket Chat channel)
+ - [logs2microsoftteams](https://github.com/amazeeio/lagoon/tree/main/services/logs2microsoftteams) (the service that pushes build notifications to a nominated Microsoft Teams channel)
+- Other Services
+ - [ui](https://github.com/amazeeio/lagoon/tree/main/services/ui) (the main user interface and dashboard for Lagoon)
+ - [lagoon-cli](https://github.com/amazeeio/lagoon-cli) (the command-line interface for managing sites on Lagoon)
+ - [drush-alias](https://github.com/amazeeio/lagoon/tree/main/services/drush-alias) (provides Drupal developers with an automated alias service for Drush)
+
+### Lagoon Remote
+
+All the services that are used to provision, deploy and maintain sites hosted by Lagoon on Kubernetes live here. These services are mostly comprised of third-party tools, developed external to Lagoon itself. Installation is via a [Helm Chart](https://github.com/uselagoon/lagoon-charts/tree/main/charts/lagoon-remote)
+
+- [Lagoon Build Deploy](https://github.com/amazeeio/lagoon-kbd) (the controllers that handle building and deploying sites onto Lagoon)
+- [kubectl-build-deploy](https://github.com/amazeeio/lagoon/tree/main/images/kubectl-build-deploy-dind) (the service that computes which services, configuration and settings to provision for Kubernetes)
+- [docker-host](https://github.com/amazeeio/lagoon/tree/main/images/docker-host) (the service that stores and caches upstream docker images for use in builds)
+- [lagoon-idler](https://github.com/amazeeio/lagoon-idler) (optional operator that can idle non-production sites not currently in use to conserve resources)
+- [Dioscuri](https://github.com/amazeeio/dioscuri) (optional operator that provides Active/Standby functionality to Lagoon)
+- [dbaas-operator](https://github.com/amazeeio/dbaas-operator) (optional operator that provisions databases from an underlying managed database)
+
+### Additional Services
+
+These services are usually installed alongside either Lagoon Core or Lagoon Remote to provide additional functionality to Lagoon.
+
+- Registry (required)
+ - [Harbor](https://goharbor.io/) (provides image registry services to Lagoon projects)
+  - [Trivy](https://github.com/aquasecurity/trivy) (scans images for vulnerabilities, and can report to Lagoon)
+
+- Lagoon Logging (optional)
+  - [lagoon-logging](https://github.com/uselagoon/lagoon-charts/tree/main/charts/lagoon-logging) (utilizes [banzaicloud/logging-operator](https://github.com/banzaicloud/logging-operator) to collect and augment container & router logs from all sites, and sends them to a logs-dispatcher)
+  - [logs-dispatcher](https://github.com/amazeeio/lagoon/tree/main/services/logs-dispatcher) (collects application logs from sites, as well as container & router logs from lagoon-logging, enriches them with additional metadata and sends them to a central log concentrator)
+ - [lagoon-logs-concentrator](https://github.com/uselagoon/lagoon-charts/tree/main/charts/lagoon-logs-concentrator) (collects logs from remote logs-dispatchers and sends them to Elasticsearch)
+
+- Open Policy Agent (optional)
+
+ - [lagoon-gatekeeper](https://github.com/uselagoon/lagoon-charts/tree/main/charts/lagoon-gatekeeper) (centralized policy library for Lagoon)
+
+- Elasticsearch (optional)
+ - [Open Distro for Elasticsearch](https://opendistro.github.io/for-elasticsearch/) (provides centralized log storage, search and analysis)
+ - [Kibana](https://opendistro.github.io/for-elasticsearch-docs/docs/kibana/) (the default user interface for Elasticsearch searching and visualization)
+
+- Managed databases, for use with DBaaS operator (optional)
+  - MariaDB (self managed or via [Amazon RDS for MariaDB](https://aws.amazon.com/rds/mariadb/), [Azure Database for MariaDB](https://docs.microsoft.com/en-us/azure/mariadb/))
+
+ - MySQL (self managed or via [Amazon RDS for MySQL](https://aws.amazon.com/rds/mysql/), [Amazon Aurora MySQL](https://aws.amazon.com/rds/aurora/mysql-features/), [Azure Database for MySQL](https://azure.microsoft.com/en-au/services/mysql), [Cloud SQL for MySQL](https://cloud.google.com/sql/docs/mysql))
+
+ - PostgreSQL (self managed or via [Amazon RDS for PostgreSQL](https://aws.amazon.com/rds/postgresql/), [Amazon Aurora PostgreSQL](https://aws.amazon.com/rds/aurora/postgresql-features/), [Azure Database for PostgreSQL](https://docs.microsoft.com/en-us/azure/postgresql), [Cloud SQL for PostgreSQL](https://cloud.google.com/sql/docs/postgres) )
+
+ - MongoDB (self managed, or via [Amazon DocumentDB](https://aws.amazon.com/documentdb/), [Azure Cosmos DB](https://azure.microsoft.com/en-au/services/cosmos-db/) )
+
+### Testing
+
+Lagoon has a comprehensive [test suite](https://github.com/amazeeio/lagoon/tree/main/tests/tests), designed to cover most end-user scenarios. The testing is automated in Ansible, and runs in Jenkins, but can also be run locally in a self-contained cluster. The testing provisions a standalone Lagoon cluster, running on Kind (Kubernetes in Docker). This cluster is made of Lagoon Core, Lagoon Remote, an image registry and a set of managed databases. It runs test deployments and scenarios for a range of Node.js, Drupal, Python and NGINX projects, all built using the latest Lagoon images.
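+
+The same suite can be run locally from the repository root, assuming Docker and GNU make are available (the targets are defined in this repository's Makefile):
+
+```bash
+make kind/test    # stand up the kind cluster, install the charts, run the suites
+make kind/clean   # tear the cluster down afterwards
+```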
+
+
+## Other Lagoon components
+
+Here are a number of other repositories, tools and components used in Lagoon
+
+### [Lagoon Images](https://github.com/uselagoon/lagoon-images)
+
+These images are used by developers to build web applications on, and come preconfigured for running on Lagoon as well as locally. There are PHP, NGINX, Node.js, Python (and more) variants. These images are regularly updated, and are not only used in hosted projects - they're used in Lagoon too!
+
+To browse the full set of images, head to https://hub.docker.com/u/uselagoon
+
+### [Lagoon Examples](https://github.com/uselagoon/lagoon-examples)
+
+A meta-project that houses a wide range of example projects, ready-made for use on Lagoon. These projects also include test suites that are used in the testing of the images. Please request an example via that repository if you want to see a particular one, or even better, have a crack at making one!
+
+### [Lagoon Charts](https://github.com/uselagoon/lagoon-charts)
+
+Houses all the Helm charts used to deploy Lagoon; it comes with a built-in test suite.
+
+To add the repository: `helm repo add lagoon https://uselagoon.github.io/lagoon-charts/`
+
+### [amazee.io Charts](https://github.com/amazeeio/charts)
+
+amazee.io has developed a number of tools, charts and operators designed to work with Lagoon and other Kubernetes services.
+
+To add the repository: `helm repo add amazeeio https://amazeeio.github.io/charts/`
-Lagoon solves what developers are dreaming about: A system that allows developers to locally develop their code and their services with Docker and run the exact same system in production. The same Docker images, the same service configurations and the same code.
-Please reference our [documentation](https://lagoon.readthedocs.io/) for detailed information on using, developing, and administering Lagoon.
## Contribute
-Do you want to contribute to Lagoon? Fabulous! [See our Documentation](https://lagoon.readthedocs.io/en/latest/contributing/) on how to get started.
-
-## Services
-
-This project contains the following services:
-- [UI](/services/ui/)
-- [logs2microsoftteams](/services/logs2microsoftteams/)
-- [logs2rocketchat](/services/logs2rocketchat/)
-- [logs-db-ui](/services/logs-db-ui/)
-- [logs-db](/services/logs-db/)
-- [auth-server](/services/auth-server/)
-- [ssh](/services/ssh/)
-- [logs2slack](/services/logs2slack/)
-- [drush-alias](/services/drush-alias/)
-- [webhooks2tasks](/services/webhooks2tasks/)
-- [logs2email](/services/logs2email/)
-- [backup-handler](/services/backup-handler/)
-- [webhook-handler](/services/webhook-handler/)
-- [logs2logs-db](/services/logs2logs-db/)
+Do you want to contribute to Lagoon? Fabulous! [See our Documentation](https://docs.lagoon.sh/lagoon/contributing-to-lagoon/developing-lagoon) on how to get started.
+
+
+
+## History
+
+Lagoon was originally created and open sourced by the team at [amazee.io](https://www.amazee.io/) in August 2017, and powers their global hosting platform.
+
+
+
+## Connect
+
+Find more information about Lagoon:
+
+At our website - https://lagoon.sh
+
+In our documentation - https://docs.lagoon.sh
+
+In our blog - https://dev.to/uselagoon
+
+Via our socials - https://twitter.com/uselagoon
diff --git a/charts/dbaas-operator.yaml b/charts/dbaas-operator.yaml
deleted file mode 100644
index 3181b097fe..0000000000
--- a/charts/dbaas-operator.yaml
+++ /dev/null
@@ -1,382 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
- labels:
- control-plane: controller-manager
- name: dbaas-operator-system
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.2.4
- creationTimestamp: null
- name: mariadbconsumers.mariadb.amazee.io
-spec:
- group: mariadb.amazee.io
- names:
- kind: MariaDBConsumer
- listKind: MariaDBConsumerList
- plural: mariadbconsumers
- singular: mariadbconsumer
- scope: Namespaced
- validation:
- openAPIV3Schema:
- description: MariaDBConsumer is the Schema for the mariadbconsumers API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: MariaDBConsumerSpec defines the desired state of MariaDBConsumer
- properties:
- consumer:
- description: MariaDBConsumerData defines the provider link for this
- consumer
- properties:
- database:
- type: string
- password:
- type: string
- services:
- description: MariaDBConsumerServices defines the provider link for
- this consumer
- properties:
- primary:
- type: string
- replicas:
- items:
- type: string
- type: array
- type: object
- username:
- type: string
- type: object
- environment:
- description: These are the spec options for consumers
- type: string
- provider:
- description: MariaDBConsumerProvider defines the provider link for this
- consumer
- properties:
- hostname:
- type: string
- name:
- type: string
- namespace:
- type: string
- port:
- type: string
- readReplicas:
- items:
- type: string
- type: array
- type:
- type: string
- type: object
- type: object
- status:
- description: MariaDBConsumerStatus defines the observed state of MariaDBConsumer
- type: object
- type: object
- version: v1
- versions:
- - name: v1
- served: true
- storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
----
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
- annotations:
- controller-gen.kubebuilder.io/version: v0.2.4
- creationTimestamp: null
- name: mariadbproviders.mariadb.amazee.io
-spec:
- group: mariadb.amazee.io
- names:
- kind: MariaDBProvider
- listKind: MariaDBProviderList
- plural: mariadbproviders
- singular: mariadbprovider
- scope: Namespaced
- validation:
- openAPIV3Schema:
- description: MariaDBProvider is the Schema for the mariadbproviders API
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- description: MariaDBProviderSpec defines the desired state of MariaDBProvider
- properties:
- environment:
- description: These are the spec options for providers
- type: string
- hostname:
- type: string
- name:
- type: string
- namespace:
- type: string
- password:
- type: string
- port:
- type: string
- readReplicaHostnames:
- items:
- type: string
- type: array
- type:
- type: string
- user:
- type: string
- type: object
- status:
- description: MariaDBProviderStatus defines the observed state of MariaDBProvider
- type: object
- type: object
- version: v1
- versions:
- - name: v1
- served: true
- storage: true
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
- name: dbaas-operator-leader-election-role
- namespace: dbaas-operator-system
-rules:
-- apiGroups:
- - ""
- resources:
- - configmaps
- verbs:
- - get
- - list
- - watch
- - create
- - update
- - patch
- - delete
-- apiGroups:
- - ""
- resources:
- - configmaps/status
- verbs:
- - get
- - update
- - patch
-- apiGroups:
- - ""
- resources:
- - events
- verbs:
- - create
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- creationTimestamp: null
- name: dbaas-operator-manager-role
-rules:
-- apiGroups:
- - ""
- resources:
- - services
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - mariadb.amazee.io
- resources:
- - mariadbconsumers
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - mariadb.amazee.io
- resources:
- - mariadbconsumers/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - mariadb.amazee.io
- resources:
- - mariadbproviders
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - mariadb.amazee.io
- resources:
- - mariadbproviders/status
- verbs:
- - get
- - patch
- - update
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: dbaas-operator-proxy-role
-rules:
-- apiGroups:
- - authentication.k8s.io
- resources:
- - tokenreviews
- verbs:
- - create
-- apiGroups:
- - authorization.k8s.io
- resources:
- - subjectaccessreviews
- verbs:
- - create
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: dbaas-operator-leader-election-rolebinding
- namespace: dbaas-operator-system
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: dbaas-operator-leader-election-role
-subjects:
-- kind: ServiceAccount
- name: default
- namespace: dbaas-operator-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: dbaas-operator-manager-rolebinding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: dbaas-operator-manager-role
-subjects:
-- kind: ServiceAccount
- name: default
- namespace: dbaas-operator-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: dbaas-operator-proxy-rolebinding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: dbaas-operator-proxy-role
-subjects:
-- kind: ServiceAccount
- name: default
- namespace: dbaas-operator-system
----
-apiVersion: v1
-kind: Service
-metadata:
- labels:
- control-plane: controller-manager
- name: dbaas-operator-controller-manager-metrics-service
- namespace: dbaas-operator-system
-spec:
- ports:
- - name: https
- port: 8443
- targetPort: https
- selector:
- control-plane: controller-manager
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- labels:
- control-plane: controller-manager
- name: dbaas-operator-controller-manager
- namespace: dbaas-operator-system
-spec:
- replicas: 1
- selector:
- matchLabels:
- control-plane: controller-manager
- template:
- metadata:
- labels:
- control-plane: controller-manager
- spec:
- containers:
- - args:
- - --secure-listen-address=0.0.0.0:8443
- - --upstream=http://127.0.0.1:8080/
- - --logtostderr=true
- - --v=10
- image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
- name: kube-rbac-proxy
- ports:
- - containerPort: 8443
- name: https
- - args:
- - --metrics-addr=127.0.0.1:8080
- - --enable-leader-election
- command:
- - /manager
- image: amazeeio/dbaas-operator:v0.1.0
- name: manager
- resources:
- limits:
- cpu: 100m
- memory: 30Mi
- requests:
- cpu: 100m
- memory: 20Mi
- terminationGracePeriodSeconds: 10
\ No newline at end of file
diff --git a/charts/dbaas-providers.yaml b/charts/dbaas-providers.yaml
deleted file mode 100644
index 0f711f0cda..0000000000
--- a/charts/dbaas-providers.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-apiVersion: mariadb.amazee.io/v1
-kind: MariaDBProvider
-metadata:
- name: mariadbprovider-development
-spec:
- environment: development
- hostname: development.172.17.0.1.nip.io
- readreplica_hostnames:
- - development.replica.172.17.0.1.nip.io
- password: password
- port: '3306'
- user: root
----
-apiVersion: mariadb.amazee.io/v1
-kind: MariaDBProvider
-metadata:
- name: mariadbprovider-production
-spec:
- environment: production
- hostname: production.172.17.0.1.nip.io
- readreplica_hostnames:
- - production.replica.172.17.0.1.nip.io
- password: password
- port: '3306'
- user: root
\ No newline at end of file
diff --git a/charts/role-mariadb-operator.yaml b/charts/role-mariadb-operator.yaml
deleted file mode 100644
index 25f7b44b50..0000000000
--- a/charts/role-mariadb-operator.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# use for lagoon-deployer to be able to create mariadbconsumer kinds
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: mariadbconsumer-role
- labels:
- # Add these permissions to the "admin" and "edit" default roles.
- rbac.authorization.k8s.io/aggregate-to-admin: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
-rules:
-- apiGroups: ["mariadb.amazee.io"]
- resources: ["mariadbconsumers"]
- verbs: ["*"]
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: mariadbprovider-role
- labels:
- # Add these permissions to the "admin" and "edit" default roles.
- rbac.authorization.k8s.io/aggregate-to-admin: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
-rules:
-- apiGroups: ["mariadb.amazee.io"]
- resources: ["mariadbproviders"]
- verbs: ["*"]
\ No newline at end of file
diff --git a/docker-compose.end2end.yaml b/docker-compose.end2end.yaml
deleted file mode 100644
index e0b5a7bde4..0000000000
--- a/docker-compose.end2end.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-version: '3.2'
-services:
- tests:
- environment:
- - WEBHOOK_HOST=webhook-handler-lagoon-master.lagoon.ch.amazee.io
- - WEBHOOK_PORT=443
- - WEBHOOK_PROTOCOL=https
- - OPENSHIFT_ROUTE_SUFFIX=ch.amazee.io
- - WEBHOOK_REPO_PREFIX=git@github.com:lagoon-end2end-ci/
- - GIT_REPO_PREFIX=git@github.com:lagoon-end2end-ci/
- - SSH_PRIVATE_KEY=-----BEGIN RSA PRIVATE KEY-----\nMIIJKAIBAAKCAgEAxGZZrOV7Islo5p51Moabfd1YB8qbHvQZfJDZJmSU4jNxMf8G\nQH8KIM6ndi60xIiDlkh9R50Gs0fgnPaBwpjMjcUay5EvzBJdMmd/EPhg359+4f5Z\nQiGTVaB5UoGJKg9DEK4Xxi+QLpQ1CiJXvd3QOqnQlJZ2WYwz4kdLxF0d+sRrl+0G\nAISt9Gg9kq6wa7k7RRr4/OyD/9DhDr1GXvHXFPRv6QqKq084CqiUaarP7OcbZKi5\nEyMkf0s86ZTttQPqQijWsenLAw6t7J0vM38eojPDMFX4fJ7wVvbHmsdcwb2YxwD0\nk7I85mV5uM99v7owJ/0YSJGcN2YESq8c68rbRp1iSfDhchd9SUyYMFlrj3R+dxte\nTwvN2W5hIEld8Ip+dUWUVaaTeLkFLmqmVhvcMJNmuj+Wnp9USwki6U5HdpgBJPT5\nYJia3/LyE5IlPaRfZ+vBDQqKOeeqkncVtqcZ468ylT0qpqjtV4OSuCzl+P/TeJ+K\npCxDoqp88yQpYRYn9ztvEVvOkT8RERnT0/NVCNKAwHFOXrkK/BJs/h3fj2NddeVC\nJXdwiB4m+T2C/RHtGxVColqUf2nEntXxweuJgqBz+4YxXqRrKu4p5L4NuudcFAyg\nbIRpbaTZDI+vmmXnTXNP6ymMo5fNJ0/IPEBAoDkkc6ZmKdM5Yth6RaCzolsCAwEA\nAQKCAgBRL4n0SaxcFevyISCLMU+HeP8RwXPcGMWMU4ggMcXFt8QcCbK46Ir0QtjA\nps/8yf2zGuYGu2dwps63s8KyAV3VYNwRuEOM1S6HTncdOb850YGuw+h7yMtRwxND\nwt5Db6mmdIDJYRH13zgJp2ajytRv25CoS4ZCwA0WhahRVLi214kudUFc53vNI2YI\ng4PUE+7nQx4X12E9V0ghQqabStdBB0ZXjA8Ef6vH5CXthDmNUX9mXcSbn5RPtndI\ni1Kz2Bl3HdCaHO3ZprDItbU0UWEFZeZSzcb5JO5u1HZwiebTA5/q638uqqg4CUyG\n0+bEYZ/Ud/AY13A/CkHN6ZNH+UQotCH3GLyaTQq6OhyXlgMBojn3xs9iMUclFcuy\nkbZ1jAxqci25pxCIeNDHBDKRyxgSkDPna8ZZ4eKGXjIZzsds4+IDkYJLMg0OCtd2\nKm+ZPM2CFU2YCqt11dlr0higGK/9gdpajJMVgEYAmigQ670LdcBc4XIOpPMrR70a\nPjbF9ze/UqtKDWtz8KMIUcvr0CjhHa3XRgMJiM34hIOZU6xf6rjEgkN2Geq24u1b\nhUW8URb+5dcasQ9iIfkNn3R+zK5PzyvQNf6+XrbVjpLqPHXJYrD85EKlXkPqiE6y\n3ehYMrIaZIY6gMuIVcg8AEtsm5EwQY7ETw4YnMQLKfyweLsHEQKCAQEA5vavDyQj\nn6PqfF1Ntr3N/FfwRr/167HB+/3wHT+FwRpCw62ACeBC0iFXmzG2wfQfot60fUGa\nQEJiUDGZFvBM0tiTpzmgGG7QpRbPe1919Sl5LZeLA9bngRnmqn5zAkmVTeBCO/4F\nMSk9hnBZ0v0W3DqNmjuIH298g3gl4VJeKpILd62LbhjvhjT/LXlekYDEj3p9Xob8\n1OQxFJgftFyg4P07RRaUzsNLhqEdY3VxDcUMb9459fEYeb7sYig+zVPaZQ31aMVK\nj6K+XiH5M5uKJWkPdMDUG84jreFAdBY9kGCuSulTWgmTLlMKSI85q5hkckY2EQQL\n5u456xfyuFcnEwKCAQEA2bCCcqcGIAiMwk/6z7XIJeUsSUg+ObvkEdogk5n6Y1Ea\nt5LxMpQzhG6SHJ2f38VyKgv9e/jnwXI8eiejper6OeQEBG4+AedcLYi0V5SUMIgD\nX4bxT9+qCwYrwt9YHkJySk237WZUWJPVfxHg0vqNYyD/CXBowx0nm8jEuZ8iT+CW\nO2uZq+0DO2WqoYT54lZux6aEzm+oAkzwJJVXJcUVPg7bJXK1ObOzvHpkZJxHL8+S\nKufzew/CXALKWHoCkEP/P8b7oxjcjQI3KK0EM2fABNvN28+qscqTqQjfAsNw24Ob\nP8rL8amgd/x7iddIbEpOSoLAH1gVoxJXA0oqkC6YmQKCAQEAiIeoomW1nUgTdCLf\nrrfyzUnaoqgVIpf42RKa319OnQD+GJg2iSAFwBuvovE3XN4H2YqW3Thri7LyVP+M\nxM+WSkl2tzLMXcUcF4staXvbyeoTVQ0xQzrFrT53aa/IIsEGvntkC/y0awQ6937w\nylWMLvF6BYNNi2+nBjx+//xl5/pfRwbS1mltJkOr2ODXM2IQT9STyc44JU0jak4m\n58Kd44IuiD+6BaPSwKn7KnEhPIeQO4z9bFJyKn3fVIL/5Pa9smEXAjEmS1Rj/ldM\n7eHzPvwlA9p9SFaKJt5x8G25ROCyB1x4RlBEreyutofcCoDSV+8DRPnEY2XN3RhS\nBgCW+wKCAQAyHrqaDSyS2YUXA/UaN59CYlZk9PYmssTa+16+vRfyH+1H0NQpsgd+\neTq4ze3ORpZ3adVN4svxNQ0JjvDVtZkxFPd70aoDJDKL5UqoU3QfDGHCb75FhpiO\n+ze+IVAXf3Ly+pvbU9Edp8PjAsnBqaA9743orXHhYmgJLRtmJWZv/6R3P9ncxLAW\nz9yOXaBu5TmSTBuNsBV9mhs8QQoUjyDJ7f+0yolBJMfAIGnW5EhbkK31pPvhHIrC\nRn4yCr1mW9F77KNpNMMq0BTFD7jE4SFLvRPThSem0Z5Xr8wwxbDJSa7H7DgyhryE\ng6Qp42AwVpdZ/mqfjNdGeWWBQ2UzVxcZAoIBAHNXgjD3umcKciYRAbltNJrXx9xk\ndAv8I69oEMCy4hCmvKUjy/UI9NqXFjBb/G6VGgh6NUE9o9o27t1Y5Plm0izyCA1M\nDFruaRfjyJ8qjbEifcqRtcF3rzsBiXIwdmbN6qT4PUipN2elpUAd7J1OIwGIIe3u\nCWNyOTNL+2+oQ/Eg1Y99mg3yrsVyOwhynVE80/X5cy07bXXR5rv1x4NKSVbPhlnt\nL6J5iAoqoDKICzjcgF5x3mj9YFWZrC3aRxRrN5RoEgeVdcXeK56UJqXHjmKN++m3\nc8OPEIBZiD8UJuhSNSOLiBFrGz6toy6rpHavqqknGhVWotXsAs1h8LNkBe8=\n-----END RSA PRIVATE KEY-----
- - SSH_HOST=api-lagoon-master.lagoon.ch.amazee.io
- - SSH_PORT=31472
- - API_HOST=api-lagoon-master.lagoon.ch.amazee.io
- - API_PROTOCOL=https
- - API_PORT=443
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 84b6caafc4..690549ed08 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -7,10 +7,6 @@ services:
- ./services/api-db/rerun_initdb.sh:/rerun_initdb.sh
ports:
- '3366:3306'
- labels:
- lagoon.type: custom
- lagoon.template: services/api-db/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/api-db:v1-9-1
webhook-handler:
image: ${IMAGE_REPO:-lagoon}/webhook-handler
command: yarn run dev
@@ -19,19 +15,11 @@ services:
volumes:
- ./services/webhook-handler/src:/app/services/webhook-handler/src
- ./node-packages:/app/node-packages:delegated
- labels:
- lagoon.type: custom
- lagoon.template: services/webhook-handler/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/webhook-handler:v1-9-1
backup-handler:
image: ${IMAGE_REPO:-lagoon}/backup-handler
restart: on-failure
ports:
- '7778:3000'
- labels:
- lagoon.type: custom
- lagoon.template: services/backup-handler/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/backup-handler:v1-9-1
depends_on:
- broker
broker:
@@ -39,60 +27,36 @@ services:
ports:
- '15672:15672'
- '5672:5672'
- labels:
- lagoon.type: rabbitmq-cluster
- lagoon.template: services/broker/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/broker:v1-9-1
controllerhandler:
image: ${IMAGE_REPO:-lagoon}/controllerhandler
command: yarn run dev
volumes:
- ./services/controllerhandler/src:/app/services/controllerhandler/src
- ./node-packages:/app/node-packages:delegated
- labels:
- lagoon.type: custom
- lagoon.template: services/controllerhandler/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/controllerhandler:v1-9-1
logs2rocketchat:
image: ${IMAGE_REPO:-lagoon}/logs2rocketchat
command: yarn run dev
volumes:
- ./services/logs2rocketchat/src:/app/services/logs2rocketchat/src
- ./node-packages:/app/node-packages:delegated
- labels:
- lagoon.type: custom
- lagoon.template: services/logs2rocketchat/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2rocketchat:v1-9-1
logs2slack:
image: ${IMAGE_REPO:-lagoon}/logs2slack
command: yarn run dev
volumes:
- ./services/logs2slack/src:/app/services/logs2slack/src
- ./node-packages:/app/node-packages:delegated
- labels:
- lagoon.type: custom
- lagoon.template: services/logs2slack/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2slack:v1-9-1
logs2microsoftteams:
image: ${IMAGE_REPO:-lagoon}/logs2microsoftteams
command: yarn run dev
volumes:
- ./services/logs2microsoftteams/src:/app/services/logs2microsoftteams/src
- ./node-packages:/app/node-packages:delegated
- labels:
- lagoon.type: custom
- lagoon.template: services/logs2microsoftteams/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2microsoftteams:v1-9-1
logs2email:
image: ${IMAGE_REPO:-lagoon}/logs2email
command: yarn run dev
volumes:
- ./services/logs2email/src:/app/services/logs2email/src
- ./node-packages:/app/node-packages:delegated
- labels:
- lagoon.type: custom
- lagoon.template: services/logs2slack/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs2email:v1-9-1
depends_on:
- mailhog
mailhog:
@@ -107,19 +71,16 @@ services:
volumes:
- ./services/webhooks2tasks/src:/app/services/webhooks2tasks/src
- ./node-packages:/app/node-packages:delegated
- labels:
- lagoon.type: custom
- lagoon.template: services/webhooks2tasks/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/webhooks2tasks:v1-9-1
api:
image: ${IMAGE_REPO:-lagoon}/api
command: yarn run dev
volumes:
- ./services/api/src:/app/services/api/src
- ./node-packages:/app/node-packages:delegated
+ - /app/node-packages/commons/dist
environment:
- CI=${CI:-true}
- - REGISTRY=172.17.0.1:8084 # Docker network bridge and forwarded port for harbor-nginx
+ - REGISTRY=harbor.172.17.0.1.nip.io:18080 # Docker network bridge and forwarded port for harbor-nginx
depends_on:
- api-db
- keycloak
@@ -143,10 +104,6 @@ services:
- ./services/ui/package.json:/app/services/ui/package.json
ports:
- '8888:3000'
- labels:
- lagoon.type: custom
- lagoon.template: services/ui/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/ui:v1-9-1
ssh:
image: ${IMAGE_REPO:-lagoon}/ssh
depends_on:
@@ -164,10 +121,6 @@ services:
volumes:
- ./services/ssh/home/command.sh:/home/command.sh
- ./services/ssh/home/rsh.sh:/home/rsh.sh
- labels:
- lagoon.type: custom
- lagoon.template: services/ssh/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/ssh:v1-9-1
auth-server:
image: ${IMAGE_REPO:-lagoon}/auth-server
command: yarn run dev
@@ -178,10 +131,6 @@ services:
- ./services/auth-server/src:/app/services/auth-server/src
ports:
- '3001:3000'
- labels:
- lagoon.type: custom
- lagoon.template: services/auth-server/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/auth-server:v1-9-1
keycloak:
image: ${IMAGE_REPO:-lagoon}/keycloak
user: '111111111'
@@ -189,10 +138,6 @@ services:
- keycloak-db
ports:
- '8088:8080'
- labels:
- lagoon.type: custom
- lagoon.template: services/keycloak/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/keycloak:v1-9-1
keycloak-db:
image: ${IMAGE_REPO:-lagoon}/keycloak-db
ports:
@@ -224,8 +169,6 @@ services:
- DELETED_STATUS_CODE=404
volumes:
- ./tests:/ansible
- labels:
- lagoon.type: none
tests-openshift:
image: ${IMAGE_REPO:-lagoon}/tests
environment:
@@ -249,16 +192,12 @@ services:
- DELETED_STATUS_CODE=503
volumes:
- ./tests:/ansible
- labels:
- lagoon.type: none
local-git:
image: ${IMAGE_REPO:-lagoon}/local-git
environment:
- GIT_AUTHORIZED_KEYS=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDEZlms5XsiyWjmnnUyhpt93VgHypse9Bl8kNkmZJTiM3Ex/wZAfwogzqd2LrTEiIOWSH1HnQazR+Cc9oHCmMyNxRrLkS/MEl0yZ38Q+GDfn37h/llCIZNVoHlSgYkqD0MQrhfGL5AulDUKIle93dA6qdCUlnZZjDPiR0vEXR36xGuX7QYAhK30aD2SrrBruTtFGvj87IP/0OEOvUZe8dcU9G/pCoqrTzgKqJRpqs/s5xtkqLkTIyR/SzzplO21A+pCKNax6csDDq3snS8zfx6iM8MwVfh8nvBW9seax1zBvZjHAPSTsjzmZXm4z32/ujAn/RhIkZw3ZgRKrxzryttGnWJJ8OFyF31JTJgwWWuPdH53G15PC83ZbmEgSV3win51RZRVppN4uQUuaqZWG9wwk2a6P5aen1RLCSLpTkd2mAEk9PlgmJrf8vITkiU9pF9n68ENCoo556qSdxW2pxnjrzKVPSqmqO1Xg5K4LOX4/9N4n4qkLEOiqnzzJClhFif3O28RW86RPxERGdPT81UI0oDAcU5euQr8Emz+Hd+PY1115UIld3CIHib5PYL9Ee0bFUKiWpR/acSe1fHB64mCoHP7hjFepGsq7inkvg2651wUDKBshGltpNkMj6+aZedNc0/rKYyjl80nT8g8QECgOSRzpmYp0zli2HpFoLOiWw== ansible-testing
ports:
- '2222:22'
- labels:
- lagoon.type: none
remotedev:
image: jhen0409/remotedev-server
command: node main.js
@@ -267,8 +206,6 @@ services:
- PORT=9090
ports:
- '9090:9090'
- labels:
- lagoon.type: none
local-api-data-watcher-pusher:
depends_on:
- api
@@ -276,8 +213,6 @@ services:
volumes:
- ./local-dev/api-data:/api-data
- ./local-dev/api-data-watcher-pusher:/home
- labels:
- lagoon.type: none
local-dbaas-provider:
image: ${IMAGE_REPO:-lagoon}/local-dbaas-provider
restart: always
@@ -290,6 +225,13 @@ services:
MYSQL_ROOT_PASSWORD: 'password'
ports:
- '3306:3306'
+ local-mongodb-dbaas-provider:
+ image: ${IMAGE_REPO:-lagoon}/local-mongodb-dbaas-provider
+ restart: always
+ labels:
+ lagoon.type: none
+ ports:
+ - '27017:27017'
local-minio:
image: minio/minio
entrypoint: sh
@@ -299,14 +241,10 @@ services:
environment:
- MINIO_ACCESS_KEY=minio
- MINIO_SECRET_KEY=minio123
- labels:
- lagoon.type: none
local-registry:
image: ${IMAGE_REPO:-lagoon}/local-registry
ports:
- '5000:5000'
- labels:
- lagoon.type: none
drush-alias:
image: ${IMAGE_REPO:-lagoon}/drush-alias
volumes:
@@ -314,67 +252,7 @@ services:
ports:
- '8087:8080'
labels:
- lagoon.type: custom
- lagoon.template: services/drush-alias/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/drush-alias:v1-9-1
version: '2'
- logs-db:
- image: ${IMAGE_REPO:-lagoon}/logs-db
- user: '111111111'
- environment:
- KEYCLOAK_URL: http://keycloak:8080
- ports:
- - '9200:9200'
- networks:
- default:
- aliases:
- - logs-db-service
- labels:
- lagoon.type: elasticsearch
- lagoon.template: services/logs-db/.lagoon.single.yml
- lagoon.image: amazeeiolagoon/logs-db:v1-9-1
- logs-forwarder:
- image: ${IMAGE_REPO:-lagoon}/logs-forwarder
- user: '111111111'
- labels:
- lagoon.type: custom
- lagoon.template: services/logs-forwarder/.lagoon.single.yml
- lagoon.image: amazeeiolagoon/logs-forwarder:v1-9-1
- logs-db-ui:
- image: ${IMAGE_REPO:-lagoon}/logs-db-ui
- user: '111111111'
- ports:
- - '5601:5601'
- environment:
- KEYCLOAK_URL: http://${KEYCLOAK_URL:-docker.for.mac.localhost:8088}
- LOGSDB_UI_URL: http://0.0.0.0:5601
- labels:
- lagoon.type: kibana
- lagoon.template: services/logs-db-ui/.lagoon.yml
- lagoon.image: amazeeiolagoon/logs-db-ui:v1-9-1
- logs-db-curator:
- image: ${IMAGE_REPO:-lagoon}/logs-db-curator
- user: '111111111'
- labels:
- lagoon.type: cli
- lagoon.template: services/logs-db-curator/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/logs-db-curator:v1-9-1
- logs2logs-db:
- image: ${IMAGE_REPO:-lagoon}/logs2logs-db
- user: '111111111'
- command:
- - '--config.reload.automatic'
- - '--config.reload.interval'
- - '1s'
- ports:
- - '5140:5140/udp'
- - '5141:5141/udp'
- volumes:
- - './services/logs2logs-db/pipeline:/usr/share/logstash/pipeline'
- labels:
- lagoon.type: logstash
- lagoon.template: services/logs2logs-db/.lagoon.yml
- lagoon.image: amazeeiolagoon/logs2logs-db:v1-9-1
auto-idler:
image: ${IMAGE_REPO:-lagoon}/auto-idler
user: '111111111'
@@ -384,242 +262,10 @@ services:
- './services/auto-idler/openshift-services.sh:/openshift-services.sh'
- './services/auto-idler/openshift-clis.sh:/openshift-clis.sh'
- './services/auto-idler/create_jwt.py:/create_jwt.py'
- labels:
- lagoon.type: custom
- lagoon.template: services/auto-idler/.lagoon.yml
- lagoon.image: amazeeiolagoon/auto-idler:v1-9-1
storage-calculator:
image: ${IMAGE_REPO:-lagoon}/storage-calculator
user: '111111111'
volumes:
- './services/storage-calculator/calculate-storage.sh:/calculate-storage.sh'
- labels:
- lagoon.type: custom
- lagoon.template: services/storage-calculator/.lagoon.yml
- lagoon.image: amazeeiolagoon/storage-calculator:v1-9-1
- logs-collector:
- image: openshift/origin-logging-fluentd:v3.6.1
- labels:
- lagoon.type: custom
- lagoon.template: services/logs-collector/.lagoon.yml
- lagoon.rollout: daemonset
- harbor-core:
- image: ${IMAGE_REPO:-lagoon}/harbor-core
- hostname: harbor-core
- volumes:
- - ${PWD}/services/harbor-core/harbor-core_ci.conf.yaml:/etc/core/app.conf
- - ${PWD}/services/harbor-core/ci-secret.key:/etc/core/key
- - ${PWD}/services/harbor-core/ci_tls.key:/etc/core/private_key.pem
- - /etc/core/token
- depends_on:
- - harbor-database
- - harbor-redis
- ports:
- - '8081:8080'
- environment:
- - DATABASE_TYPE=postgresql
- - POSTGRESQL_HOST=harbor-database
- - POSTGRESQL_PORT=5432
- - POSTGRESQL_USERNAME=postgres
- - POSTGRESQL_PASSWORD=test123
- - POSTGRESQL_DATABASE=registry
- - POSTGRESQL_SSLMODE=disable
- - POSTGRESQL_MAX_IDLE_CONNS=50
- - POSTGRESQL_MAX_OPEN_CONNS=100
- - CORE_URL=http://harbor-core:8080
- - JOBSERVICE_URL=http://harbor-jobservice:8080
- - REGISTRY_URL=http://harborregistry:5000
- - TOKEN_SERVICE_URL=http://harbor-core:8080/service/token
- - WITH_NOTARY=false
- - CFG_EXPIRATION=5
- - ADMIRAL_URL=NA
- - WITH_CLAIR=true
- - CLAIR_DB_HOST=harbor-database
- - CLAIR_DB_PORT=5432
- - CLAIR_DB_USERNAME=postgres
- - CLAIR_DB=postgres
- - CLAIR_DB_SSLMODE=disable
- - CLAIR_URL=http://harbor-trivy:6060
- - CLAIR_ADAPTER_URL=http://harbor-trivy:8080
- - REGISTRY_STORAGE_PROVIDER_NAME=s3
- - WITH_CHARTMUSEUM=false
- - LOG_LEVEL=error
- - CONFIG_PATH=/etc/core/app.conf
- - SYNC_REGISTRY=false
- - CHART_CACHE_DRIVER=redis
- - _REDIS_URL=harbor-redis:6379,100,
- - _REDIS_URL_REG=redis://harbor-redis:6379/2
- - PORTAL_URL=http://harbor-portal:8080
- - REGISTRYCTL_URL=http://harborregistryctl:8080
- - CLAIR_HEALTH_CHECK_SERVER_URL=http://harbor-trivy:6061
- - HTTP_PROXY=
- - HTTPS_PROXY=
- - NO_PROXY=harbor-core,harbor-jobservice,harbor-database,harborregistry,harbor-portal,harbor-trivy,127.0.0.1,localhost,.local,.internal
- - HARBOR_NGINX_ENDPOINT=http://harbor-nginx:8080
- - ROBOT_TOKEN_DURATION=500
- - CORE_SECRET=secret123
- - JOBSERVICE_SECRET=secret123
- - REGISTRY_HTTP_SECRET=secret123
- - HARBOR_ADMIN_PASSWORD=admin
- - CLAIR_DB_PASSWORD=test123
- - WITH_TRIVY=true
- - TRIVY_ADAPTER_URL=http://harbor-trivy:8080
- restart: always
- labels:
- lagoon.type: custom
- lagoon.template: services/harbor-core/harbor-core.yml
- lagoon.image: amazeeiolagoon/harbor-core:v1-9-1
- harbor-database:
- image: ${IMAGE_REPO:-lagoon}/harbor-database
- hostname: harbor-database
- ports:
- - '5432:5432'
- environment:
- - POSTGRES_PASSWORD=test123
- - POSTGRES_USER=postgres
- - POSTGRES_DB=postgres
- restart: always
- labels:
- lagoon.type: custom
- lagoon.template: services/harbor-database/harbor-database.yml
- lagoon.image: amazeeiolagoon/harbor-database:v1-9-1
- harbor-jobservice:
- image: ${IMAGE_REPO:-lagoon}/harbor-jobservice
- hostname: harbor-jobservice
- ports:
- - '8083:8080'
- volumes:
- - /tmp
- - ${PWD}/services/harbor-jobservice/ci-config.yaml:/etc/jobservice/config.yml
- depends_on:
- - harbor-database
- - harbor-redis
- - harbor-nginx
- environment:
- - CORE_SECRET=secret123
- - JOBSERVICE_SECRET=secret123
- - CORE_URL=http://harbor-core:8080
- - REGISTRY_CONTROLLER_URL=http://harborregistryctl:8080
- - LOG_LEVEL=error
- - HTTP_PROXY=
- - HTTPS_PROXY=
- - NO_PROXY=harbor-core,harbor-jobservice,harbor-database,harborregistry,harbor-portal,harbor-trivy,127.0.0.1,localhost,.local,.internal
- - SCANNER_CLAIR_DATABASE_URL=postgres://postgres:test123@harbor-database:5432/postgres?sslmode=disable
- - SCANNER_STORE_REDIS_URL=redis://harbor-redis:6379/4
- - SCANNER_LOG_LEVEL=error
- restart: always
- labels:
- lagoon.type: custom
- lagoon.template: services/harbor-jobservice/harbor-jobservice.yml
- lagoon.image: amazeeiolagoon/harbor-jobservice:v1-9-1
- harbor-nginx:
- image: ${IMAGE_REPO:-lagoon}/harbor-nginx
- hostname: harbor-nginx
- ports:
- - '8084:8080'
- volumes:
- - ${PWD}/services/harbor-nginx/ci.nginx.conf:/etc/nginx/nginx.conf
- depends_on:
- - harbor-core
- - harborregistry
- - harbor-portal
- restart: always
- labels:
- lagoon.type: custom
- lagoon.template: services/harbor-nginx/harbor-nginx.yml
- lagoon.image: amazeeiolagoon/harbor-nginx:v1-9-1
- harbor-portal:
- image: ${IMAGE_REPO:-lagoon}/harbor-portal
- hostname: harbor-portal
- ports:
- - '8085:8080'
- restart: always
- labels:
- lagoon.type: custom
- lagoon.template: services/harbor-portal/harbor-portal.yml
- lagoon.image: amazeeiolagoon/harbor-portal:v1-9-1
- harbor-redis:
- image: ${IMAGE_REPO:-lagoon}/harbor-redis
- hostname: harbor-redis
- volumes:
- - /var/lib/redis
- restart: always
- labels:
- lagoon.type: custom
- lagoon.template: services/harbor-redis/harbor-redis.yml
- lagoon.image: amazeeiolagoon/harbor-redis:v1-9-1
- harbor-trivy:
- image: ${IMAGE_REPO:-lagoon}/harbor-trivy
- hostname: harbor-trivy
- volumes:
- - /tmp
- depends_on:
- - harbor-redis
- - harbor-database
- environment:
- - SCANNER_LOG_LEVEL=error
- - SCANNER_TRIVY_CACHE_DIR=/home/scanner/.cache/trivy
- - SCANNER_TRIVY_REPORTS_DIR=/home/scanner/.cache/reports
- - SCANNER_TRIVY_DEBUG_MODE=false
- - SCANNER_TRIVY_VULN_TYPE=os,library
- - SCANNER_TRIVY_GITHUB_TOKEN=
- - SCANNER_TRIVY_SEVERITY=UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL
- - SCANNER_TRIVY_IGNORE_UNFIXED=false
- - SCANNER_TRIVY_SKIP_UPDATE=false
- - SCANNER_STORE_REDIS_URL=redis://harbor-redis:6379/5
- - SCANNER_JOB_QUEUE_REDIS_URL=redis://harbor-redis:6379/5
- - TRIVY_NON_SSL=true
- restart: on-failure
- deploy:
- resources:
- limits:
- cpus: "1"
- memory: 1g
- labels:
- lagoon.type: custom
- lagoon.template: services/harbor-trivy/harbor-trivy.yml
- lagoon.name: harbor-trivy
- lagoon.image: amazeeiolagoon/harbor-trivy:v1-9-1
- harborregistry:
- image: ${IMAGE_REPO:-lagoon}/harborregistry
- hostname: harborregistry
- volumes:
- - ${PWD}/services/harborregistry/registry_ci.cfg.yaml:/etc/registry/config.yml
- - ${PWD}/services/harbor-core/ci_tls.crt:/etc/registry/root.crt
- depends_on:
- - harbor-database
- - harbor-redis
- environment:
- - HARBOR_NGINX_ENDPOINT=http://harbor-nginx:8080
- - REGISTRY_REDIS_PASSWORD=
- - CORE_SECRET=secret123
- - JOBSERVICE_SECRET=secret123
- - REGISTRY_HTTP_SECRET=secret123
- restart: always
- command: ["serve", "/etc/registry/config.yml"]
- labels:
- lagoon.type: custom
- lagoon.template: services/harborregistry/harborregistry.yml
- lagoon.name: harborregistry
- lagoon.image: amazeeiolagoon/harborregistry:v1-9-1
- harborregistryctl:
- image: ${IMAGE_REPO:-lagoon}/harborregistryctl
- hostname: harborregistryctl
- volumes:
- - ${PWD}/services/harborregistryctl/registryctl_ci.cfg.yaml:/etc/registryctl/config.yml
- environment:
- - REGISTRY_REDIS_PASSWORD=
- - CORE_SECRET=secret123
- - JOBSERVICE_SECRET=secret123
- - REGISTRY_HTTP_SECRET=secret123
- labels:
- lagoon.type: custom
- lagoon.template: services/harborregistryctl/harborregistry.yml
- lagoon.name: harborregistry
- lagoon.image: amazeeiolagoon/harborregistryctl:v1-9-1
api-redis:
image: ${IMAGE_REPO:-lagoon}/api-redis
- labels:
- lagoon.type: custom
- lagoon.template: services/api-redis/.lagoon.app.yml
- lagoon.image: amazeeiolagoon/api-redis:v1-9-1
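Taken together, these docker-compose changes strip the per-service `lagoon.*` deployment labels while leaving the compose-level settings in place. As a rough sketch of the shape a service entry is left with — the service name and paths below are hypothetical placeholders, not taken from this file:

```yaml
# Minimal sketch of a post-cleanup service entry: image, command, mounts and
# ports stay, while the lagoon.type / lagoon.template / lagoon.image labels
# that drove the self-hosted deployment templates are gone.
# "example-service" and its paths are illustrative placeholders.
example-service:
  image: ${IMAGE_REPO:-lagoon}/example-service
  command: yarn run dev
  volumes:
    - ./services/example-service/src:/app/services/example-service/src
  ports:
    - '3000:3000'
```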
diff --git a/docs/.gitbook/assets/container_overview (1).png b/docs/.gitbook/assets/container_overview.png
similarity index 100%
rename from docs/.gitbook/assets/container_overview (1).png
rename to docs/.gitbook/assets/container_overview.png
diff --git a/docs/.gitbook/assets/screen-shot-2020-01-23-at-1.04.06-pm.png b/docs/.gitbook/assets/gitlab-settings.png
similarity index 100%
rename from docs/.gitbook/assets/screen-shot-2020-01-23-at-1.04.06-pm.png
rename to docs/.gitbook/assets/gitlab-settings.png
diff --git a/docs/.gitbook/assets/graphiql-2020-01-29-18-05-54 (1) (1).png b/docs/.gitbook/assets/graphiql-2020-01-29-18-05-54 (1).png
similarity index 100%
rename from docs/.gitbook/assets/graphiql-2020-01-29-18-05-54 (1) (1).png
rename to docs/.gitbook/assets/graphiql-2020-01-29-18-05-54 (1).png
diff --git a/docs/.gitbook/assets/projects_overview (1).png b/docs/.gitbook/assets/projects_overview.png
similarity index 100%
rename from docs/.gitbook/assets/projects_overview (1).png
rename to docs/.gitbook/assets/projects_overview.png
diff --git a/docs/.gitbook/assets/repositories_overview (1).png b/docs/.gitbook/assets/repositories_overview.png
similarity index 100%
rename from docs/.gitbook/assets/repositories_overview (1).png
rename to docs/.gitbook/assets/repositories_overview.png
diff --git a/docs/.gitbook/assets/scanning_image_1 (2) (1).png b/docs/.gitbook/assets/scanning_image_1 (1).png
similarity index 100%
rename from docs/.gitbook/assets/scanning_image_1 (2) (1).png
rename to docs/.gitbook/assets/scanning_image_1 (1).png
diff --git a/docs/.gitbook/assets/scanning_image_1 (2).png b/docs/.gitbook/assets/scanning_image_1.png
similarity index 100%
rename from docs/.gitbook/assets/scanning_image_1 (2).png
rename to docs/.gitbook/assets/scanning_image_1.png
diff --git a/docs/.gitbook/assets/topgun (1).gif b/docs/.gitbook/assets/topgun.gif
similarity index 100%
rename from docs/.gitbook/assets/topgun (1).gif
rename to docs/.gitbook/assets/topgun.gif
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 49e55e0166..1a6b53847a 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -26,8 +26,10 @@
* [Backups](using-lagoon-advanced/backups.md)
* [Remote Shell](using-lagoon-advanced/remote-shell.md)
* [GraphQL](using-lagoon-advanced/graphql.md)
+* [Project Default Users and SSH keys](using-lagoon-advanced/project-default-users-keys.md)
* [Node.js Graceful Shutdown](using-lagoon-advanced/nodejs.md)
-* [Migrations from amazee.io](using-lagoon-advanced/migration.md)
+* [Setting up Xdebug with Lagoon](using-lagoon-advanced/setting-up-xdebug-with-lagoon.md)
+* [Environment Idling](using-lagoon-advanced/environment-idling.md)
## Drupal
@@ -46,23 +48,6 @@
* [Logging](logging/logging.md)
* [Kibana Examples](logging/kibana-examples.md)
-## Administering Lagoon
-
-* [Install Lagoon 1.x on OpenShift](administering-lagoon/install.md)
-* [OpenShift Requirements](administering-lagoon/openshift_requirements.md)
-* [Install local Kubernetes cluster for Lagoon](administering-lagoon/install-local-kubernetes-cluster-for-lagoon.md)
-* [GraphQL API](administering-lagoon/graphql-queries.md)
-* [Role-Based Access Control \(RBAC\)](administering-lagoon/rbac.md)
-* [Harbor](administering-lagoon/using_harbor/README.md)
- * [Security Scanning](administering-lagoon/using_harbor/security_scanning.md)
- * [Harbor Settings](administering-lagoon/using_harbor/harbor-settings/README.md)
- * [Harbor-Core](administering-lagoon/using_harbor/harbor-settings/harbor-core.md)
- * [Harbor-Database](administering-lagoon/using_harbor/harbor-settings/harbor-database.md)
- * [Harbor-Jobservice](administering-lagoon/using_harbor/harbor-settings/harbor-jobservice.md)
- * [Harbor-Trivy](administering-lagoon/using_harbor/harbor-settings/harbor-trivy.md)
- * [HarborRegistry](administering-lagoon/using_harbor/harbor-settings/harborregistry.md)
- * [HarborRegistryCtl](administering-lagoon/using_harbor/harbor-settings/harborregistryctl.md)
-
## Docker Images
* [Elasticsearch](docker-images/elasticsearch.md)
@@ -84,11 +69,28 @@
* [Varnish](docker-images/varnish/README.md)
* [Varnish-Drupal](docker-images/varnish/varnish-drupal.md)
+## Administering Lagoon
+
+* [GraphQL API](administering-lagoon/graphql-queries.md)
+* [Role-Based Access Control \(RBAC\)](administering-lagoon/rbac.md)
+* [Harbor](administering-lagoon/using_harbor/README.md)
+ * [Security Scanning](administering-lagoon/using_harbor/security_scanning.md)
+ * [Harbor Settings](administering-lagoon/using_harbor/harbor-settings/README.md)
+ * [Harbor-Core](administering-lagoon/using_harbor/harbor-settings/harbor-core.md)
+ * [Harbor-Database](administering-lagoon/using_harbor/harbor-settings/harbor-database.md)
+ * [Harbor-Jobservice](administering-lagoon/using_harbor/harbor-settings/harbor-jobservice.md)
+ * [Harbor-Trivy](administering-lagoon/using_harbor/harbor-settings/harbor-trivy.md)
+ * [HarborRegistry](administering-lagoon/using_harbor/harbor-settings/harborregistry.md)
+ * [HarborRegistryCtl](administering-lagoon/using_harbor/harbor-settings/harborregistryctl.md)
+
## Contributing to Lagoon
+* [Developing Lagoon](contributing-to-lagoon/developing-lagoon.md)
+* [Install Local Kubernetes Cluster for Lagoon](contributing-to-lagoon/install-local-kubernetes-cluster-for-lagoon.md)
+* [Install Lagoon 1.x on OpenShift](contributing-to-lagoon/install/README.md)
+ * [OpenShift Requirements](contributing-to-lagoon/install/openshift_requirements.md)
* [Code of Conduct](contributing-to-lagoon/code-of-conduct.md)
* [Contributing](contributing-to-lagoon/contributing.md)
-* [Developing Lagoon](contributing-to-lagoon/developing-lagoon.md)
* [Tests](contributing-to-lagoon/tests.md)
* [API Debugging](contributing-to-lagoon/api-debugging.md)
@@ -97,4 +99,5 @@
* [FAQ](resources/faq.md)
* [Glossary](resources/glossary.md)
* [Tutorials, Webinars, and Videos](resources/tutorials-and-webinars.md)
+* [Lagoon Examples](https://github.com/uselagoon/lagoon-examples)
diff --git a/docs/administering-lagoon/graphql-queries.md b/docs/administering-lagoon/graphql-queries.md
index 1fae896493..4dcd039e1a 100644
--- a/docs/administering-lagoon/graphql-queries.md
+++ b/docs/administering-lagoon/graphql-queries.md
@@ -29,7 +29,7 @@ Under "GraphQL Endpoint", enter the API endpoint URL with `/graphql` on the end.
Press ESC to close the HTTP header overlay and now we are ready to send the first GraphQL request!
-![Editing HTTP Headers in GraphiQL.](../.gitbook/assets/graphiql-2020-01-29-18-05-54%20%281%29%20%281%29.png)
+![Editing HTTP Headers in GraphiQL.](../.gitbook/assets/graphiql-2020-01-29-18-05-54%20%281%29.png)
Enter this in the left panel
@@ -43,7 +43,7 @@ query allProjects{
![Running a query in GraphiQL.](../.gitbook/assets/graphiql-2020-01-29-20-10-32%20%281%29%20%281%29.png)
-And press the ▶️button \(or press CTRL+ENTER\).
+And press the ▶️ button \(or press CTRL+ENTER\).
If all went well, your first GraphQL response should appear shortly afterwards in the right pane.
@@ -220,7 +220,7 @@ Now for every deployment you will receive messages in your defined channel.
### Adding a new OpenShift target
{% hint style="info" %}
-In Lagoon 1.x `addOpenshift` is used for both Openshift and Kubernetes targets. In Lagoon 2.x this will change.
+In Lagoon 1.x `addOpenshift` is used for both OpenShift and Kubernetes targets. In Lagoon 2.x this will change.
{% endhint %}
The OpenShift cluster to which Lagoon should deploy. Lagoon is not only capable of deploying to its own OpenShift, but also to any OpenShift anywhere in the world.
@@ -414,7 +414,7 @@ mutation {
Update the production environment within a project:
{% hint style="warning" %}
-This required a redeploy in order for the changes to be reflected in the containers.
+This requires a redeploy in order for the changes to be reflected in the containers.
{% endhint %}
```graphql
@@ -504,7 +504,7 @@ query search{
## Maintaining project metadata
-Project metadata can be assigned using arbitrary key/value pairs. Projects can then be queried by the associated metadata; for example you may categorise projects by type of software, version number, or any other categorisation you may wish to query on later.
+Project metadata can be assigned using arbitrary key/value pairs. Projects can then be queried by the associated metadata; for example you may categorize projects by type of software, version number, or any other categorization you may wish to query on later.
### Add/update metadata on a project
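As a sketch of what that section describes — the operation names below mirror the Lagoon API but the exact call shapes are assumptions for illustration and should be checked against the live schema:

```graphql
# Hypothetical example: tag project 42 with key/value metadata,
# then query projects back by that same key/value pair.
mutation AddTypeMetadata {
  updateProjectMetadata(
    input: { id: 42, patch: { key: "type", value: "drupal9" } }
  ) {
    id
    metadata
  }
}

query FindDrupal9Projects {
  projectsByMetadata(metadata: [{ key: "type", value: "drupal9" }]) {
    id
    name
  }
}
```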
diff --git a/docs/administering-lagoon/rbac.md b/docs/administering-lagoon/rbac.md
index 39389b7142..f2d42bb21a 100644
--- a/docs/administering-lagoon/rbac.md
+++ b/docs/administering-lagoon/rbac.md
@@ -52,7 +52,7 @@ Here is a table that lists the roles and the access they have:
| :--- | :--- | :--- | :--- | ---: | :--- | :--- | :--- | :--- | :--- | :--- | :--- |
| addBackup | backup | add | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| deleteBackup | backup | delete | projectID | Yes | Yes | Yes | Yes | No | No | No | |
-| deleteAllBackups | backup | deleteAll | | Yes | | | | | No | No | |
+| deleteAllBackups | backup | deleteAll | | Yes | No | No | No | No | No | No | |
| getBackupsByEnvironmentId | backup | view | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| | deployment | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| addEnvVariable \(to Project\) | env\_var | project:add | projectID | Yes | Yes | Yes | No | No | No | No | |
@@ -64,12 +64,12 @@ Here is a table that lists the roles and the access they have:
| getEnvVarsByEnvironmentId | env\_var | environment:view:production | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| addOrUpdateEnvironment | environment | addOrUpdate:development | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| addOrUpdateEnvironment | environment | addOrUpdate:production | projectID | Yes | Yes | Yes | Yes | No | No | No | |
-| updateEnvironment | environment | update:development | projectID | Yes | Yes | Yes | Yes | Yes | | | |
+| updateEnvironment | environment | update:development | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| updateEnvironment | environment | update:production | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| deleteEnvironment | environment | delete:development | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| deleteEnvironment | environment | delete:production | projectID | Yes | Yes | Yes | No | No | No | No | |
-| deleteAllEnvironments | environment | deleteAll | | Yes | | | | | | | |
-| addOrUpdateEnvironmentStorage | environment | storage | | Yes | Yes | | | | | | |
+| deleteAllEnvironments | environment | deleteAll | | Yes | No | No | No | No | No | No | |
+| addOrUpdateEnvironmentStorage | environment | storage | | Yes | Yes | No | No | No | No | No | |
| addDeployment | environment | deploy:development | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| addDeployment | environment | deploy:production | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| deleteDeployment | deployment | delete | projectID | Yes | Yes | Yes | Yes | No | No | No | |
@@ -85,52 +85,52 @@ Here is a table that lists the roles and the access they have:
| deployEnvironmentPromote | environment | deploy:development | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| deployEnvironmentPromote | environment | deploy:production | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| getEnvironmentsByProjectId | environment | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
-| getEnvironmentStorageMonthByEnvironmentId | environment | storage | | Yes | | | | | | | |
-| getEnvironmentHoursMonthByEnvironmentId | environment | storage | | Yes | | | | | | | |
-| getEnvironmentHitsMonthByEnvironmentId | environment | storage | | Yes | | | | | | | |
+| getEnvironmentStorageMonthByEnvironmentId | environment | storage | | Yes | No | No | No | No | No | No | |
+| getEnvironmentHoursMonthByEnvironmentId | environment | storage | | Yes | No | No | No | No | No | No | |
+| getEnvironmentHitsMonthByEnvironmentId | environment | storage | | Yes | No | No | No | No | No | No | |
| getEnvironmentServicesByEnvironmentId | environment | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| addGroup | group | add | | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| updateGroup | group | update | groupID | Yes | Yes | Yes | Yes | No | No | No | |
| deleteGroup | group | delete | groupID | Yes | Yes | Yes | Yes | No | No | No | |
-| deleteAllGroups | group | deleteAll | | Yes | | | | | | | |
+| deleteAllGroups | group | deleteAll | | Yes | No | No | No | No | No | No | |
| addUserToGroup | group | addUser | groupID | Yes | Yes | Yes | Yes | No | No | No | |
| removeUserFromGroup | group | removeUser | groupID | Yes | Yes | Yes | Yes | No | No | No | |
-| addNotificationSlack | notification | add | | Yes | Yes | | | | | | |
-| updateNotificationSlack | notification | update | | Yes | Yes | | | | | | |
-| deleteNotificationSlack | notification | delete | | Yes | Yes | | | | | | |
-| deleteAllNotificationSlacks | notification | deleteAll | | Yes | | | | | | | |
-| addNotificationRocketChat | notification | add | | Yes | Yes | | | | | | |
-| updateNotificationRocketChat | notification | update | | Yes | Yes | | | | | | |
-| deleteNotificationRocketChat | notification | delete | | Yes | Yes | | | | | | |
-| deleteAllNotificationRocketChats | notification | deleteAll | | Yes | | | | | | | |
-| removeAllNotificationsFromAllProjects | notification | removeAll | | Yes | | | | | | | |
+| addNotificationSlack | notification | add | | Yes | Yes | No | No | No | No | No | |
+| updateNotificationSlack | notification | update | | Yes | Yes | No | No | No | No | No | |
+| deleteNotificationSlack | notification | delete | | Yes | Yes | No | No | No | No | No | |
+| deleteAllNotificationSlacks | notification | deleteAll | | Yes | No | No | No | No | No | No | |
+| addNotificationRocketChat | notification | add | | Yes | Yes | No | No | No | No | No | |
+| updateNotificationRocketChat | notification | update | | Yes | Yes | No | No | No | No | No | |
+| deleteNotificationRocketChat | notification | delete | | Yes | Yes | No | No | No | No | No | |
+| deleteAllNotificationRocketChats | notification | deleteAll | | Yes | No | No | No | No | No | No | |
+| removeAllNotificationsFromAllProjects | notification | removeAll | | Yes | No | No | No | No | No | No | |
| getNotificationsByProjectId | notification | view | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
-| addOpenshift | openshift | add | | Yes | Yes | | | | | | |
-| updateOpenshift | openshift | update | | Yes | Yes | | | | | | |
-| deleteOpenshift | openshift | delete | | Yes | Yes | | | | | | |
-| deleteAllOpenshifts | openshift | deleteAll | | Yes | Yes | | | | | | |
-| getAllOpenshifts | openshift | viewAll | | Yes | | | | | | | |
+| addOpenshift | openshift | add | | Yes | Yes | No | No | No | No | No | |
+| updateOpenshift | openshift | update | | Yes | Yes | No | No | No | No | No | |
+| deleteOpenshift | openshift | delete | | Yes | Yes | No | No | No | No | No | |
+| deleteAllOpenshifts | openshift | deleteAll | | Yes | Yes | No | No | No | No | No | |
+| getAllOpenshifts | openshift | viewAll | | Yes | No | No | No | No | No | No | |
| getOpenshiftByProjectId | openshift | view | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| addNotificationToProject | project | addNotification | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| removeNotificationFromProject | project | removeNotification | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| addProject | project | add | | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| updateProject | project | update | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| deleteProject | project | delete | projectID | Yes | Yes | Yes | No | No | No | No | |
-| deleteAllProjects | project | deleteAll | | Yes | | | | | | | |
+| deleteAllProjects | project | deleteAll | | Yes | No | No | No | No | No | No | |
| addGroupsToProject | project | addGroup | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| removeGroupsFromProject | project | removeGroup | projectID | Yes | Yes | Yes | Yes | No | No | No | |
-| getAllProjects | project | viewAll | | Yes | Yes | | | | | | |
+| getAllProjects | project | viewAll | | Yes | Yes | No | No | No | No | No | |
| getProjectByEnvironmentId | project | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| getProjectByGitUrl | project | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| getProjectByName | project | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| addRestore | restore | add | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| updateRestore | restore | update | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
-| addSshKey | ssh\_key | add | userId | Yes | Yes | | | | | | Yes |
-| updateSshKey | ssh\_key | update | userId | Yes | Yes | | | | | | Yes |
-| deleteSshKey | ssh\_key | delete | userId | Yes | Yes | | | | | | Yes |
-| deleteAllSshKeys | ssh\_key | deleteAll | | Yes | | | | | | | |
-| removeAllSshKeysFromAllUsers | ssh\_key | removeAll | | Yes | | | | | | | |
-| getUserSshKeys | ssh\_key | view:user | userID | Yes | Yes | | | | | | Yes |
+| addSshKey | ssh\_key | add | userId | Yes | Yes | No | No | No | No | No | Yes |
+| updateSshKey | ssh\_key | update | userId | Yes | Yes | No | No | No | No | No | Yes |
+| deleteSshKey | ssh\_key | delete | userId | Yes | Yes | No | No | No | No | No | Yes |
+| deleteAllSshKeys | ssh\_key | deleteAll | | Yes | No | No | No | No | No | No | No |
+| removeAllSshKeysFromAllUsers | ssh\_key | removeAll | | Yes | No | No | No | No | No | No | No |
+| getUserSshKeys | ssh\_key | view:user | userID | Yes | Yes | No | No | No | No | No | Yes |
| addTask | task | add:development | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
| addTask | task | add:production | projectID | Yes | Yes | Yes | Yes | No | No | No | |
| taskDrushArchiveDump | task | drushArchiveDump:development | projectID | Yes | Yes | Yes | Yes | Yes | No | No | |
@@ -160,9 +160,9 @@ Here is a table that lists the roles and the access they have:
| getTaskByRemoteId | task | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| getTaskById | task | view | projectID | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
| addUser | user | add | | Yes | Yes | Yes | Yes | Yes | Yes | Yes | |
-| updateUser | user | update | userId | Yes | Yes | | | | | | Yes |
-| deleteUser | user | delete | userId | Yes | Yes | | | | | | Yes |
-| deleteAllUsers | user | deleteAll | | Yes | | | | | | | |
+| updateUser | user | update | userId | Yes | Yes | No | No | No | No | No | Yes |
+| deleteUser | user | delete | userId | Yes | Yes | No | No | No | No | No | Yes |
+| deleteAllUsers | user | deleteAll | | Yes | No | No | No | No | No | No | |
| getProjectByEnvironmentId | project | viewPrivateKey | projectID | Yes | Yes | Yes | No | No | No | No | |
| getProjectByGitUrl | project | viewPrivateKey | projectID | Yes | Yes | Yes | No | No | No | No | |
| getProjectByName | project | viewPrivateKey | projectID | Yes | Yes | Yes | No | No | No | No | |
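Reading the table: each row names an API operation, the resource/scope pair it checks, and then one Yes/No column per role. For instance, the `deleteAllBackups` row above now grants only the platform-wide admin column, so a call like the sketch below (operation name taken from the table; the call shape is an assumption) is rejected for every other role:

```graphql
# Permitted only for platform-wide admins, per the deleteAllBackups row above.
mutation {
  deleteAllBackups
}
```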
diff --git a/docs/administering-lagoon/using_harbor/README.md b/docs/administering-lagoon/using_harbor/README.md
index 4cc90cb826..cf1817e213 100644
--- a/docs/administering-lagoon/using_harbor/README.md
+++ b/docs/administering-lagoon/using_harbor/README.md
@@ -14,13 +14,13 @@ If you are hosting a site with amazee.io, we do not allow customer access to the
Once logged in, the first screen is a list of all repositories your user has access to. Each "repository" in Harbor correlates to a project in Lagoon.
-![Harbor Projects Overview](../../.gitbook/assets/projects_overview%20%281%29.png)
+![Harbor Projects Overview](../../.gitbook/assets/projects_overview.png)
Within each Harbor repository, you'll see a list of container images from all environments within a single Lagoon project.
-![Harbor Repositories Overview](../../.gitbook/assets/repositories_overview%20%281%29.png)
+![Harbor Repositories Overview](../../.gitbook/assets/repositories_overview.png)
From here, you can drill down into an individual container in order to see its details, including an overview of its security scan results.
-![Harbor Container Overview](../../.gitbook/assets/container_overview%20%281%29.png)
+![Harbor Container Overview](../../.gitbook/assets/container_overview.png)
diff --git a/docs/administering-lagoon/using_harbor/harbor-settings/README.md b/docs/administering-lagoon/using_harbor/harbor-settings/README.md
index 031d5a3470..2bb666faf8 100644
--- a/docs/administering-lagoon/using_harbor/harbor-settings/README.md
+++ b/docs/administering-lagoon/using_harbor/harbor-settings/README.md
@@ -2,15 +2,11 @@
There are a variety of settings you can configure for Harbor's services. Each has its own YAML file.
-[HarborClair]()
-
-[HarborClairAdapter]()
-
[HarborRegistry](harborregistry.md)
[HarborRegistryCtl](harborregistryctl.md)
-[Harbor-Core]()
+[Harbor-Core](harbor-core.md)
[Harbor-Database](harbor-database.md)
diff --git a/docs/administering-lagoon/using_harbor/security_scanning.md b/docs/administering-lagoon/using_harbor/security_scanning.md
index 862afeefe8..f8fec0b4a5 100644
--- a/docs/administering-lagoon/using_harbor/security_scanning.md
+++ b/docs/administering-lagoon/using_harbor/security_scanning.md
@@ -4,5 +4,5 @@ Harbor comes with a built-in security scanning solution provided by the Trivy se
An example of a security scan in Harbor, showing applicable vulnerabilities for a scanned container:
-![Harbor Security Scanning Example Image](../../.gitbook/assets/scanning_image_1%20%282%29%20%281%29.png)
+![Harbor Security Scanning Example Image](../../.gitbook/assets/scanning_image_1%20%281%29.png)
diff --git a/docs/contributing-to-lagoon/developing-lagoon.md b/docs/contributing-to-lagoon/developing-lagoon.md
index f4ac1e79d1..16bb8f1886 100644
--- a/docs/contributing-to-lagoon/developing-lagoon.md
+++ b/docs/contributing-to-lagoon/developing-lagoon.md
@@ -85,7 +85,7 @@ The API uses a [Puppet](https://puppet.com/docs/puppet/latest/puppet_index.html)
## Troubleshooting
-\*\*\*\*⚠ **I can't build a docker image for any Node.js based service**
+⚠ **I can't build a docker image for any Node.js based service**
Rebuild the images via
@@ -94,17 +94,17 @@ make clean
make build
```
-\*\*\*\*⚠ **I get errors about missing node\_modules content when I try to build / run a Node.js based image**
+⚠ **I get errors about missing node\_modules content when I try to build / run a Node.js based image**
Make sure to run `yarn` in Lagoon's root directory, since some services have common dependencies managed by `yarn` workspaces.
⚠ **My builds can't resolve domains**
-Some Internet Service Providers \(ISPs\) set up a "search domain" to catch domain name errors. Virtualbox will copy this setting into minishift, which can cause domain resolution errors in the OpenShift pods. To check for this problem, look at the `/etc/resolv.conf` in your failing pod and check for errant search domains.
+Some Internet Service Providers \(ISPs\) set up a "search domain" to catch domain name errors. VirtualBox will copy this setting into Minishift, which can cause domain resolution errors in the OpenShift pods. To check for this problem, look at the `/etc/resolv.conf` in your failing pod and check for errant search domains.
To fix, you must remove the extra search domain.
-* Log in to the minishift vm: `minishift ssh`.
+* Log in to the Minishift VM: `minishift ssh`.
* Remove the setting from `/etc/resolv.conf`.
* Restart openshift docker: `sudo docker restart origin`.
* Redeploy `docker-host` in the `lagoon` project.
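Condensed into a shell sketch — the `sed` line assumes the errant entry is an entire `search` line in `resolv.conf`, so adjust it to what you actually find there:

```text
minishift ssh                                 # log in to the Minishift VM
sudo sed -i '/^search/d' /etc/resolv.conf     # assumption: drop the whole copied search line
sudo docker restart origin                    # restart the OpenShift docker container
exit                                          # then redeploy docker-host from Lagoon
```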
diff --git a/docs/administering-lagoon/install-local-kubernetes-cluster-for-lagoon.md b/docs/contributing-to-lagoon/install-local-kubernetes-cluster-for-lagoon.md
similarity index 97%
rename from docs/administering-lagoon/install-local-kubernetes-cluster-for-lagoon.md
rename to docs/contributing-to-lagoon/install-local-kubernetes-cluster-for-lagoon.md
index 0201364c7a..e160b9544e 100644
--- a/docs/administering-lagoon/install-local-kubernetes-cluster-for-lagoon.md
+++ b/docs/contributing-to-lagoon/install-local-kubernetes-cluster-for-lagoon.md
@@ -4,7 +4,7 @@ description: >-
Kubernetes is only supported to deploy projects and environments into.
---
-# Install local Kubernetes cluster for Lagoon
+# Install Local Kubernetes Cluster for Lagoon
Let's see how to install a local lightweight Kubernetes cluster using k3s by Rancher: [rancher/k3s](https://github.com/rancher/k3s)
@@ -116,11 +116,13 @@ In order to have the best experience we recommend the following:
## Deploy Lagoon on Kubernetes
-1. TODO
+{% hint style="info" %}
+Coming Soon!
+{% endhint %}
## Configure Installed Lagoon
-We have a fully running Kubernetes cluster. Now it's time to configure the first project inside of it. Follow the examples in [GraphQL API](graphql-queries.md).
+We have a fully running Kubernetes cluster. Now it's time to configure the first project inside of it. Follow the examples in [GraphQL API](../administering-lagoon/graphql-queries.md).
## Clean up
diff --git a/docs/administering-lagoon/install.md b/docs/contributing-to-lagoon/install/README.md
similarity index 96%
rename from docs/administering-lagoon/install.md
rename to docs/contributing-to-lagoon/install/README.md
index 4802164d7a..13528283b9 100644
--- a/docs/administering-lagoon/install.md
+++ b/docs/contributing-to-lagoon/install/README.md
@@ -37,7 +37,7 @@ Technically, Lagoon can use any Service Account and also needs no admin permissi
In this example we create the Service Account `lagoon` in the OpenShift Project `default`.
-1. Make sure you have the `oc cli` tools already installed. If not, please see [here](https://docs.openshift.org/latest/cli_reference/get_started_cli.html#cli-reference-get-started-cli).
+1. Make sure you have the `oc cli` tools already installed. If not, please see documentation [here](https://docs.openshift.org/latest/cli_reference/get_started_cli.html#cli-reference-get-started-cli).
2. Log into OpenShift as an admin:
```text
@@ -92,5 +92,5 @@ Once Lagoon is installed and operational, you need to initialize OpendistroSecurity to
### Configure Installed Lagoon
-We have a fully running Lagoon. Now it's time to configure the first project inside of it. Follow the examples in [GraphQL API](graphql-queries.md).
+We have a fully running Lagoon. Now it's time to configure the first project inside of it. Follow the examples in [GraphQL API](../../administering-lagoon/graphql-queries.md).
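The service account step this hunk's intro describes might look roughly like this with OpenShift 3.x tooling — the master URL is a placeholder, and the full upstream instructions continue beyond this hunk:

```text
oc login https://openshift.example.com:8443     # placeholder master URL
oc -n default create serviceaccount lagoon      # the SA Lagoon authenticates as
oc -n default serviceaccounts get-token lagoon  # token to hand to Lagoon
```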
diff --git a/docs/administering-lagoon/openshift_requirements.md b/docs/contributing-to-lagoon/install/openshift_requirements.md
similarity index 100%
rename from docs/administering-lagoon/openshift_requirements.md
rename to docs/contributing-to-lagoon/install/openshift_requirements.md
diff --git a/docs/docker-images/elasticsearch.md b/docs/docker-images/elasticsearch.md
index ecee7db0df..016f6b3231 100644
--- a/docs/docker-images/elasticsearch.md
+++ b/docs/docker-images/elasticsearch.md
@@ -6,8 +6,8 @@
## Supported versions
-* 6.8 [\[Dockerfile\]](https://github.com/amazeeio/lagoon/blob/master/images/elasticsearch/Dockerfile6)
-* 7.6 [\[Dockerfile\]](https://github.com/amazeeio/lagoon/blob/master/images/elasticsearch/Dockerfile7)
+* 6 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/elasticsearch/6.Dockerfile)
+* 7 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/elasticsearch/7.Dockerfile)
## Known issues
diff --git a/docs/docker-images/mariadb/README.md b/docs/docker-images/mariadb/README.md
index 7c30b54f0b..36c2c646c7 100644
--- a/docs/docker-images/mariadb/README.md
+++ b/docs/docker-images/mariadb/README.md
@@ -2,7 +2,7 @@
MariaDB is the open source successor to MySQL.
-The [Lagoon `MariaDB` image Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mariadb/Dockerfile). Based on the official packages `mariadb` and `mariadb-client` provided by the `alpine:3.8` image.
+The [Lagoon `MariaDB` image Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/mariadb/Dockerfile). Based on the official packages `mariadb` and `mariadb-client` provided by the `alpine:3.8` image.
This Dockerfile is intended to be used to set up a standalone MariaDB database server.
@@ -16,7 +16,7 @@ This image is prepared to be used on Lagoon. There are therefore some things alr
## Included tools
* [`mysqltuner.pl`](https://github.com/major/MySQLTuner-perl) - Perl script useful for database parameter tuning.
-* [`mysql-backup.sh`](https://github.com/amazeeio/lagoon/blob/master/images/mariadb/mysql-backup.sh) - Script for automating the daily MySQL backups on development environment.
+* [`mysql-backup.sh`](https://github.com/uselagoon/lagoon-images/blob/main/images/mariadb/mysql-backup.sh) - Script for automating the daily MySQL backups on development environment.
* [`pwgen`](https://linux.die.net/man/1/pwgen) - Utility to generate random and complex passwords.
## Included `my.cnf` configuration file
diff --git a/docs/docker-images/mariadb/mariadb-drupal.md b/docs/docker-images/mariadb/mariadb-drupal.md
index ca0ef9bc62..1681bf7644 100644
--- a/docs/docker-images/mariadb/mariadb-drupal.md
+++ b/docs/docker-images/mariadb/mariadb-drupal.md
@@ -2,7 +2,7 @@
MariaDB is the open source successor to MySQL.
-The [Lagoon `mariadb-drupal` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/mariadb-drupal/Dockerfile) is a customized [`mariadb` image](./) to use within Drupal projects in Lagoon. It differs from the `mariadb` image only for initial database setup, made by some environment variables:
+The Lagoon `mariadb-drupal` Docker image [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/mariadb-drupal/Dockerfile) is a customized [`mariadb` image](./) to use within Drupal projects in Lagoon. It differs from the `mariadb` image only in its initial database setup, which is driven by some environment variables:
| Environment Variable | Default | Description |
| :--- | :--- | :--- |
diff --git a/docs/docker-images/mongodb.md b/docs/docker-images/mongodb.md
index 4e2aa3089a..72211fce50 100644
--- a/docs/docker-images/mongodb.md
+++ b/docs/docker-images/mongodb.md
@@ -4,7 +4,7 @@
>
> * from [mongodb.com](https://www.mongodb.com/)
-[Lagoon `MongoDB` image Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mongo/Dockerfile). Based on the official package `mongodb` provided by the `alpine:3.8` image.
+[Lagoon `MongoDB` image Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/mongo/Dockerfile). Based on the official package `mongodb` provided by the `alpine:3.8` image.
This Dockerfile is intended to be used to set up a standalone MongoDB database server.
diff --git a/docs/docker-images/nginx/README.md b/docs/docker-images/nginx/README.md
index 8681fbc81f..79d01e5118 100644
--- a/docs/docker-images/nginx/README.md
+++ b/docs/docker-images/nginx/README.md
@@ -1,6 +1,6 @@
# NGINX
-The [Lagoon `nginx` image Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/nginx/Dockerfile). Based on the official [`openresty/openresty` images](https://hub.docker.com/r/openresty/openresty/).
+The [Lagoon `nginx` image Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/nginx/Dockerfile). Based on the official [`openresty/openresty` images](https://hub.docker.com/r/openresty/openresty/).
This Dockerfile is intended to be used as a base for any web servers within Lagoon.
@@ -23,8 +23,6 @@ Build the content during the build process and inject it into the `nginx` contai
## Helpers
-{% embed url="https://www.youtube.com/watch?v=xQ7A-e8UPzY" caption="How do I add a redirect in Lagoon?" %}
-
### `redirects-map.conf`
In order to create redirects, we have `redirects-map.conf` in place. This helps you to redirect marketing domains to sub-sites or do non-www to www redirects. **If you have a lot of redirects, we suggest having `redirects-map.conf` stored next to your code for easier maintainability.**
@@ -39,7 +37,7 @@ Here's an example showing how to redirect `www.example.com` to `example.com` and
RUN echo "~^www.example.com http://example.com\$request_uri;" >> /etc/nginx/redirects-map.conf
```
-To get more details about the various types of redirects that can be achieved, see the documentation within the [`redirects-map.conf`](https://github.com/amazeeio/lagoon/blob/master/images/nginx/redirects-map.conf) directly.
+To get more details about the various types of redirects that can be achieved, see the documentation within the [`redirects-map.conf`](https://github.com/uselagoon/lagoon-images/blob/main/images/nginx/redirects-map.conf) directly.
After you put the `redirects-map.conf` in place, you also need to include it in your `nginx.dockerfile` in order to get the configuration file into your build.
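A minimal way to do that — assuming `redirects-map.conf` sits at the root of your repository — is a straight copy into the path NGINX reads it from (the same path the `RUN echo` example above appends to):

```text
COPY redirects-map.conf /etc/nginx/redirects-map.conf
```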
diff --git a/docs/docker-images/nginx/nginx-drupal.md b/docs/docker-images/nginx/nginx-drupal.md
index b3e32830f4..36fbd46b32 100644
--- a/docs/docker-images/nginx/nginx-drupal.md
+++ b/docs/docker-images/nginx/nginx-drupal.md
@@ -1,6 +1,6 @@
# NGINX-Drupal
-The [Lagoon `nginx-drupal` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/nginx-drupal/Dockerfile). Optimized to work with Drupal. Based on [Lagoon `nginx` image](./).
+The [Lagoon `nginx-drupal` Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/nginx-drupal/Dockerfile). Optimized to work with Drupal. Based on [Lagoon `nginx` image](./).
## Lagoon adaptions
@@ -8,7 +8,7 @@ This image is prepared to be used on Lagoon. There are therefore some things alr
* Folder permissions are automatically adapted with [`fix-permissions`](https://github.com/sclorg/s2i-base-container/blob/master/core/root/usr/bin/fix-permissions), so this image will work with a random user.
* To keep the `drupal.conf` configuration file as clean and customizable as possible, we added `include` directives in the main sections of the file: `server`, `location /`, `location @drupal` and `location @php`.
-* Further information in the section [Drupal.conf customization](nginx-drupal.md#drupal-conf-customization).
+* Further information in the section [`Drupal.conf` customization](nginx-drupal.md#drupal-conf-customization).
## Included Drupal configuration \(`drupal.conf`\)
diff --git a/docs/docker-images/php-cli/README.md b/docs/docker-images/php-cli/README.md
index f8ecdd0656..3355301b4d 100644
--- a/docs/docker-images/php-cli/README.md
+++ b/docs/docker-images/php-cli/README.md
@@ -1,6 +1,6 @@
# PHP-CLI
-The [Lagoon `php-cli` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/php/cli/Dockerfile). Based on [Lagoon `php-fpm` image](../php-fpm.md), it has all the needed command line tools for daily operations.
+The [Lagoon `php-cli` Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli). Based on [Lagoon `php-fpm` image](../php-fpm.md), it has all the needed command line tools for daily operations.
Containers \(or pods\) started from `cli` images are responsible for building code for Composer or Node.js based projects.
@@ -10,6 +10,17 @@ The image also contains database `cli`s for both MariaDB and PostgreSQL.
This Dockerfile is intended to be used as a base for any `cli` needs within Lagoon.
{% endhint %}
+## Supported versions
+
+* 5.6 \(available for compatibility, no longer officially supported\)
+* 7.0 \(available for compatibility, no longer officially supported\)
+* 7.2 \(available for compatibility, no longer officially supported\)
+* 7.3 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli/7.3.Dockerfile)
+* 7.4 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli/7.4.Dockerfile)
+* 8.0 \(coming soon\)
+
+All PHP versions use their own Dockerfiles.
+
## Lagoon adaptions
This image is prepared to be used on Lagoon. There are therefore some things already done:
diff --git a/docs/docker-images/php-cli/php-cli-drupal.md b/docs/docker-images/php-cli/php-cli-drupal.md
index 062f21ab89..71a5a3d50b 100644
--- a/docs/docker-images/php-cli/php-cli-drupal.md
+++ b/docs/docker-images/php-cli/php-cli-drupal.md
@@ -1,11 +1,22 @@
# PHP-CLI-Drupal
-The [Lagoon `php-cli-drupal` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/php/cli-drupal/Dockerfile) is optimized to work with Drupal. It is based on the [Lagoon `php-cli` image](./), and has all the command line tools needed for the daily maintenance of a Drupal website:
+The [Lagoon `php-cli-drupal` Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli-drupal) is optimized to work with Drupal. It is based on the [Lagoon `php-cli` image](./), and has all the command line tools needed for the daily maintenance of a Drupal website:
* `drush`
* `drupal console`
* `drush launcher` \(which will fall back to Drush 8 if no site-installed Drush is found\)
+## Supported versions
+
+* 5.6 \(available for compatibility, no longer officially supported\)
+* 7.0 \(available for compatibility, no longer officially supported\)
+* 7.2 \(available for compatibility, no longer officially supported\)
+* 7.3 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli-drupal/7.3.Dockerfile)
+* 7.4 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli-drupal/7.4.Dockerfile)
+* 8.0 \(coming soon\)
+
+All PHP versions use their own Dockerfiles.
+
## Lagoon adaptions
This image is prepared to be used on Lagoon. There are therefore some things already done:
diff --git a/docs/docker-images/php-fpm.md b/docs/docker-images/php-fpm.md
index d52ef372e7..b92671582c 100644
--- a/docs/docker-images/php-fpm.md
+++ b/docs/docker-images/php-fpm.md
@@ -1,6 +1,6 @@
# PHP-FPM
-The [Lagoon `php-fpm` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/php/fpm/Dockerfile). Based on [the official PHP Alpine images](https://hub.docker.com/_/php/).
+The [Lagoon `php-fpm` Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/php-fpm). Based on [the official PHP Alpine images](https://hub.docker.com/_/php/).
> _PHP-FPM \(FastCGI Process Manager\) is an alternative PHP FastCGI implementation with some additional features useful for sites of any size, especially busier sites._
>
@@ -12,27 +12,28 @@ The [Lagoon `php-fpm` Docker image](https://github.com/amazeeio/lagoon/blob/mast
This Dockerfile is intended to be used as a base for any `PHP` needs within Lagoon. This image itself does not create a web server, rather a `php-fpm` fastcgi listener. You may need to adapt the `php-fpm` pool config.
{% endhint %}
-## Supported Versions
+## Supported versions
-* 5.6 \(available for compatibility, not officially supported\)
-* 7.0 \(available for compatibility, not officially supported\)
-* 7.2 \(available for compatibility, not officially supported - End of Support by 30 Nov 2020 \)
-* 7.3
-* 7.4
+* 5.6 \(available for compatibility, no longer officially supported\)
+* 7.0 \(available for compatibility, no longer officially supported\)
+* 7.2 \(available for compatibility, no longer officially supported\)
+* 7.3 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/php-fpm/7.3.Dockerfile)
+* 7.4 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/php-fpm/7.4.Dockerfile)
+* 8.0 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/php-fpm/8.0.Dockerfile)
+
+All PHP versions use their own Dockerfiles.
{% hint style="info" %}
We stop updating End of Life (EOL) PHP images usually with the Lagoon release that comes after the officially communicated EOL date: https://www.php.net/supported-versions.php.
{% endhint %}
-All PHP versions use the same Dockerfile.
-
## Lagoon adaptions
This image is prepared to be used on Lagoon. There are therefore some things already done:
* Folder permissions are automatically adapted with [`fix-permissions`](https://github.com/sclorg/s2i-base-container/blob/master/core/root/usr/bin/fix-permissions), so this image will work with a random user.
* The `/usr/local/etc/php/php.ini` and `/usr/local/etc/php-fpm.conf`, plus all files within `/usr/local/etc/php-fpm.d/` , are parsed through [`envplate`](https://github.com/kreuzwerker/envplate) with a container-entrypoint.
-* See the [Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/fpm/Dockerfile) for installed `PHP` extensions.
+* See the [Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/php-fpm/7.4.Dockerfile) for installed `PHP` extensions.
* To install further extensions, extend your Dockerfile from this image. Install extensions according to the docs, under the heading[ How to install more PHP extensions.](https://github.com/docker-library/docs/blob/master/php/README.md#how-to-install-more-php-extensions)
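+
+To give an idea, a derived image installing an extra extension could look like this \(a sketch, not an official example; the base tag `amazeeio/php:7.4-fpm` and the `gd` extension are illustrative placeholders - adapt them to your PHP version and needs\):
+
+```text
+FROM amazeeio/php:7.4-fpm
+
+# system library required to compile the gd extension
+RUN apk add --no-cache libpng-dev
+
+# install the extension via the helper provided by the official PHP images
+RUN docker-php-ext-install gd
+```
+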
## Included PHP config.
@@ -68,7 +69,7 @@ Also, `php-fpm` error logging happens in `stderr`.
## default fpm-pool
-This image is shipped with an `fpm-pool` config \([`php-fpm.d/www.conf`](https://github.com/amazeeio/lagoon/blob/master/images/php/fpm/php-fpm.d/www.conf)\) that creates an `fpm-pool` and listens on port 9000. This is because we try to provide an image which already covers most needs for PHP, so you don't need to create your own. You are welcome to do so if you like, though!
+This image is shipped with an `fpm-pool` config \([`php-fpm.d/www.conf`](https://github.com/uselagoon/lagoon-images/blob/main/images/php-fpm/php-fpm.d/www.conf)\) that creates an `fpm-pool` and listens on port 9000. This is because we try to provide an image which already covers most needs for PHP, so you don't need to create your own. You are welcome to do so if you like, though!
Here a short description of what this file does:
diff --git a/docs/docker-images/postgres.md b/docs/docker-images/postgres.md
index ee41c6158c..1eca054101 100644
--- a/docs/docker-images/postgres.md
+++ b/docs/docker-images/postgres.md
@@ -1,10 +1,11 @@
# PostgreSQL
-The [Lagoon PostgreSQL Docker image](https://github.com/amazeeio/lagoon/blob/master/images/postgres/Dockerfile). Based on [the official PostgreSQL Alpine images](https://hub.docker.com/_/postgres).
+The [Lagoon PostgreSQL Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/postgres). Based on [the official PostgreSQL Alpine images](https://hub.docker.com/_/postgres).
## Supported versions
-* 11.6 [\[Dockerfile\]](https://github.com/amazeeio/lagoon/blob/master/images/postgres/Dockerfile)
+* 11 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/postgres/11.Dockerfile)
+* 12 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/postgres/12.Dockerfile)
## Tips & Tricks
diff --git a/docs/docker-images/rabbitmq.md b/docs/docker-images/rabbitmq.md
index 7bf245b9ba..67d3eba740 100644
--- a/docs/docker-images/rabbitmq.md
+++ b/docs/docker-images/rabbitmq.md
@@ -1,6 +1,6 @@
# RabbitMQ
-The [Lagoon RabbitMQ Dockerfile](https://github.com/amazeeio/lagoon/tree/master/images/rabbitmq) with management plugin installed. Based on the official `rabbitmq:3-management` image at [docker-hub](https://hub.docker.com/_/rabbitmq).
+The [Lagoon RabbitMQ Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/rabbitmq) with management plugin installed. Based on the official `rabbitmq:3-management` image at [docker-hub](https://hub.docker.com/_/rabbitmq).
This Dockerfile is intended to be used to set up a standalone RabbitMQ queue broker, as well as a base image to set up a cluster with high availability queue support by default \([Mirrored queues](https://www.rabbitmq.com/ha.html)\).
diff --git a/docs/docker-images/redis/README.md b/docs/docker-images/redis/README.md
index e75e85ab94..9d531e18a4 100644
--- a/docs/docker-images/redis/README.md
+++ b/docs/docker-images/redis/README.md
@@ -1,12 +1,13 @@
# Redis
-[Lagoon `Redis` image Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/redis/Dockerfile), based on [offical `redis:alpine` image](https://hub.docker.com/_/redis/).
+[Lagoon `Redis` image Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/redis), based on the [official `redis:alpine` image](https://hub.docker.com/_/redis/).
This Dockerfile is intended to be used to set up a standalone Redis _ephemeral_ server by default.
-## Version
+## Supported versions
-Currently supports alpine version 5.x.
+* 5 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/redis/5.Dockerfile)
+* 6 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/redis/6.Dockerfile)
## Lagoon adaptions
diff --git a/docs/docker-images/redis/redis-persistent.md b/docs/docker-images/redis/redis-persistent.md
index b1df0e0736..87ad6243b2 100644
--- a/docs/docker-images/redis/redis-persistent.md
+++ b/docs/docker-images/redis/redis-persistent.md
@@ -1,6 +1,6 @@
# Redis-persistent
-The [Lagoon `redis-persistent` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/redis-persistent/Dockerfile). Based on the [Lagoon `redis` image](./), it is intended for use if the Redis service must be in `persistent` mode \(ie. with a persistent volume where transactions will be saved\).
+The [Lagoon `redis-persistent` Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/redis-persistent/5.Dockerfile). Based on the [Lagoon `redis` image](./), it is intended for use if the Redis service must be in `persistent` mode \(ie. with a persistent volume where transactions will be saved\).
It differs from `redis` only for `FLAVOR` environment variable.
diff --git a/docs/docker-images/solr/README.md b/docs/docker-images/solr/README.md
index c34c31e5ab..40e24bf503 100644
--- a/docs/docker-images/solr/README.md
+++ b/docs/docker-images/solr/README.md
@@ -1,14 +1,14 @@
# Solr
-The [Lagoon `Solr` image Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/solr/Dockerfile). Based on the official [`solr:-alpine` images](https://hub.docker.com/_/solr).
+The [Lagoon `Solr` image Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/solr/7.7.Dockerfile). Based on the official [`solr:-alpine` images](https://hub.docker.com/_/solr).
This Dockerfile is intended to be used to set up a standalone Solr server with an initial core `mycore`.
## Supported Versions
-* 5.5
-* 6.6
-* 7.7
+* 5.5 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/solr/5.5.Dockerfile)
+* 6.6 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/solr/6.6.Dockerfile)
+* 7.7 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/solr/7.7.Dockerfile)
## Lagoon adaptions
diff --git a/docs/docker-images/solr/solr-drupal.md b/docs/docker-images/solr/solr-drupal.md
index 78f8b0e1ee..56c37808b4 100644
--- a/docs/docker-images/solr/solr-drupal.md
+++ b/docs/docker-images/solr/solr-drupal.md
@@ -1,6 +1,6 @@
# Solr-Drupal
-The [Lagoon `solr-drupal` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile), is a customized[`Solr` image](./) to use within Drupal projects in Lagoon.
+The [Lagoon `solr-drupal` Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/solr-drupal/7.7.Dockerfile) is a customized [`Solr` image](./) for use within Drupal projects in Lagoon.
The initial core created is `Drupal` , and it is created and configured starting from a Drupal customized and optimized configuration, copied from the [search\_api\_solr](https://www.drupal.org/project/search_api_solr) Drupal module.
@@ -8,9 +8,9 @@ The [documentation](./#lagoon-and-openshift-adaptions) outlines how to provide y
For each Solr version, there is a specific `solr-drupal:` Docker image.
-## Supported versions
+## Supported versions
-* 5.5
-* 6.6
-* 7.7
+* 5.5 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/solr-drupal/5.5.Dockerfile)
+* 6.6 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/solr-drupal/6.6.Dockerfile)
+* 7.7 [\[Dockerfile\]](https://github.com/uselagoon/lagoon-images/blob/main/images/solr-drupal/7.7.Dockerfile)
diff --git a/docs/docker-images/varnish/README.md b/docs/docker-images/varnish/README.md
index b239954767..4cc0b80c12 100644
--- a/docs/docker-images/varnish/README.md
+++ b/docs/docker-images/varnish/README.md
@@ -1,6 +1,6 @@
# Varnish
-The [Lagoon `Varnish` image Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/varnish/Dockerfile). Based on the [official `varnish` package](https://hub.docker.com/_/varnish) provided by `alpine:3.7` image.
+The [Lagoon `Varnish` image Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/varnish/Dockerfile). Based on the [official `varnish` package](https://hub.docker.com/_/varnish) provided by `alpine:3.7` image.
By default, `vmod-dynamic` and `vmod-bodyaccess` modules are installed.
diff --git a/docs/docker-images/varnish/varnish-drupal.md b/docs/docker-images/varnish/varnish-drupal.md
index 9ec92724fd..4bb30bd33b 100644
--- a/docs/docker-images/varnish/varnish-drupal.md
+++ b/docs/docker-images/varnish/varnish-drupal.md
@@ -1,6 +1,6 @@
# Varnish-Drupal
-The [Lagoon `varnish-drupal` Docker image](https://github.com/amazeeio/lagoon/blob/master/images/varnish-drupal/Dockerfile). It is a customized [`varnish` image](./) to use within Drupal projects in Lagoon.
+The [Lagoon `varnish-drupal` Docker image](https://github.com/uselagoon/lagoon-images/blob/main/images/varnish-drupal/Dockerfile). It is a customized [`varnish` image](./) to use within Drupal projects in Lagoon.
It differs from `varnish` only for `default.vcl` file, optimized for Drupal on Lagoon.
diff --git a/docs/docs/.gitbook/assets/0.gif b/docs/docs/.gitbook/assets/0.gif
deleted file mode 100644
index b93949847f..0000000000
Binary files a/docs/docs/.gitbook/assets/0.gif and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/2.gif b/docs/docs/.gitbook/assets/2.gif
deleted file mode 100644
index 73113734a4..0000000000
Binary files a/docs/docs/.gitbook/assets/2.gif and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/bb_webhook_1.png b/docs/docs/.gitbook/assets/bb_webhook_1.png
deleted file mode 100644
index 23dea3aded..0000000000
Binary files a/docs/docs/.gitbook/assets/bb_webhook_1.png and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/first_deployment_slack_2nd_success.jpg b/docs/docs/.gitbook/assets/first_deployment_slack_2nd_success.jpg
deleted file mode 100644
index e3d69f28c4..0000000000
Binary files a/docs/docs/.gitbook/assets/first_deployment_slack_2nd_success.jpg and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/gh_webhook_1.png b/docs/docs/.gitbook/assets/gh_webhook_1.png
deleted file mode 100644
index b20b4c985b..0000000000
Binary files a/docs/docs/.gitbook/assets/gh_webhook_1.png and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/gh_webhook_2.png b/docs/docs/.gitbook/assets/gh_webhook_2.png
deleted file mode 100644
index 6a0c5b672b..0000000000
Binary files a/docs/docs/.gitbook/assets/gh_webhook_2.png and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/gitlab_webhook.png b/docs/docs/.gitbook/assets/gitlab_webhook.png
deleted file mode 100644
index c7162d204b..0000000000
Binary files a/docs/docs/.gitbook/assets/gitlab_webhook.png and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/graphiql-2020-01-29-18-05-54.png b/docs/docs/.gitbook/assets/graphiql-2020-01-29-18-05-54.png
deleted file mode 100644
index 60912674a3..0000000000
Binary files a/docs/docs/.gitbook/assets/graphiql-2020-01-29-18-05-54.png and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/graphiql-2020-01-29-18-07-28.png b/docs/docs/.gitbook/assets/graphiql-2020-01-29-18-07-28.png
deleted file mode 100644
index 891ac952f3..0000000000
Binary files a/docs/docs/.gitbook/assets/graphiql-2020-01-29-18-07-28.png and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/step2_require.gif b/docs/docs/.gitbook/assets/step2_require.gif
deleted file mode 100644
index 285ab2a145..0000000000
Binary files a/docs/docs/.gitbook/assets/step2_require.gif and /dev/null differ
diff --git a/docs/docs/.gitbook/assets/webhooks-2020-01-23-12-40-16.png b/docs/docs/.gitbook/assets/webhooks-2020-01-23-12-40-16.png
deleted file mode 100644
index 982e3d2345..0000000000
Binary files a/docs/docs/.gitbook/assets/webhooks-2020-01-23-12-40-16.png and /dev/null differ
diff --git a/docs/drupal/drush-9.md b/docs/drupal/drush-9.md
index d5a3199b36..66a73dc776 100644
--- a/docs/drupal/drush-9.md
+++ b/docs/drupal/drush-9.md
@@ -18,9 +18,9 @@ Drush 9 provides a new command, `drush site:alias-convert` , which can convert D
In order to be able to use `drush site:alias-convert` , you need to do the following:
-* Rename the `aliases.drushrc.php` inside the `drush` folder to `lagooncd ...aliases.drushrc.php`.
+* Rename the `aliases.drushrc.php` inside the `drush` folder to `lagoon.aliases.drushrc.php`.
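+
+For example, from the project root \(assuming the aliases file lives in the project's `drush` folder, as described above\):
+
+```bash
+mv drush/aliases.drushrc.php drush/lagoon.aliases.drushrc.php
+```
+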
-### Generate Site aliases
+### Generate Site Aliases
You can now convert your Drush aliases by running the following command in your project using the `cli` container:
diff --git a/docs/drupal/first-deployment-of-drupal.md b/docs/drupal/first-deployment-of-drupal.md
index 785b25dff1..05516f65f6 100644
--- a/docs/drupal/first-deployment-of-drupal.md
+++ b/docs/drupal/first-deployment-of-drupal.md
@@ -44,7 +44,7 @@ With full Drush site alias support in Lagoon, you can synchronize a local databa
{% hint style="warning" %}
You may have to tell pygmy about your public keys before the next step.
-If you get an error like `Permission denied (publickey)`, check out the documentation here: [pygmy - adding ssh keys](https://pygmy.readthedocs.io/en/master/usage/#adding-ssh-keys)
+If you get an error like `Permission denied (publickey)`, check out the documentation here: [pygmy - adding ssh keys](https://docs.lagoon.sh/pygmy/ssh-agent)
{% endhint %}
First let's make sure that you can see the Drush site aliases:
diff --git a/docs/drupal/services/README.md b/docs/drupal/services/README.md
index 84ce4cd597..21759d3519 100644
--- a/docs/drupal/services/README.md
+++ b/docs/drupal/services/README.md
@@ -20,8 +20,6 @@ Redis is a fast, open-source, in-memory key-value data store for use as a databa
[Learn about Redis with Drupal.](redis.md)
-[Documentation on the plain Redis image](../../docker-images/varnish/)
-
[Documentation on the Redis-persistent image.](../../docker-images/redis/redis-persistent.md)
{% endtab %}
diff --git a/docs/drupal/services/solr.md b/docs/drupal/services/solr.md
index 1a0d2c1865..44906ff197 100644
--- a/docs/drupal/services/solr.md
+++ b/docs/drupal/services/solr.md
@@ -2,11 +2,11 @@
## Standard use
-For Solr 5.5, 6.6 and 7.7, we ship the default schema files provided by the [search\_api\_solr](https://www.drupal.org/project/search_api_solr) Drupal module. Add the Solr version you would like to use in your `docker-compose.yml` file, following [our example](https://github.com/amazeeio/drupal-example/blob/master/docker-compose.yml#L103-L111).
+For Solr 5.5, 6.6 and 7.7, we ship the default schema files provided by the [search\_api\_solr](https://www.drupal.org/project/search_api_solr) Drupal module. Add the Solr version you would like to use in your `docker-compose.yml` file, following [our example](https://github.com/amazeeio/drupal-example-simple/blob/63b3fc613260d5192b7e2dd0167c6fc85d8d9162/docker-compose.yml#L110).
## Custom schema
-To implement schema customizations for Solr in your project, look to how Lagoon [creates our standard images](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile).
+To implement schema customizations for Solr in your project, look to how Lagoon [creates our standard images](https://github.com/uselagoon/lagoon-images/blob/main/images/solr-drupal/7.7.Dockerfile).
* In the `solr` section of your `docker-compose.yml` file, replace `image: amazeeio/solr:7.7` with:
diff --git a/docs/drupal/services/varnish.md b/docs/drupal/services/varnish.md
index 5bd20c368e..548c3f750e 100644
--- a/docs/drupal/services/varnish.md
+++ b/docs/drupal/services/varnish.md
@@ -1,6 +1,6 @@
# Varnish
-We suggest using Drupal with a Varnish reverse proxy. Lagoon provides a `varnish-drupal` Docker image that has Varnish already configured with a [Drupal Varnish config](https://github.com/amazeeio/lagoon/blob/master/images/varnish-drupal/drupal.vcl).
+We suggest using Drupal with a Varnish reverse proxy. Lagoon provides a `varnish-drupal` Docker image that has Varnish already configured with a [Drupal Varnish config](https://github.com/uselagoon/lagoon-images/blob/main/images/varnish-drupal/drupal.vcl).
This Varnish config does the following:
@@ -8,11 +8,13 @@ This Varnish config does the following:
* It automatically caches any assets \(images, css, js, etc.\) for one month, and also sends this header to the browser, so browsers cache the assets as well. This happens for authenticated and non-authenticated requests.
* It has support for `BAN` and `URIBAN` which is used by the Drupal 8 purge module.
* It removes `utm_` and `gclid` from the URL parameter to prevent Google Analytics links from creating multiple cache objects.
-* Many other good things - just check out the [drupal.vcl](https://github.com/amazeeio/lagoon/blob/master/images/varnish-drupal/drupal.vcl).
+* Many other good things - just check out the [drupal.vcl](https://github.com/uselagoon/lagoon-images/blob/main/images/varnish-drupal/drupal.vcl).
## Usage with Drupal 8
-TL;DR: Check out the [Drupal 8 Example](https://github.com/amazeeio/drupal-example), it ships with the needed modules and needed Drupal configuration.
+**TL;DR**: [Check out the drupal8-advanced example in our examples repo](https://github.com/uselagoon/lagoon-examples), it ships with the needed modules and needed Drupal configuration.
+
+**Note**: Many of these examples are in the same `drupal-example-simple` repo, but on different branches/hashes. Be sure to get the exact branch from the examples list!
### Install Purge and Varnish Purge modules
diff --git a/docs/drupal/step-by-step-getting-drupal-ready-to-run-on-lagoon.md b/docs/drupal/step-by-step-getting-drupal-ready-to-run-on-lagoon.md
index b37c05f1fb..cb59644137 100644
--- a/docs/drupal/step-by-step-getting-drupal-ready-to-run-on-lagoon.md
+++ b/docs/drupal/step-by-step-getting-drupal-ready-to-run-on-lagoon.md
@@ -22,6 +22,10 @@ Drupal is shipped with `sites/*/settings*.php` and `sites/*/services*.yml` in `.
Unfortunately the Drupal community has not decided on a standardized `WEBROOT` folder name. Some projects put Drupal within `web`, and others within `docroot` or somewhere else. The Lagoon Drupal settings files assume that your Drupal is within `web`, but if this is different for your Drupal, please adapt the files accordingly.
+### Note about composer.json
+
+If you installed Drupal via Composer, please check your `composer.json` and make sure that the `name` is NOT `drupal/drupal`, as this could confuse Drush and other tools in the Drupal universe. Rename it to something like `myproject/drupal`.
+
## 2. Customise `docker-compose.yml`
Don't forget to customize the values in `lagoon-project` & `LAGOON_ROUTE` with your site-specific name & the URL you'd like to access the site with. Here's an example:
@@ -74,7 +78,7 @@ This might sound weird, as there was already a `composer install` executed durin
* In order to be able to edit files on the host and have them immediately available in the container, the default `docker-compose.yml` mounts the whole folder into the containers \(this happens with `.:/app:delegated` in the volumes section\). This also means that all dependencies installed during the Docker build are overwritten with the files on the host.
* Locally, you probably want dependencies defined as `require-dev` in `composer.json` to exist as well, while on a production deployment they would just use unnecessary space. So we run `composer install --no-dev` in the Dockerfile and `composer install` manually.
-If everything went well, open the `LAGOON_ROUTE` defined in `docker-compose.yml` \(for example [http://drupal.docker.amazee.io](http://drupal.docker.amazee.io)\) and you should be greeted by a nice Drupal error. Don't worry - that's ok right now, most important is that it tries to load a Drupal site.
+If everything went well, open the `LAGOON_ROUTE` defined in `docker-compose.yml` \(for example `http://drupal.docker.amazee.io`\) and you should be greeted by a nice Drupal error. Don't worry - that's ok right now, most important is that it tries to load a Drupal site.
If you get a 500 or similar error, make sure everything loaded properly with Composer.
@@ -114,7 +118,7 @@ Site path : sites/default
{% hint style="warning" %}
You may have to tell pygmy about your public key before the next step.
-If you get an error like `Permission denied (publickey)`, check out the documentation here: [pygmy - adding ssh keys](https://pygmy.readthedocs.io/en/master/usage/#adding-ssh-keys)
+If you get an error like `Permission denied (publickey)`, check out the documentation here: [pygmy - adding ssh keys](https://docs.lagoon.sh/pygmy/ssh-agent)
{% endhint %}
Now it is time to install Drupal \(if instead you would like to import an existing SQL file, please [skip to step 7](step-by-step-getting-drupal-ready-to-run-on-lagoon.md#7-import-existing-database-dump), but we suggest you start with a clean Drupal installation in the beginning to be sure everything works\).
diff --git a/docs/drupal/subfolders.md b/docs/drupal/subfolders.md
index 73e5d80711..f5d74c4666 100644
--- a/docs/drupal/subfolders.md
+++ b/docs/drupal/subfolders.md
@@ -46,6 +46,8 @@ location ~ ^/subfolder {
proxy_pass $subfolder_drupal_host;
proxy_set_header Host $proxy_host;
# $proxy_host will be automatically generated by Nginx based on proxy_pass (it needs to be without scheme and port).
+
+ expires off; # make sure we honor cache headers from the proxy and not overwrite them
```
{% endtab %}
{% endtabs %}
diff --git a/docs/logging/kibana-examples.md b/docs/logging/kibana-examples.md
index 66036b7a5a..d81b4269c5 100644
--- a/docs/logging/kibana-examples.md
+++ b/docs/logging/kibana-examples.md
@@ -99,5 +99,7 @@ Also note that you can save your visualizations \(and searches\)! That will make
## Troubleshooting
-{% embed url="https://www.youtube.com/watch?v=hyUMRlQTXEA" caption="How do I fix an internal server error in Kibana?" %}
+{% embed url="https://www.youtube.com/watch?v=BuQo5J0Qc2c&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=5" %}
+
+
diff --git a/docs/resources/faq.md b/docs/resources/faq.md
index b9d77c6501..27f57a419d 100644
--- a/docs/resources/faq.md
+++ b/docs/resources/faq.md
@@ -26,7 +26,7 @@ If you ever need to recover or restore a backup feel free to submit a ticket or
## How can I download a database dump?
-{% embed url="https://www.youtube.com/watch?v=amkyV2skidc" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=bluTyxKqLbw&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=3" %}
## I'm getting an invalid SSL certificate error
@@ -58,7 +58,7 @@ Upgrading Drush should fix that for you. We strongly suggest that you use versio
## I'm seeing an Internal Server Error when trying to access my Kibana logs!
-{% embed url="https://www.youtube.com/watch?v=hyUMRlQTXEA" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=BuQo5J0Qc2c&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=5" %}
No need to panic! This usually happens when a tenant has not been selected. To fix this, follow these steps:
@@ -73,17 +73,31 @@ You should now be able to see your logs.
I'm unable to SSH into any environment. I'm getting the following message: `Permission denied (publickey)`.
-## How can I check the status of a build?
+## How can I check the status of a build?
+
+{% embed url="https://www.youtube.com/watch?v=PyrlZqTjf68&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd" %}
+
+## How do I add a cron job?
+
+{% embed url="https://www.youtube.com/watch?v=Yd\_JfDyfbR0&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=2" %}
+
+## How do I add a new route?
+
+{% embed url="https://www.youtube.com/watch?v=vQxh87F3fW4&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=4" %}
+
+## How do I remove a route?
+
+You will need to contact your helpful Lagoon administrator should you need to remove a route. You can use either the private RocketChat or Slack channel that was set up for you to communicate - if not, you can always reach us at support@amazee.io.
-{% embed url="https://www.youtube.com/watch?v=tVx-IGaN0Bg" caption="How can I check the status of a build?" %}
## **When I run `pygmy status`, no keys are loaded:**
-You'll need to load your SSH key into pygmy. Here's how: [https://pygmy.readthedocs.io/en/master/troubleshooting/](https://pygmy.readthedocs.io/en/master/troubleshooting/)
+You'll need to load your SSH key into pygmy. Here's how: [https://docs.lagoon.sh/pygmy/ssh-agent](https://docs.lagoon.sh/pygmy/ssh-agent)
## **When I run `drush sa` no aliases are returned:**
-This typically indicates an issue with Pygmy. You can find our troubleshooting docs for Pygmy here: [https://pygmy.readthedocs.io/en/master/troubleshooting/](https://pygmy.readthedocs.io/en/master/troubleshooting/).
+This typically indicates an issue with Pygmy. You can find our troubleshooting docs for Pygmy here: [https://docs.lagoon.sh/pygmy/troubleshooting](https://docs.lagoon.sh/pygmy/troubleshooting).
## My deployments fail with a message saying: "drush needs a more functional environment"
@@ -91,7 +105,7 @@ This usually means that there is no database uploaded to the project. [Follow ou
## When I start pygmy I see an "address already in use" error?
-\`\`Error starting userland proxy: listen tcp 0.0.0.0:80: bind: address already in use Error: failed to start containers: amazeeio-haproxy\`\`\`
+`Error starting userland proxy: listen tcp 0.0.0.0:80: bind: address already in use Error: failed to start containers: amazeeio-haproxy`
This is a known error! Most of the time it means that there is already something running on port 80. You can find the culprit by running the following query:
@@ -105,6 +119,10 @@ That should list everything running on port 80. Kill the process running on port
You can make that change using the Lagoon API! You can find the documentation for this change [in our GraphQL documentation](../administering-lagoon/graphql-queries.md#updating-objects).
+## How do I add a redirect?
+
+{% embed url="https://www.youtube.com/watch?v=rWb-PkRDhY4&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=6" %}
+
## How can I add new users \(and SSH keys\) to my project/group?
This can be done via the Lagoon API. You can find the steps documentation for this change [in our GraphQL documentation](../administering-lagoon/graphql-queries.md#allowing-access-to-the-project).
@@ -123,7 +141,7 @@ Once you've added a runtime environment variable to your production environment
For cloud hosting customers, you can SFTP to your Lagoon environment by using the following information:
-* **Server Hostname**: ssh.lagoon.amazeeio.cloud
+* **Server Hostname**: `ssh.lagoon.amazeeio.cloud`
* **Port**: 32222
* **Username**: <Project-Environment-Name>
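+
+For example, with a command line SFTP client \(a minimal sketch using the connection details above; replace the placeholder username\):
+
+```bash
+sftp -P 32222 <Project-Environment-Name>@ssh.lagoon.amazeeio.cloud
+```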
diff --git a/docs/resources/glossary.md b/docs/resources/glossary.md
index 17dfee4321..2ff3eb7d1a 100644
--- a/docs/resources/glossary.md
+++ b/docs/resources/glossary.md
@@ -19,6 +19,7 @@ description: >-
| DNS | Domain Name System |
| Docker | A container engine using Linux features and automating application deployment. |
| Drupal | Open-source Content Management System |
+| Drush | A command-line shell and scripting interface for Drupal. |
| EC2 | Amazon Elastic Compute Cloud |
| Elasticsearch | An open-source search engine. It provides a distributed, multi-tenant-capable full-text search engine with a web interface and schema-free JSON documents. |
| Galera | A generic synchronous multi-master replication library for transactional databases. |
@@ -50,6 +51,7 @@ description: >-
| Node.js | An open-source, cross-platform, JavaScript runtime environment that executes JavaScript code outside of a browser. |
| OpenShift | Container application platform that brings Docker and Kubernetes to the enterprise. |
| PHP | PHP \(Personal Home Page\) is a general-purpose programming language originally designed for web development. |
+| PHPStorm | A commercial IDE for PHP, developed by JetBrains. |
| Pod | A group of containers that are deployed together on the same host. The basic unit that Kubernetes works with. |
| PostgreSQL | A free and open-source relational database management system emphasizing extensibility and technical standards compliance. |
| Public/Private Key | Public-key encryption is a cryptographic system that uses two keys -- a public key known to everyone and a private or secret key known only to the recipient of the message. |
@@ -61,10 +63,10 @@ description: >-
| Solr | An open-source enterprise-search platform, written in Java. |
| SSH | Secure Socket Shell, a network protocol that provides administrators with a secure way to access a remote computer. |
| SSL | Secure Socket Layer |
-| Symfony | |
+| Symfony | A PHP web application framework and a set of reusable PHP components/libraries. Drupal 8 and later are based on Symfony. |
| TCP | Transmission Control Protocol, a standard that defines how to establish and maintain a network conversation through which application programs can exchange data. |
| TLS | Transport Layer Security |
-| Trivy | |
+| Trivy | A simple and comprehensive vulnerability scanner for containers, suitable for CI. |
| TTL | Time to live or hop limit is a mechanism that limits the lifespan or lifetime of data in a computer or network. |
| Varnish | A powerful, open-source HTTP engine/reverse HTTP proxy that can speed up a website by caching \(or storing\) a copy of a webpage the first time a user visits. |
| VM | Virtual Machine |
diff --git a/docs/resources/tutorials-and-webinars.md b/docs/resources/tutorials-and-webinars.md
index 2011676525..c34c406367 100644
--- a/docs/resources/tutorials-and-webinars.md
+++ b/docs/resources/tutorials-and-webinars.md
@@ -38,25 +38,37 @@
## How do I fix an internal server error in Kibana?
-{% embed url="https://www.youtube.com/watch?v=is-mt6oBQs8" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=BuQo5J0Qc2c&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=5" %}
+
+
## How do I add a new route?
-{% embed url="https://www.youtube.com/watch?v=0D8vp55z1qc" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=vQxh87F3fW4&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=4" %}
+
+
## How do I check the status of a build?
-{% embed url="https://www.youtube.com/watch?v=UM6lM12ACK4" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=PyrlZqTjf68&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=1" %}
+
+
## How do I add a redirect in Lagoon?
-{% embed url="https://www.youtube.com/watch?v=zmlfJT1FlL0" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=rWb-PkRDhY4&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=6" %}
+
+
## How do I download a database dump?
-{% embed url="https://www.youtube.com/watch?v=vnr3EITlQys" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=bluTyxKqLbw&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=3" %}
+
+
## How do I add a cron job?
-{% embed url="https://youtube.com/watch?v=7mtw8wM\_Ntg" caption="" %}
+{% embed url="https://www.youtube.com/watch?v=Yd\_JfDyfbR0&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=2" %}
+
+
diff --git a/docs/using-lagoon-advanced/active_standby.md b/docs/using-lagoon-advanced/active_standby.md
index 5a254eb7c2..b6b1a63bbc 100644
--- a/docs/using-lagoon-advanced/active_standby.md
+++ b/docs/using-lagoon-advanced/active_standby.md
@@ -141,7 +141,7 @@ mutation updateProject {
## Notes
-When the active/standby trigger has been executed, the `productionEnvironment` and `standbyProductionEnvironments` will switch within the Lagoon API. Both environments are still classed as `production` environment types. We use the `productionEnvironment` to determine which one is labelled as `active`. For more information on the differences between environment types, read the [documentation for `environment types`](https://github.com/amazeeio/lagoon/tree/5cd57792f638b841dca84d99b7fcbf06af793817/docs/using-lagoon-advanced/environment_types.md#environment-types)
+When the active/standby trigger has been executed, the `productionEnvironment` and `standbyProductionEnvironments` will switch within the Lagoon API. Both environments are still classed as `production` environment types. We use the `productionEnvironment` to determine which one is labelled as `active`. For more information on the differences between environment types, read the [documentation for `environment types`](environment-types.md)
```graphql
query projectByName {
diff --git a/docs/using-lagoon-advanced/backups.md b/docs/using-lagoon-advanced/backups.md
index f726e91ab0..5c88136fca 100644
--- a/docs/using-lagoon-advanced/backups.md
+++ b/docs/using-lagoon-advanced/backups.md
@@ -8,9 +8,9 @@ description: >-
## Short-Term Backups
-These backups are provided by Lagoon itself, and are implemented for databases **only**. Lagoon will automatically instruct the `MariaDB` and `Postgres` [services types](service-types.md) to set up a cron job which creates backups once a day \(see example [backup script](https://github.com/amazeeio/lagoon/blob/docs/images/mariadb/mysql-backup.sh) for MariaDB\). These backups are kept for four days and automatically cleaned up after that.
+These backups are provided by Lagoon itself, and are implemented for databases **only**. Lagoon will automatically instruct the `MariaDB` and `Postgres` [services types](service-types.md) to set up a cron job which creates backups once a day \(see example [backup script](https://github.com/uselagoon/lagoon-images/blob/main/images/mariadb/mysql-backup.sh) for MariaDB\). These backups are kept for four days and automatically cleaned up after that.
-These backups are accessible for developers directly by connecting via the [remote shell](remote-shell.md) to the corresponding container \(like `mariadb`\) and checking the [folder](https://github.com/amazeeio/lagoon/blob/docs/images/mariadb/mysql-backup.sh#L24) where the backups are stored\). They can then be downloaded, extracted, or used in any other way.
+These backups are accessible for developers directly by connecting via the [remote shell](remote-shell.md) to the corresponding container \(like `mariadb`\) and checking the [folder](https://github.com/uselagoon/lagoon-images/blob/main/images/mariadb/mysql-backup.sh#L24) where the backups are stored\). They can then be downloaded, extracted, or used in any other way.
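+
+For example, to look at the MariaDB backups of an environment \(a sketch; the project/environment name follows the remote shell docs, and the backup path is the one used by the linked backup script - verify it in your container\):
+
+```bash
+# open a shell in the mariadb container of the environment
+ssh -p 32222 -t drupal-example-master@ssh.lagoon.amazeeio.cloud service=mariadb
+
+# inside the container, list the daily backups
+ls -lh /var/lib/mysql/backup
+```
+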
## Mid-Term Backups
diff --git a/docs/using-lagoon-advanced/environment-idling.md b/docs/using-lagoon-advanced/environment-idling.md
new file mode 100644
index 0000000000..a8dbd2804c
--- /dev/null
+++ b/docs/using-lagoon-advanced/environment-idling.md
@@ -0,0 +1,31 @@
+# Environment Idling
+
+### What is the Environment Idler?
+
+Lagoon automatically idles environments if they have been unused for a couple of hours. This is done in order to reduce the load on the Kubernetes clusters and improve the overall performance of production environments and development environments that are actually in use.
+
+### How does an environment get idled?
+
+The Environment Idler has many different configuration capabilities. Here are the defaults of a standard Lagoon installation \(these could be quite different in your Lagoon, so check with your Lagoon administrator!\):
+
+* Idling is tried every 4 hours.
+* Production environments are never idled.
+* CLI pods are idled if they don't include a cronjob and if there is no remote shell connection active.
+* All other services and pods are idled if there was no traffic on the environment in the last 4 hours.
+* If there is an active build happening, there will be no idling.
+
+### How does an environment get un-idled?
+
+The Lagoon Idler will automatically un-idle an environment as soon as it is visited; simply visiting any URL of the environment will start it again.
+
+The un-idling will take a couple of seconds, as the Kubernetes cluster needs to start all containers again. During this time, a waiting screen is shown to the visitor, telling them that their environment is currently being started.
+
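+For example, an environment can be woken from the command line by requesting any of its routes \(a sketch; the URL is a placeholder for one of your environment's routes\):
+
+```bash
+curl -sI https://nginx.main.example-project.amazee.io | head -n 1
+```
+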
+### **Can I disable / prevent the Idler from idling my environment?**
+
+Yes. There is an `autoIdle` field on the project \(impacts all environments\) and on the environment \(if you need to target just one environment\) that controls whether idling is allowed to take place. A value of `1` indicates the project/environment is eligible for idling. If the project is set to `0`, the environments will never be idled, even if an environment is set to `1`.
+The default is always `1` \(idling is enabled\).
+
+Talk to your Lagoon administrator if you are unsure how to set these project/environment fields.
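+
+If you have access to the Lagoon API, the `autoIdle` field can be changed with a standard GraphQL call \(a minimal sketch; the endpoint URL, token, and project id `1234` are placeholders, and the mutation shape follows the `updateProject` examples in the GraphQL docs\):
+
+```bash
+curl -s -X POST "https://api.lagoon.example.com/graphql" \
+  -H "Authorization: Bearer $LAGOON_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"query": "mutation { updateProject(input: {id: 1234, patch: {autoIdle: 0}}) { id autoIdle } }"}'
+```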
+
+
+
diff --git a/docs/using-lagoon-advanced/graphql.md b/docs/using-lagoon-advanced/graphql.md
index e58709d747..636d18e92f 100644
--- a/docs/using-lagoon-advanced/graphql.md
+++ b/docs/using-lagoon-advanced/graphql.md
@@ -52,7 +52,7 @@ query whatIsThere {
}
```
-And press the ▶️button \(or press CTRL+ENTER\).
+And press the ▶️ button \(or press CTRL+ENTER\).
![Entering a query in the GraphiQL UI.](../.gitbook/assets/graphiql-2020-01-29-18-07-28.png)
diff --git a/docs/using-lagoon-advanced/project-default-users-keys.md b/docs/using-lagoon-advanced/project-default-users-keys.md
new file mode 100644
index 0000000000..9d9e0e578e
--- /dev/null
+++ b/docs/using-lagoon-advanced/project-default-users-keys.md
@@ -0,0 +1,11 @@
+# Project default users and SSH keys
+
+When a Lagoon project is created, by default an associated SSH "project key" is generated and the private key made available inside the CLI pods of the project. A service account `default-user@project` is also created and given `MAINTAINER` access to the project. The SSH "project key" is attached to that `default-user@project`.
+
+The result of this is that from inside the CLI pod of any environment it is possible to SSH to any other environment within the same project. This access is used for running tasks from the command line, such as synchronizing databases between environments (e.g. `drush sql-sync`).
+
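+A minimal sketch of such a task from inside a CLI pod (the alias names are placeholders; list the real ones with `drush sa`):
+
+```bash
+# copy the database from the production environment into this one
+drush sql-sync @lagoon.production @self
+```
+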
+There is more information on the `MAINTAINER` role available in the [RBAC](https://docs.lagoon.sh/lagoon/administering-lagoon/rbac) documentation.
+
+## Specifying the project key
+
+It is possible to specify an SSH private key when creating a project, but this is not recommended as it has security implications.
diff --git a/docs/using-lagoon-advanced/remote-shell.md b/docs/using-lagoon-advanced/remote-shell.md
index 8b6018bd23..8d2020421d 100644
--- a/docs/using-lagoon-advanced/remote-shell.md
+++ b/docs/using-lagoon-advanced/remote-shell.md
@@ -64,3 +64,16 @@ ssh -p 32222 -t drupal-example-master@ssh.lagoon.amazeeio.cloud service=nginx co
This will execute `whoami` within the `cli` container.
+## Multiple SSH Keys
+
+If you have multiple SSH keys, you can specify which key to use for a given domain by setting this in your `~/.ssh/config` file.
+
+`~/.ssh/config`:
+```text
+Host *
+ AddKeysToAgent yes
+ UseKeychain yes
+ IdentityFile ~/.ssh/[YOUR-DEFAULT-PRIVATE-KEY]
+Host ssh.lagoon.amazeeio.cloud
+ IdentityFile ~/.ssh/[YOUR-PRIVATE-KEY-FOR-USE-ON-LAGOON]
+```
diff --git a/docs/using-lagoon-advanced/setting-up-xdebug-with-lagoon.md b/docs/using-lagoon-advanced/setting-up-xdebug-with-lagoon.md
new file mode 100644
index 0000000000..70c61fc84d
--- /dev/null
+++ b/docs/using-lagoon-advanced/setting-up-xdebug-with-lagoon.md
@@ -0,0 +1,88 @@
+# Setting up Xdebug with Lagoon
+
+{% hint style="info" %}
+Note: amazee.io's base images are currently using Xdebug v2, and will be upgraded soon. You can [keep track on this issue](https://github.com/uselagoon/lagoon-images/issues/40).
+{% endhint %}
+
+## Enable Xdebug Extension
+
+Base images provided by amazee.io are pre-configured to include `Xdebug`. For performance reasons, the extension is not installed by default. To install Xdebug, set the environment variable `XDEBUG_ENABLE` to any string.
+
+1. **Locally** \(pygmy and Lando\)
+ 1. If you’re using the amazee.io example `docker-compose.yml` file, this setting already exists. Uncomment these lines: [https://github.com/amazeeio/drupal-example-simple/blob/c352e7dc9b2d452bc8a3aecbdd38c8b46981600e/docker-compose.yml\#L16-L17](https://github.com/amazeeio/drupal-example-simple/blob/c352e7dc9b2d452bc8a3aecbdd38c8b46981600e/docker-compose.yml#L16-L17).
+ 2. Make sure to rebuild and restart the container after changing this setting.
+2. **Remotely** \(dev/prod\)
+ 1. You can [use the Lagoon API to add the environment variable to a running environment](environment-variables.md#runtime-environment-variables-lagoon-api). Make sure to redeploy the environment after changing this setting.
+
+## Activate Xdebug Extension
+
+Even if Xdebug is installed, it may not be enabled. Base images provided by amazee.io are pre-configured to require an activation trigger for the Xdebug extension to start a session. You can [view the complete documentation](https://2.xdebug.org/docs/remote#starting) for starting the debugger, but the most straightforward instructions are below.
+
+### CLI
+
+The `php-cli` image is configured to _always_ activate Xdebug when it’s installed, so there is nothing else that needs to be done. Running any PHP script will start a debugging session.
+
+### Web
+
+[Install a browser extension](https://2.xdebug.org/docs/remote#browser-extensions) to set/unset an activation cookie.
+
+Make sure the activation cookie is set for the website you want to start debugging.
+
+## Configure PHPStorm
+
+1. PHPStorm is configured correctly out of the box.
+2. Click the “**Start Listening for PHP Debug Connections**” icon in the toolbar.
+3. Load a webpage or run a Drush command.
+4. On first run, PHPStorm should pop up a window asking you to:
+ 1. Confirm path mappings.
+ 2. Select the correct file locally that was triggered on the server.
+
+![](https://lh5.googleusercontent.com/V9lugsEA2VWQSe88tUFQ73ihGoOZ24YIigAfvh2PRl-ACz7jbvk1qKniLvEdhvBPKI5XiVnegd2gC48ICphTSJqZsgCSfrJaIhfgFb5Xp8Jt7gFoyCqn1AjYeRAd0KqJ7w6WUNg9)
+
+## Configure Visual Studio Code
+
+1. [Install the PHP Debug extension](https://marketplace.visualstudio.com/items?itemName=felixfbecker.php-debug) by Felix Becker.
+2. [Follow the instructions](https://marketplace.visualstudio.com/items?itemName=felixfbecker.php-debug#vs-code-configuration) to create a basic `launch.json` for PHP.
+3. Add correct path mappings. For a typical Drupal site, an example would be:
+
+ ```text
+ "pathMappings": {
+ "/app": "${workspaceFolder}",
+ },
+ ```
+
+4. In the **Run** tab of Visual Studio Code, click the green arrow next to “**Listen for Xdebug**”.
+5. Load a webpage or run a Drush command.
+
+## Troubleshooting
+
+* Verify that the Xdebug extension is installed. The best way to do this on a Drupal site is to check the PHP status page. You should find a section about Xdebug and all of its settings.
+
+![phpinfo results](https://lh4.googleusercontent.com/Vj4VmT8NTQe-losaowuw5ni3px2oFaGpcANEcp6Tqun3TUyI0A4pPw6PU1n57viw4xcZep0tubUthjffasX17YuhX4TbmOnqUCbo683mubW6vGjCgEvVA4dcIFmkxRci_pCacYnI)
+
+* Verify the following settings:
+
+| Setting | Value |
+| :--- | :--- |
+| `xdebug.remote_enabled` | On |
+| `xdebug.remote_host` | `host.docker.internal` or your IP address |
+| `xdebug.remote_port` | 9000 |
+
+* Verify you have the activation cookie set. You can use the browser tools in Chrome or Firefox to check that a `XDEBUG_SESSION` cookie is set.
+* Verify that Xdebug is activated and attempting to start a debug session with your computer. You can use the `nc -l 9000` command line tool to open the Xdebug port. If everything is configured in PHP correctly, you should get an Xdebug init response when you load a webpage or run a Drush command:
+
+![Xdebug init response](https://lh5.googleusercontent.com/jCK7APIdUwp7XXOZaADYTArPorzqT-v-zhT-6w_A4BTJqpn62RQdrG17NC4jpjZzXW83B0nL_BcKt5hA5SBZ8jP2SFKeJpXpRYsinM2h9Yk6JAL3sMOtEgVCCQjSzFmhXMLnOUdP)
+
+* Verify that the `xdebug.remote_host` has been set correctly. For local debugging with Docker for Mac, this value should be `host.docker.internal`. For remote debugging, this value should be your IP address. If this value was not correctly determined, you can override it by setting the `DOCKERHOST` environment variable.
+* Verify that Docker for Mac networking is not broken. On your host machine, run `nc -l 9000`, then in a new terminal window, run:
+
+ ```text
+ docker-compose run cli nc -zv host.docker.internal 9000
+ ```
+
+* You should see a message like: `host.docker.internal (192.168.65.2:9000) open`.
+* When using Lando locally, in order to debug scripts run from the CLI you must first SSH into the cli container via `lando ssh`; see the sketch below. You won’t be able to debug things by running `lando drush` or `lando php`.
+* You can enable the Xdebug log by setting the `XDEBUG_LOG` environment variable. Logs will be saved to `/tmp/xdebug.log`.
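+
+A minimal sketch of that Lando workflow \(assuming Drush is available in the Lando cli container\):
+
+```bash
+# enter the cli container first
+lando ssh
+
+# commands run inside the container can now trigger an Xdebug session
+drush status
+```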
+
+
+
diff --git a/docs/using-lagoon-the-basics/build-and-deploy-process.md b/docs/using-lagoon-the-basics/build-and-deploy-process.md
index 05b42e921d..fa2c9b019f 100644
--- a/docs/using-lagoon-the-basics/build-and-deploy-process.md
+++ b/docs/using-lagoon-the-basics/build-and-deploy-process.md
@@ -6,25 +6,25 @@ Watch the video below for a walk-through of the deployment process.
{% embed url="https://www.youtube.com/watch?v=XiaH7gqUXWc" caption="Lagoon Deployment Demo Video" %}
-## 1. Set up OpenShift Project/Kubernetes Namespace for Environment
+### 1. Set up OpenShift Project/Kubernetes Namespace for Environment
First, Lagoon checks if the OpenShift project/Kubernetes namespace for the given environment exists and is correctly set up. It will make sure that we have the needed service accounts, create secrets, and will configure environment variables into a ConfigMap `lagoon-env` which is filled with information like the environment type and name, the Lagoon project name, and so on.
-## 2. Git Checkout & Merge
+### 2. Git Checkout & Merge
Next, Lagoon will check out your code from Git. It needs that to be able to read the `.lagoon.yml` ,`docker-compose.yml` and any `.env` files, but also to build the Docker images.
Note that Lagoon will only process these actions if the branch/PR matches the branch regex set in Lagoon. Based on how the deployment has been triggered, different things will happen:
-### **Branch Webhook Push**
+#### **Branch Webhook Push**
If the deployment is triggered automatically via a Git webhook and is for a single branch, Lagoon will check out the Git SHA which is included in the webhook payload. This will trigger a deployment for every Git SHA pushed.
-### **Branch REST trigger**
+#### **Branch REST trigger**
If you trigger a branch deployment manually via the REST API \(via the UI, or GraphQL\) and do NOT define a `SHA` in the POST payload, Lagoon will just check out the latest commit in that branch and deploy it.
-### **Pull Requests**
+#### **Pull Requests**
If the deployment is a pull request \(PR\) deployment, Lagoon will load the base and the HEAD branch and SHAs for the pull request and will:
@@ -35,7 +35,7 @@ If the deployment is a pull request \(PR\) deployment, Lagoon will load the base
If the merge fails, Lagoon will also stop and inform you about this.
-## 3. Build Image
+### 3. Build Image
For each service defined in the `docker-compose.yml` Lagoon will check if images need to be built or not. If they need to be built, this will happen now. The order of building is based on the order they are configured in `docker-compose.yml` , and some build arguments are injected:
@@ -56,59 +56,63 @@ Also, if this is a pull request build:
Additionally, for each already built image, its name is also injected. If your `docker-compose.yml` is configured to first build the `cli` image and then the `nginx` image, the name of the `nginx` image is injected as `NGINX_IMAGE`.
-## 4. Configure OpenShift/Kubernetes Services and Routes
+### 4. Configure OpenShift/Kubernetes Services and Routes
Next, Lagoon will configure OpenShift/Kubernetes with all services and routes that are defined from the service types, plus possible additional custom routes that you have defined in `.lagoon.yml`.
In this step it will expose all defined routes in the `LAGOON_ROUTES` as comma separated URLs. It will also define one route as the "main" route, in this order:
1. If custom routes defined: the first defined custom route in `.lagoon.yml`.
-2. The first auto generated one from a service defined in `docker-compose.yml`.
+2. The first auto-generated route from a service defined in `docker-compose.yml`.
3. None.
The "main" route is injected via the `LAGOON_ROUTE` environment variable.
-## 5. Push and Tag Images
+### 5. Push and Tag Images
Now it is time to push the previously built Docker images into the internal Docker image registry.
Services that didn't specify a Dockerfile to be built in `docker-compose.yml` and only gave an image are also tagged, and will cause the internal Docker image registry to know about the images, so that they can be used in containers.
-## 6. Persistent Storage
+### 6. Persistent Storage
Lagoon will now create persistent storage \(PVC\) for each service that needs and has requested persistent storage.
-## 7. Cron jobs
+### 7. Cron jobs
For each service that requests a cron job \(like MariaDB\), plus for each custom cron job defined in `.lagoon.yml`, Lagoon will now generate the cron job environment variables which are later injected into the [Deployment](https://docs.openshift.com/container-platform/4.4/applications/deployments/what-deployments-are.html#deployments-and-deploymentconfigs_what-deployments-are).
-## 8. Run defined pre-rollout tasks
+### 8. Run defined pre-rollout tasks
-Now Lagoon will check the `.lagoon.yml` file for defined tasks in `pre-rollout` and will run them one by one in the defined services. Note that these tasks are executed on the pods currently running \(so cannot utilise features or scripts that only exist in the latest commit\) and therefore they are also not run on first deployments.
+Now Lagoon will check the `.lagoon.yml` file for defined tasks in `pre-rollout` and will run them one by one in the defined services. Note that these tasks are executed on the pods currently running \(so cannot utilize features or scripts that only exist in the latest commit\) and therefore they are also not run on first deployments.
If any of them fail, Lagoon will immediately stop and notify you, and the rollout will not proceed.
-## 9. DeploymentConfigs, Statefulsets, Daemonsets
+### 9. DeploymentConfigs, Statefulsets, Daemonsets
-This is probably the most important step. Based on the defined service type, Lagoon will create the [Deployment](https://docs.openshift.com/container-platform/4.4/applications/deployments/what-deployments-are.html#deployments-and-deploymentconfigs_what-deployments-are), [Statefulset](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) or [Daemonsets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) for the service. \(Note that Deployments are analogous DeploymentConfigs in Openshift\)
+This is probably the most important step. Based on the defined service type, Lagoon will create the [Deployment](https://docs.openshift.com/container-platform/4.4/applications/deployments/what-deployments-are.html#deployments-and-deploymentconfigs_what-deployments-are), [Statefulset](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) or [Daemonsets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) for the service. \(Note that Deployments are analogous to DeploymentConfigs in OpenShift.\)
It will include all previously gathered information like the cron jobs, the location of persistent storage, the pushed images and so on.
Creation of these objects will also automatically cause OpenShift/Kubernetes to trigger new deployments of the pods if necessary, like when an environment variable has changed or an image has changed. But if there is no change, there will be no deployment! This means if you only update the PHP code in your application, the Varnish, Solr, MariaDB, Redis and any other service that is defined but does not include your code will not be deployed. This makes everything much much faster.
-## 10. Wait for all rollouts to be done
+### 10. Wait for all rollouts to be done
Now Lagoon waits! It waits for all of the just-triggered deployments of the new pods to be finished, as well as for their health checks to be successful.
If any of the deployments or health checks fail, the deployment will be stopped here, and you will be informed via the defined notification systems \(like Slack\) that the deployment has failed.
-## 11. Run defined post-rollout tasks
+### 11. Run defined post-rollout tasks
Now Lagoon will check the `.lagoon.yml` file for defined tasks in `post-rollout` and will run them one by one in the defined services.
If any of them fail, Lagoon will immediately stop and notify you.
-## 12. Success
+### 12. Success
If all went well and nothing threw any errors, Lagoon will mark this build as successful and inform you via defined notifications. ✅
+### Push without deploying
+
+There may be a case where you want to push without a deployment. Make sure your commit message contains "`[skip deploy]`" or "`[deploy skip]`" and Lagoon will not trigger a deployment from that commit.
+
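+For example \(a sketch; the commit message wording and branch name are placeholders\):
+
+```bash
+git commit -m "Tweak README [skip deploy]"
+git push origin main
+```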
diff --git a/docs/using-lagoon-the-basics/configure-webhooks.md b/docs/using-lagoon-the-basics/configure-webhooks.md
index 5df3f5276e..aa53fdeb55 100644
--- a/docs/using-lagoon-the-basics/configure-webhooks.md
+++ b/docs/using-lagoon-the-basics/configure-webhooks.md
@@ -33,7 +33,7 @@ Managing the following settings will require you to have a high level of access
* Navigate to Settings -> Integrations in your GitLab repository.
-![Go to Settings > Integrations in your GitLab repository.](../.gitbook/assets/screen-shot-2020-01-23-at-1.04.06-pm.png)
+![Go to Settings > Integrations in your GitLab repository.](../.gitbook/assets/gitlab-settings.png)
* The `URL` is the route to the `webhook-handler` of your Lagoon instance, provided by your Lagoon administrator.
* Select the `Trigger` events which will send a notification to Lagoon. We suggest that you send `Push events` and `Merge request events`, and then filter further in the Lagoon configuration of your project.
diff --git a/docs/using-lagoon-the-basics/docker-compose-yml.md b/docs/using-lagoon-the-basics/docker-compose-yml.md
index cde148d39f..6a6356a011 100644
--- a/docs/using-lagoon-the-basics/docker-compose-yml.md
+++ b/docs/using-lagoon-the-basics/docker-compose-yml.md
@@ -218,7 +218,7 @@ You can also overwrite the rollout for just one specific environment. This is do
Feeling adventurous and want to do something completely customized? Welcome to the Danger Zone!
-![Welcome to the Danger Zone](../.gitbook/assets/topgun%20%281%29.gif)
+![Welcome to the Danger Zone](../.gitbook/assets/topgun.gif)
When defining a service as `lagoon.type: custom`, you can tell Lagoon to not use any pre-defined service type templates and instead pass in your own full custom YAML file.
diff --git a/docs/using-lagoon-the-basics/index.md b/docs/using-lagoon-the-basics/index.md
index fb63c636e0..e5dd076b5b 100644
--- a/docs/using-lagoon-the-basics/index.md
+++ b/docs/using-lagoon-the-basics/index.md
@@ -15,7 +15,7 @@ gem install pygmy
pygmy up
```
-[Pygmy](https://pygmy.readthedocs.io/en/master/) is an amazee.io flavored local development system.
+[Pygmy](https://docs.lagoon.sh/pygmy/) is an amazee.io flavored local development system.
Learn more about Lagoon, pygmy, and [Local Development Environments](local-development-environments.md)
@@ -31,11 +31,11 @@ Learn more about Lagoon, pygmy, and [Local Development Environments](local-devel
### `.lagoon.yml`
-This is the main file that will be used by Lagoon to understand what should be deployed, as well as many other things. See [Documentation for `.lagoon.yml`](lagoon-yml.md).
+This is the main file that will be used by Lagoon to understand what should be deployed, as well as many other things. See [documentation for `.lagoon.yml`](lagoon-yml.md).
### `docker-compose.yml`
-This file is used by `Docker Compose` to start your local development environment. Lagoon also uses it to understand which of the services should be deployed, which type, and how to build them. This happens via `labels`. See [Documentation for `docker-compose.yml`](docker-compose-yml.md).
+This file is used by `Docker Compose` to start your local development environment. Lagoon also uses it to understand which of the services should be deployed, which type, and how to build them. This happens via `labels`. See [documentation for `docker-compose.yml`](docker-compose-yml.md).
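+
+A minimal sketch of such a label \(the service name, build context, and Dockerfile name are assumptions and will vary per project\):
+
+```yaml
+services:
+  nginx:
+    build:
+      context: .
+      dockerfile: nginx.dockerfile
+    labels:
+      lagoon.type: nginx # tells Lagoon which service type template to deploy
+```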
### Dockerfiles
@@ -48,26 +48,29 @@ Some Docker images and containers need additional customizations from the provid
| Type | Versions | Dockerfile |
| :--- | :--- | :--- |
-| [Elasticsearch](../docker-images/elasticsearch.md) | 6.8, 7.6 | [elasticsearch/Dockerfiles](https://github.com/amazeeio/lagoon/tree/master/images/elasticsearch) |
-| [MariaDB-Drupal](../docker-images/mariadb/mariadb-drupal.md) | | [mariadb-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mariadb-drupal/Dockerfile) |
-| [MariaDB](../docker-images/mariadb/) | 10.4 | [mariadb/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mariadb/Dockerfile) |
-| [MongoDB](../docker-images/mongodb.md) | 3.6 | [mongo/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mongo/Dockerfile) |
-| [NGINX](../docker-images/nginx/) | openresty/1.15.8.2 | [nginx/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/nginx/Dockerfile) |
-| [NGINX-Drupal](../docker-images/nginx/nginx-drupal.md) | | [nginx-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/nginx-drupal/Dockerfile) |
-| [PHP-fpm-Drupal](../docker-images/php-cli/php-cli-drupal.md) | 7.2, 7.3, 7.4 | [php/cli-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/cli-drupal/Dockerfile) |
-| [PHP-cli](../docker-images/php-cli/) | 7.2, 7.3, 7.4 | [php/cli/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/cli/Dockerfile) |
-| [PHP-fpm](../docker-images/php-fpm.md) | 7.2, 7.3, 7.4 | [php/fpm/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/fpm/Dockerfile) |
-| [PostgreSQL](../docker-images/postgres.md) | 11.6 | [postgres/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/postgres/Dockerfile) |
-| Python | 2.7, 3.7 | [python/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/python/Dockerfile) |
-| [RabbitMQ](../docker-images/rabbitmq.md) | 3.8 | [rabbitmq/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/rabbitmq/Dockerfile) |
-| [Redis-persistent](../docker-images/redis/redis-persistent.md) | 5.0 | [redis-persistent/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/redis-persistent/Dockerfile) |
-| [Redis](https://github.com/AlannaBurke/lagoon/tree/6c60efce4fc48ebd7d5858cedaafb6ed86b704ee/docs/docker_images/redis.md) | 5.0 | [redis/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/redis/Dockerfile) |
-| [Solr-Drupal](../docker-images/solr/solr-drupal.md) | 5.5, 6.6, 7.7 | [solr-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile) |
-| [Solr](../docker-images/solr/) | 5.5, 6.6, 7.7 | [solr/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/solr/Dockerfile) |
-| [Varnish-Drupal](../docker-images/varnish/varnish-drupal.md) | 5.2 | [varnish-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/varnish-drupal/Dockerfile) |
-| [Varnish](../docker-images/varnish/) | 5.2 | [varnish/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/varnish/Dockerfile) |
-
-All images are pushed to [https://hub.docker.com/u/amazeeio](https://hub.docker.com/u/amazeeio). We suggest always using the latest tag \(like `amazeeio/nginx:latest`\) or unsuffixed images \(like `amazeeio/node:14`\), as they are kept up to date in terms of features and security.
-
-If you choose to use a specific Lagoon version of an image like `amazeeio/nginx:v0.21.0` or `amazeeio/node:10-v0.21.0` it is your own responsibility to upgrade the version of the images as soon as a new Lagoon version is released!
+| [MariaDB](../docker-images/mariadb/) | 10.4 | [mariadb/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/mariadb) |
+| [MariaDB \(Drupal\)](../docker-images/mariadb/mariadb-drupal.md) | | [mariadb-drupal/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/mariadb-drupal) |
+| [PostgreSQL](../docker-images/postgres.md) | 11, 12 | [postgres/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/postgres) |
+| [MongoDB](../docker-images/mongodb.md) | 3.6 | [mongo/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/mongo) |
+| [NGINX](../docker-images/nginx/) | openresty/1.19 | [nginx/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/nginx) |
+| [NGINX \(Drupal\)](../docker-images/nginx/nginx-drupal.md) | openresty/1.19 | [nginx-drupal/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/nginx-drupal) |
+| Node.js | 10, 12, 14 | [node/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/node) |
+| [PHP FPM](../docker-images/php-fpm.md) | 7.2, 7.3, 7.4 | [php/fpm/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/php-fpm) |
+| [PHP CLI](../docker-images/php-cli/) | 7.2, 7.3, 7.4 | [php/cli/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli) |
+| [PHP CLI \(Drupal\)](../docker-images/php-cli/php-cli-drupal.md) | 7.3, 7.4, 8.0 | [php/cli-drupal/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/php-cli-drupal) |
+| Python | 2.7, 3.7, 3.8 | [python/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/python) |
+| [Redis](https://github.com/amazeeio/lagoon/tree/0ea0472705f825feb5fe0b946a8f4c9fc0bba707/docs/docker-images/redis.md) | 5, 6 | [redis/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/redis) |
+| [Redis-persistent](../docker-images/redis/redis-persistent.md) | 5, 6 | [redis-persistent/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/redis-persistent) |
+| [Solr](../docker-images/solr/) | 5.5, 6.6, 7.7 | [solr/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/solr) |
+| [Solr \(Drupal\)](../docker-images/solr/solr-drupal.md) | 5.5, 6.6, 7.7 | [solr-drupal/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/solr-drupal) |
+| [Varnish](../docker-images/varnish/) | 5 | [varnish/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/varnish) |
+| [Varnish \(Drupal\)](../docker-images/varnish/varnish-drupal.md) | 5 | [varnish-drupal/Dockerfile](https://github.com/uselagoon/lagoon-images/blob/main/images/varnish-drupal) |
+| [Elasticsearch](../docker-images/elasticsearch.md) | 6, 7 | [elasticsearch/Dockerfiles](https://github.com/uselagoon/lagoon-images/blob/main/images/elasticsearch) |
+| [Logstash](../docker-images/elasticsearch.md) | 6, 7 | [logstash/Dockerfiles](https://github.com/uselagoon/lagoon-images/blob/main/images/logstash) |
+| [Kibana](../docker-images/elasticsearch.md) | 6, 7 | [kibana/Dockerfiles](https://github.com/uselagoon/lagoon-images/blob/main/images/kibana) |
+| [RabbitMQ](../docker-images/rabbitmq.md) | 3.8 | [rabbitmq/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/rabbitmq) |
+
+All images are pushed to [https://hub.docker.com/u/uselagoon](https://hub.docker.com/u/uselagoon). We suggest always using the latest tag \(like `uselagoon/nginx:latest`\) or unsuffixed images \(like `uselagoon/node-14`\), as they are kept up to date in terms of features and security.
+
+If you choose to use a specific Lagoon version of an image like `uselagoon/nginx:20.10.0` or `uselagoon/node-10:20.10.0`, it is your own responsibility to upgrade the version of the images as soon as a new Lagoon version is released!
diff --git a/docs/using-lagoon-the-basics/lagoon-yml.md b/docs/using-lagoon-the-basics/lagoon-yml.md
index c291398b08..90149a10f6 100644
--- a/docs/using-lagoon-the-basics/lagoon-yml.md
+++ b/docs/using-lagoon-the-basics/lagoon-yml.md
@@ -40,7 +40,8 @@ tasks:
service: cli
routes:
- insecure: Redirect
+ autogenerate:
+ insecure: Redirect
environments:
master:
@@ -124,7 +125,7 @@ Note: If you would like to temporarily disable pre/post-rollout tasks during a d
## Routes
-{% embed url="https://www.youtube.com/watch?v=un23Vivz\_-Q" caption="How do I add a new route?" %}
+{% embed url="https://www.youtube.com/watch?v=vQxh87F3fW4&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=4" caption="" %}
### `routes.autogenerate.enabled`
@@ -205,7 +206,7 @@ In the `"www.example.com"` example repeated below, we see two more options \(als
* `hsts` can be set to a value of `max-age=31536000;includeSubDomains;preload`. Ensure there are no spaces and no other parameters included. Only the `max-age` parameter is required. The required `max-age` parameter indicates the length of time, in seconds, the HSTS policy is in effect for.
{% hint style="info" %}
-If you plan to switch from a SSL certificate signed by a Certificate Authority \(CA\) to a Let's Encrypt certificate, it's best get in touch with your Lagoon administrator to oversee the transition. There are [known issues](https://github.com/tnozicka/openshift-acme/issues/68) during the transition. The workaround would be manually removing the CA certificate and then triggering the Let's Encrypt process.
+If you plan to switch from a SSL certificate signed by a Certificate Authority \(CA\) to a Let's Encrypt certificate, it's best to get in touch with your Lagoon administrator to oversee the transition. There are [known issues](https://github.com/tnozicka/openshift-acme/issues/68) during the transition. The workaround would be manually removing the CA certificate and then triggering the Let's Encrypt process.
{% endhint %}
{% tabs %}
@@ -221,7 +222,7 @@ If you plan to switch from a SSL certificate signed by a Certificate Authority \
### **Monitoring a specific path**
-When [UptimeRobot](https://uptimerobot.com/) is configured for your cluster \(OpenShift or Kubernetes\), Lagoon will inject annotations to each route/ingress for use by the `stakater/IngressControllerMonitor`. The default action is to monitor the homepage of the route. If you have a specific route to be monitored, this can be overriden by adding a `monitoring-path` to your route specification. A common use is to set up a path for monitoring which bypasses caching to give a more real-time monitoring of your site.
+When [UptimeRobot](https://uptimerobot.com/) is configured for your cluster \(OpenShift or Kubernetes\), Lagoon will inject annotations to each route/ingress for use by the `stakater/IngressControllerMonitor`. The default action is to monitor the homepage of the route. If you have a specific route to be monitored, this can be overridden by adding a `monitoring-path` to your route specification. A common use is to set up a path for monitoring which bypasses caching to give a more real-time monitoring of your site.
{% tabs %}
{% tab title=".lagoon.yml" %}
@@ -232,13 +233,15 @@ When [UptimeRobot](https://uptimerobot.com/) is configured for your cluster \(Op
{% endtab %}
{% endtabs %}
-### **Ingress annotations \(Redirects\)**
+### **Ingress annotations**
{% hint style="info" %}
Route/Ingress annotations are only supported by projects that deploy into clusters that run nginx-ingress controllers! Check with your Lagoon administrator if this is supported.
{% endhint %}
-* `annotations` can be a yaml map of [annotations supported by the nginx-ingress controller](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/), this is specifically useful for easy redirects:
+* `annotations` can be a yaml map of [annotations supported by the nginx-ingress controller](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). This is specifically useful for easy redirects and other configurations.
+
+#### **Ingress annotations redirects**
In this example any requests to `example.ch` will be redirected to `https://www.example.ch` with keeping folders or query parameters intact \(`example.com/folder?query` -> `https://www.example.ch/folder?query`\)
@@ -265,6 +268,25 @@ You can of course also redirect to any other URL not hosted on Lagoon, this will
{% endtab %}
{% endtabs %}
+#### Trusted Reverse Proxies
+
+Some configurations involve a reverse proxy \(like a CDN\) in front of the Kubernetes clusters. In these configurations the IP of the reverse proxy will appear in the `REMOTE_ADDR`, `HTTP_X_REAL_IP`, and `HTTP_X_FORWARDED_FOR` headers of your application, while the original IP of the requester can be found in the `HTTP_X_ORIGINAL_FORWARDED_FOR` header.
+
+If you would like the original IP to appear in the `REMOTE_ADDR`, `HTTP_X_REAL_IP`, and `HTTP_X_FORWARDED_FOR` headers instead, you need to tell the ingress which reverse proxy IPs you want to trust:
+
+{% tabs %}
+{% tab title=".lagoon.yml" %}
+```yaml
+ - "example.ch":
+ annotations:
+ nginx.ingress.kubernetes.io/server-snippet: |
+ set_real_ip_from 1.2.3.4/32;
+```
+{% endtab %}
+{% endtabs %}
+
+This example would trust the CIDR `1.2.3.4/32` \(the IP `1.2.3.4` in this case\). Therefore, if a request is sent to the Kubernetes cluster from the IP `1.2.3.4`, the `X-Forwarded-For` header is analyzed and its contents injected into the `REMOTE_ADDR`, `HTTP_X_REAL_IP`, and `HTTP_X_FORWARDED_FOR` headers.
+
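+If several reverse proxies sit in front of the cluster \(the CIDRs below are placeholders\), the `set_real_ip_from` directive can simply be repeated once per trusted range:
+
+{% tabs %}
+{% tab title=".lagoon.yml" %}
+```yaml
+ - "example.ch":
+      annotations:
+        nginx.ingress.kubernetes.io/server-snippet: |
+          set_real_ip_from 1.2.3.4/32;
+          set_real_ip_from 5.6.7.0/24;
+```
+{% endtab %}
+{% endtabs %}
+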
### `Environments.[name].types`
The Lagoon build process checks the `lagoon.type` label from the `docker-compose.yml` file in order to learn what type of service should be deployed \(read more about them in the [documentation of `docker-compose.yml`](docker-compose-yml.md#custom-templates)\).
@@ -349,17 +371,31 @@ routes:
enabled: false
environments:
develop:
- autogenerateRoutes: tru
+ autogenerateRoutes: true
```
{% endtab %}
{% endtabs %}
### `Cron jobs - environments.[name].cronjobs`
-{% embed url="https://www.youtube.com/watch?v=6qqY-XmBZ8c" caption="How do I add a cron job?" %}
+{% embed url="https://www.youtube.com/watch?v=Yd\_JfDyfbR0&list=PLOM3iGqJj\_UdTtl4eVDszI9VgGW9Dcefd&index=2" caption="" %}
As most of the time it is not desirable to run the same cron jobs across all environments, you must explicitly define which jobs you want to run for each environment.
+Example:
+
+{% tabs %}
+{% tab title=".lagoon.yml" %}
+```yaml
+ cronjobs:
+ - name: drush cron
+ schedule: "H * * * *" # This will run the cron once per hour.
+ command: drush cron
+ service: cli
+```
+{% endtab %}
+{% endtabs %}
+
* `name:`
* Just a friendly name for identifying what the cron job will do.
* `schedule:`
@@ -410,7 +446,7 @@ With the key `ssh` you can define another SSH endpoint that should be used by th
### `additional-yaml`
-The `additional-yaml` has some super powers. Basically, it allows you to define any arbitrary YAML configuration file to be inserted before the build step \(it still needs to be valid Kubernetes/OpenShift YAML , though☺\).
+The `additional-yaml` has some super powers. Basically, it allows you to define any arbitrary YAML configuration file to be inserted before the build step \(it still needs to be valid Kubernetes/OpenShift YAML, though ☺\).
Example:
diff --git a/docs/using-lagoon-the-basics/local-development-environments.md b/docs/using-lagoon-the-basics/local-development-environments.md
index 71716ad267..bfde94e684 100644
--- a/docs/using-lagoon-the-basics/local-development-environments.md
+++ b/docs/using-lagoon-the-basics/local-development-environments.md
@@ -8,16 +8,20 @@ Even though Lagoon has only a hard dependency on Docker and [Docker Compose](htt
* A system that receives and displays mail locally.
{% hint style="warning" %}
-You do not need to _install_ Lagoon locally in order to _use_ it locally! That sounds confusing, but follow the documentation. Lagoon is the system that **deploys** your local development environment to your production environment, it's **not** the environment itself.
+You do not need to _install_ Lagoon locally to _use_ it locally! That sounds confusing, but follow the documentation. Lagoon is the system that **deploys** your local development environment to your production environment; it's **not** the environment itself.
{% endhint %}
-Lagoon currently works best with `pygmy` , which is the amazee.io flavored system of the above tools and works out of the box with Lagoon.
+## pygmy or Lando - the choice is yours
-`pygmy` is a [Ruby](https://www.ruby-lang.org/en/) gem, so to install it, run: `gem install pygmy`.
+Lagoon has traditionally worked best with `pygmy`, which is the amazee.io flavored system of the above tools and works out of the box with Lagoon. It lives at [https://github.com/amazeeio/pygmy](https://github.com/amazeeio/pygmy).
-For detailed usage info on `pygmy`, see the [documentation of pygmy](https://pygmy.readthedocs.io/).
+`pygmy` is a [Ruby](https://www.ruby-lang.org/en/) gem, so to install it, run: `gem install pygmy`. For detailed usage info on pygmy, see its [documentation](https://docs.lagoon.sh/pygmy/).
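+
+In practice that is just two commands \(a sketch; see the pygmy documentation for options and troubleshooting\):
+
+```bash
+gem install pygmy # install the Ruby gem
+pygmy up          # start the shared local services (reverse proxy, DNS, local mail)
+```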
-[Lando integration with Lagoon is coming soon!](https://www.amazee.io/blog/post/announcing-lando-integration-for-lagoon)
+As announced in our [blog post](https://www.amazee.io/blog/post/announcing-lando-integration-for-lagoon), Lagoon is now also compatible with Lando! For more information, please see the documentation at [https://docs.lando.dev/config/lagoon.html](https://docs.lando.dev/config/lagoon.html) to get yourself up and running.
-We are evaluating adding support for other systems like [Docksal](https://docksal.io/), [DDEV](https://www.ddev.com/ddev-local/), and [Docker4Drupal](https://wodby.com/docs/stacks/drupal/local/), and will possibly add full support for these in the future. If you do have Lagoon running with a system like these, we would love for you to submit a [PR on GitHub](https://github.com/amazeeio/pygmy)!
+Lando's workflow for Lagoon will be familiar to users of Lando, and will also be the easiest way for Lagoon newcomers to get up and running. Pygmy presents a closer integration with Docker, which will lend itself better to more complex scenarios and use cases but will also require a deeper understanding.
+
+There is also a community-built fork of Pygmy, re-written in Go, available at [https://github.com/fubarhouse/pygmy-go](https://github.com/fubarhouse/pygmy-go) that presents even more opportunity for local customisation and control.
+
+We have previously evaluated adding support for other systems like [Docksal](https://docksal.io/), [DDEV](https://www.ddev.com/ddev-local/), and [Docker4Drupal](https://wodby.com/docs/stacks/drupal/local/), and whilst we may add support for these in the future, our current focus is on supporting Lando and pygmy. If you do have Lagoon running with one of these \(or other\) tools, we would love for you to submit a [PR on GitHub](https://github.com/amazeeio/pygmy)!
diff --git a/helpers/annotate-pvc-backup.sh b/helpers/annotate-pvc-backup.sh
deleted file mode 100644
index 1b5dd04067..0000000000
--- a/helpers/annotate-pvc-backup.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-##
-## This annotates all PVCs with the name `solr` and `nginx` with appuio.ch/backup="true" in order that the restic backup system will back them up
-##
-
-oc get pvc --all-namespaces | grep solr | sed '1d' | awk '{ print $2, "--namespace", $1 }' | while read line; do oc annotate --overwrite pvc $line appuio.ch/backup="true"; done
-oc get pvc --all-namespaces | grep nginx | sed '1d' | awk '{ print $2, "--namespace", $1 }' | while read line; do oc annotate --overwrite pvc $line appuio.ch/backup="true"; done
-
-oc get --all-namespaces pod -l 'service in (cli)' | sed '1d' | awk '{ print "--namespace", $1, "pod", $2 }' | while read line; do oc annotate $line --overwrite appuio.ch/backupcommand='/bin/sh -c "if [[ $MARIADB_HOST ]]; then dump=$(mktemp) && mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -h $MARIADB_HOST -u $MARIADB_USERNAME -p$MARIADB_PASSWORD $MARIADB_DATABASE > $dump && cat $dump && rm $dump; fi"'; done
\ No newline at end of file
diff --git a/helpers/check_acme_routes.sh b/helpers/check_acme_routes.sh
deleted file mode 100755
index c4dc864235..0000000000
--- a/helpers/check_acme_routes.sh
+++ /dev/null
@@ -1,301 +0,0 @@
-#!/bin/bash
-
-# Description: script to check routes with exposer pods.
-# In case of no DNS record or mis-configuration, script will update the route
-# by disabling the tls-acme, removing other acme related annotations and add
-# an interal one for filtering purpose
-
-set -eu -o pipefail
-
-# Set DEBUG variable to true, to start bash in debug mode
-DEBUG="${DEBUG:-"false"}"
-if [ "$DEBUG" = "true" ]; then
- set -x
-fi
-
-# Some variables
-
-# Cluster full hostname and API hostname
-CLUSTER_HOSTNAME="${CLUSTER_HOSTNAME:-""}"
-CLUSTER_API_HOSTNAME="${CLUSTER_API_HOSTNAME:-"$CLUSTER_HOSTNAME"}"
-
-# Default command
-COMMAND=${1:-"help"}
-
-# Set DRYRUN variable to true to run in dry-run mode
-DRYRUN="${DRYRUN:-"false"}"
-
-
-# Set a REGEX variable to filter the execution of the script
-REGEX=${REGEX:-".*"}
-
-# Set NOTIFYONLY to true if you want to send customers a notification
-# explaining why Lagoon is not able to issue Let'S Encrypt certificate for
-# some routes defined in customer's .lagoon.yml file.
-# If set to true, no other action rather than notification is done (ie: no annotation or deletion)
-NOTIFYONLY=${NOTIFYONLY:-"false"}
-
-# Help function
-function usage() {
- echo -e "The available commands are:
- - help (get this help)
- - getpendingroutes (get a list of routes with acme \"orderStatus\" in Pending
- - getdisabledroutes (get a list of routes with \"administratively-disabled\" annotation
- - getbrokenroutes (get a list of all possible broken routes)
- - updateroutes (update broken routes)
-
- By default, script doesn't set any default cluster to run routes' checks. Please set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME variables.
- If you want to change the API endpoint, set CLUSTER_API_HOSTNAME variable.
- If you want to change the cluster's hostname, set CLUSTER_HOSTNAME variable.
- If you want to filter the execution of the script only for certain projects, set the REGEX variable.
- If you want to test against a specific IP, set the CLUSTER_IPS array.
-
- Examples:
- CLUSTER_HOSTNAME=\"ch.amazee.io\" CLUSTER_API_HOSTNAME=\"ch.amazeeio.cloud\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes witl TLS in Pending status for the defined cluster)
- REGEX=\"drupal-example\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes for all projects matchiing the regex \`drupal-example\` with TLS in Pending status)
- REGEX=\"drupal-example-master\" DRYRUN=true ./check_acme_routes.sh updateroutes (Will run in DRYRUN mode to check and update all broken routes in \`drupal-example-master\` project)"
-
-}
-
-# Function that performs mandatory variales and dependencies checks
-function initial_checks() {
- # By default script doesn't set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME. At least CLUSTER_HOSTNAME must be set
- if [ -z "$CLUSTER_HOSTNAME" ]; then
- echo "Please set CLUSTER_HOSTNAME variable"
- usage
- exit 1
- fi
-
- # Script depends on `lagoon-cli`. Check if it in installed
- if [[ ! $(command -v lagoon) ]]; then
- echo "Please install \`lagoon-cli\` from https://github.com/amazeeio/lagoon-cli because the script relys on it"
- exit 1
- fi
-}
-
-# function to get a list of all "administratively-disabled" routes
-function get_all_disabled_routes() {
- echo -e "List of routes administratively disabled\n"
- oc get route --all-namespaces -o=jsonpath="{range .items[?(@.metadata.annotations.amazee\.io/administratively-disabled)]}{.metadata.namespace}{'\t'}{.metadata.name}{'\n'}{end}"
- exit 0
-}
-
-# Function to check if you are running the script on the right cluster and if you're logged in correctly
-function check_cluster_api() {
- # Check on which cluster you're going to run commands
- if oc whoami --show-server | grep -q -v "$CLUSTER_API_HOSTNAME"; then
- echo "Please connect to the right cluster"
- exit 1
- fi
-
- # Check if you're logged in correctly
- if [ $(oc status|grep -q "Unauthorized";echo $?) -eq 0 ]; then
- echo "Please login into the cluster"
- exit 1
- fi
-}
-
-# Function to get a list of all routes with acme.openshift.io/status.provisioningStatus.orderStatus=pending
-function get_pending_routes() {
- for namespace in $(oc get projects --no-headers=true |awk '{print $1}'|sort -u|grep -E "$REGEX")
- do
- IFS=$';'
- # For each route in a namespace with `tls-acme` set to true, check the `orderStatus` if in pending status
- for routelist in $(oc get route -n "$namespace" -o=jsonpath="{range .items[?(@.metadata.annotations.kubernetes\.io/tls-acme=='true')]}{.metadata.name}{'\n'}{.metadata.annotations.acme\.openshift\.io/status}{';'}{end}"|sed "s/^[[:space:]]*//")
- do
- PENDING_ROUTE_NAME=$(echo "$routelist"|sed -n 1p)
- if echo "$routelist"|sed -n 4p | grep -q pending; then
- STATUS="Pending"
- echo "Route $PENDING_ROUTE_NAME in $namespace is in $STATUS status"
- fi
-
- done
- unset IFS
- done
-}
-
-# Function for creating an array with all routes that might be updated
-function create_routes_array() {
- # Get the list of namespaces with broker routes, according to REGEX
- for namespace in $(oc get routes --all-namespaces|grep exposer|awk '{print $1}'|sort -u|grep -E "$REGEX")
- do
- # Raw JSON Openshift project output
- PROJECTJSON="$(oc get project "$namespace" -o json)"
-
- # Gather project name based on a label or an annotation
- if [ $(echo $PROJECTJSON |grep -q 'lagoon.sh/project'; echo $?) -eq 0 ]; then
- PROJECTNAME=$(echo "${PROJECTJSON}" | grep 'lagoon.sh/project' | awk -F'"' '{print $4}')
- else
- PROJECTNAME=$(echo "${PROJECTJSON}" |grep display-name|awk -F'[][]' '{print $2}'|tr "_" "-")
- fi
-
- # Get the list of broken unique routes for each namespace
- for routelist in $(oc get -n "$namespace" route|grep exposer|awk -vNAMESPACE="$namespace" -vPROJECTNAME="$PROJECTNAME" '{print $1";"$2";"NAMESPACE";"PROJECTNAME}'|sort -u -k2 -t ";")
- do
- # Put the list into an array
- ROUTES_ARRAY+=("$routelist")
- done
- done
-
- # Create a sorted array of unique route to check
- ROUTES_ARRAY_SORTED=($(sort -u -k 2 -t ";"<<<"${ROUTES_ARRAY[*]}"))
-}
-
-# Function to check the routes, update them and delete the exposer's routes
-function check_routes() {
-
- # Cluster array of IPs
- CLUSTER_IPS=($(dig +short "$CLUSTER_HOSTNAME"))
- for i in "${ROUTES_ARRAY_SORTED[@]}"
- do
- # Tranform the item into an array
- route=($(echo "$i" | tr ";" "\n"))
-
- # Gather some useful variables
- ROUTE_NAME=${route[0]}
- ROUTE_HOSTNAME=${route[1]}
- ROUTE_NAMESPACE=${route[2]}
- ROUTE_PROJECTNAME=${route[3]}
-
- # Get route DNS record(s)
- if [[ $(dig +short "$ROUTE_HOSTNAME" &> /dev/null; echo $?) -ne 0 ]]; then
- ROUTE_HOSTNAME_IP="null"
- else
- ROUTE_HOSTNAME_IP=$(dig +short "$ROUTE_HOSTNAME")
- fi
-
- # Check if the route matches the Cluster's IP(s)
- if echo "$ROUTE_HOSTNAME_IP" | grep -E -q -v "${CLUSTER_IPS[*]}"; then
-
- # If IP is empty, then no DNS record set
- if [ -z "$ROUTE_HOSTNAME_IP" ]; then
- DNS_ERROR="No A or CNAME record set"
- else
- DNS_ERROR="$ROUTE_HOSTNAME in $ROUTE_NAMESPACE has no DNS record poiting to ${CLUSTER_IPS[*]} and going to disable tls-acme"
- fi
-
- # Print the error on stdout
- echo "$DNS_ERROR"
-
- if [[ "$NOTIFYONLY" = "true" ]]; then
- notify_customer "$ROUTE_PROJECTNAME"
- else
- # Call the update function to update the route
- update_annotation "$ROUTE_HOSTNAME" "$ROUTE_NAMESPACE"
- notify_customer "$ROUTE_PROJECTNAME"
-
- # Now once the main route is updated, it's time to get rid of exposers' routes
- for j in $(oc get -n "$ROUTE_NAMESPACE" route|grep exposer|grep -E '(^|\s)'"$ROUTE_HOSTNAME"'($|\s)'|awk '{print $1";"$2}')
- do
- ocroute=($(echo "$j" | tr ";" "\n"))
- OCROUTE_NAME=${ocroute[0]}
- if [[ $DRYRUN = true ]]; then
- echo -e "DRYRUN oc delete -n $ROUTE_NAMESPACE route $OCROUTE_NAME"
- else
- echo -e "\nDelete route $OCROUTE_NAME"
- oc delete -n "$ROUTE_NAMESPACE" route "$OCROUTE_NAME"
- fi
- done
- fi
- fi
- echo -e "\n"
-
-
- done
-}
-
-# Function to update route's annotation (ie: update tls-amce, remove tls-acme-awaiting-* and set a new one for internal purpose)
-function update_annotation() {
- echo "Update route's annotations"
- OCOPTIONS="--overwrite"
- if [[ "$DRYRUN" = "true" ]]; then
- OCOPTIONS="--dry-run --overwrite"
- fi
-
- # Annotate the route
- oc annotate -n "$2" $OCOPTIONS route "$1" acme.openshift.io/status- kubernetes.io/tls-acme-awaiting-authorization-owner- kubernetes.io/tls-acme-awaiting-authorization-at-url- kubernetes.io/tls-acme="false" amazee.io/administratively-disabled="$(date +%s)"
-}
-
-
-# Function to notify customer about the misconfiguration of their routes
-function notify_customer() {
-
- # Get Slack|Rocketchat channel and webhook
- if [ $(TEST=$(lagoon list slack -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then
- NOTIFICATION="slack"
- elif [ $(TEST=$(lagoon list rocketchat -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then
- NOTIFICATION="rocketchat"
- else
- echo "No notification set"
- return 0
- fi
-
- MESSAGE="Your $ROUTE_HOSTNAME route is configured in the \`.lagoon.yml\` file to issue an TLS certificate from Lets Encrypt. Unfortunately Lagoon is unable to issue a certificate as $DNS_ERROR.\nTo be issued correctly, the DNS records for $ROUTE_HOSTNAME should point to $CLUSTER_HOSTNAME with an CNAME record (preferred) or to ${CLUSTER_IPS[*]} via an A record (also possible but not preferred).\nIf you don't need the SSL certificate or you are using a CDN that provides you with an TLS certificate, please update your .lagoon.yml file by setting the tls-acme parameter to false for $ROUTE_HOSTNAME, as described here: https://lagoon.readthedocs.io/en/latest/using_lagoon/lagoon_yml/#ssl-configuration-tls-acme.\nWe have now administratively disabled the issuing of Lets Encrypt certificate for $ROUTE_HOSTNAME in order to protect the cluster, this will be reset during the next deployment, therefore we suggest to resolve this issue as soon as possible. Feel free to reach out to us for further information.\nThanks you.\namazee.io team"
-
- NOTIFICATION_DATA=($(lagoon list $NOTIFICATION -p "$1" --no-header|awk '{print $3";"$4}'))
- for notification in ${NOTIFICATION_DATA[@]}
- do
- CHANNEL=$(echo "$notification"|cut -f1 -d ";")
- WEBHOOK=$(echo "$notification"|cut -f2 -d ";")
-
- # json Payload
- PAYLOAD="\"channel\": \"$CHANNEL\", \"text\": \"${MESSAGE}\""
-
- echo -e "Sending notification into ${CHANNEL}"
-
- # Execute curl to send message into the channel
- if [[ $DRYRUN = true ]]; then
- echo "DRYRUN Sending notification on \"$NOTIFICATION\" curl -X POST -H 'Content-type: application/json' --data '{'"$PAYLOAD"'}' "$WEBHOOK""
- else
- curl -X POST -H 'Content-type: application/json' --data '{'"${PAYLOAD}"'}' ${WEBHOOK}
- fi
- done
-}
-
-# Main function
-function main() {
-
- COMMAND="$1"
-
- # Check first the cluster you're connected to
- echo -e "You're running the script on $CLUSTER_HOSTNAME\nDRYRUN mode is set to \"$DRYRUN\""
- check_cluster_api
-
- case "$COMMAND" in
- help)
- usage
- ;;
- getpendingroutes)
- get_pending_routes
- ;;
- getdisabledroutes)
- get_all_disabled_routes
- ;;
- getbrokenroutes)
- echo -e "\nCreating a list of possible broken routes"
- create_routes_array
- echo -e "ROUTE_NAMESPACE;ROUTE_NAME;ROUTE_HOSTNAME"|column -t -s ";"
- for i in "${ROUTES_ARRAY_SORTED[@]}"
- do
- # Tranform the item into an array
- route=($(echo "$i" | tr ";" "\n"))
- # Gather some useful variables
- ROUTE_NAME=${route[0]}
- ROUTE_HOSTNAME=${route[1]}
- ROUTE_NAMESPACE=${route[2]}
- echo -e "$ROUTE_NAMESPACE;$ROUTE_NAME;$ROUTE_HOSTNAME"|column -t -s ";"
- done
- ;;
- updateroutes)
- echo -e "Checking routes\n"
- create_routes_array
- check_routes
- ;;
- *)
- usage
- ;;
- esac
-}
-
-initial_checks "$COMMAND"
-main "$COMMAND"
diff --git a/helpers/k8up-initiate-archive.sh b/helpers/k8up-initiate-archive.sh
deleted file mode 100755
index ba2cc4999d..0000000000
--- a/helpers/k8up-initiate-archive.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-function outputToYaml() {
- IFS=''
- while read data; do
- echo "$data" >> /tmp/k8up-archive-initiate.yml;
- done;
-}
-
-if [ -z "$OPENSHIFT_PROJECT" ]; then
- echo "OPENSHIFT_PROJECT not set"
- exit 1
-fi
-
-if [ -z "$ARCHIVE_BUCKET" ]; then
- echo "ARCHIVE_BUCKET not set"
- exit 1
-fi
-
-set -e -o pipefail
-
-OC="oc"
-
-rm -f /tmp/k8up-archive-initiate.yml;
-
-echo "${OPENSHIFT_PROJECT}: starting =================================================================="
-
-# Fill environment variables which are needed by exec-openshift-resources.sh and the lagoon templates
-CONFIGMAP=$($OC -n $OPENSHIFT_PROJECT get configmap lagoon-env -o json)
-PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_PROJECT')
-SAFE_PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_SAFE_PROJECT')
-BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_BRANCH')
-SAFE_BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_SAFE_BRANCH')
-ENVIRONMENT_TYPE=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_ENVIRONMENT_TYPE')
-LAGOON_GIT_SHA="00000000000000000000000000000000000000000"
-OPENSHIFT_REGISTRY="docker-registry.default.svc:5000"
-ROUTER_URL=""
-SERVICE_NAME="none"
-
-# If restic backups are supported by this cluster we create the schedule definition
-if oc get customresourcedefinition schedules.backup.appuio.ch > /dev/null; then
-
- # create archive only if there is a backup-schedule already existing for this project
- if oc -n ${OPENSHIFT_PROJECT} get schedule backup-schedule &> /dev/null; then
-
- # create archive only if this is a production environment
- if [[ "${ENVIRONMENT_TYPE}" == "production" ]]; then
- TEMPLATE_PARAMETERS=()
-
- # Run Archive on Monday at 0300-0600
- ARCHIVE_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-9) 7 * *")
- TEMPLATE_PARAMETERS+=(-p ARCHIVE_SCHEDULE="${ARCHIVE_SCHEDULE}")
-
- TEMPLATE_PARAMETERS+=(-p ARCHIVE_BUCKET="${ARCHIVE_BUCKET}")
-
- OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/backup-archive-schedule.yml"
- . $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-resources.sh
-
- oc apply -n ${OPENSHIFT_PROJECT} -f /tmp/k8up-archive-initiate.yml
- rm /tmp/k8up-archive-initiate.yml
- else
- echo "${OPENSHIFT_PROJECT}: Not production environment, not creating an archive-schedule"
- fi
- else
- echo "${OPENSHIFT_PROJECT}: No backup-schedule found for project, not creating an archive-schedule"
- fi
-else
- echo "k8up is not supported by this cluster"
- exit 1
-fi
-
-echo "${OPENSHIFT_PROJECT}: done =================================================================="
diff --git a/helpers/k8up-initiate.sh b/helpers/k8up-initiate.sh
deleted file mode 100755
index 9939931f12..0000000000
--- a/helpers/k8up-initiate.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-
-function outputToYaml() {
- IFS=''
- while read data; do
- echo "$data" >> /tmp/k8up-initiate.yml;
- done;
-}
-
-if [ -z "$JWTSECRET" ]; then
- echo "JWTSECRET not set"
- exit 1
-fi
-
-if [ -z "$OPENSHIFT_PROJECT" ]; then
- echo "OPENSHIFT_PROJECT not set"
- exit 1
-fi
-
-set -eu -o pipefail
-
-OC="oc"
-
-echo "${OPENSHIFT_PROJECT}: starting =================================================================="
-
-# Fill environment variables which are needed by exec-openshift-resources.sh and the lagoon templates
-CONFIGMAP=$($OC -n $OPENSHIFT_PROJECT get configmap lagoon-env -o json)
-PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_PROJECT')
-SAFE_PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_SAFE_PROJECT')
-BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_BRANCH')
-SAFE_BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_SAFE_BRANCH')
-LAGOON_GIT_SHA="00000000000000000000000000000000000000000"
-OPENSHIFT_REGISTRY="docker-registry.default.svc:5000"
-ROUTER_URL=""
-SERVICE_NAME="none"
-
-PROJECT_SECRET=$(echo -n "$PROJECT-$JWTSECRET" | sha256sum | cut -d " " -f 1)
-
-# If restic backups are supported by this cluster we create the schedule definition
-if oc get customresourcedefinition schedules.backup.appuio.ch > /dev/null; then
-
- baas_repo_pw=$(oc -n ${OPENSHIFT_PROJECT} create secret generic baas-repo-pw --from-literal=repo-pw=$(echo -n "$PROJECT_SECRET-BAAS-REPO-PW" | sha256sum | cut -d " " -f 1) -o json --dry-run)
-
- if ! oc -n ${OPENSHIFT_PROJECT} get secret baas-repo-pw &> /dev/null; then
- # Create baas-repo-pw secret based on the project secret
- echo "$baas_repo_pw" | oc -n ${OPENSHIFT_PROJECT} create -f -
- else
- echo "$baas_repo_pw" | oc -n ${OPENSHIFT_PROJECT} replace -f -
- fi
-
- TEMPLATE_PARAMETERS=()
-
- # Run Backups every day at 2200-0200
- BACKUP_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(22-2) * * *")
- TEMPLATE_PARAMETERS+=(-p BACKUP_SCHEDULE="${BACKUP_SCHEDULE}")
-
- # Run Checks on Sunday at 0300-0600
- CHECK_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 0")
- TEMPLATE_PARAMETERS+=(-p CHECK_SCHEDULE="${CHECK_SCHEDULE}")
-
- # Run Prune on Saturday at 0300-0600
- PRUNE_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 6")
- TEMPLATE_PARAMETERS+=(-p PRUNE_SCHEDULE="${PRUNE_SCHEDULE}")
-
- OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/backup/schedule.yml"
- . $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-resources.sh
-
- oc apply -n ${OPENSHIFT_PROJECT} -f /tmp/k8up-initiate.yml
- rm /tmp/k8up-initiate.yml
-else
- echo "k8sup is not supported by this cluster"
- exit 1
-fi
-
-# Disable backup of solr pvc's
-if solr=$(oc -n ${OPENSHIFT_PROJECT} get pvc solr -o json 2> /dev/null) && [[ $(echo "$solr" | jq -r '.metadata.annotations."appuio.ch/backup"') != "false" ]]; then
- oc -n ${OPENSHIFT_PROJECT} annotate --overwrite pvc solr appuio.ch/backup="false";
-fi
-
-# Enable backup of nginx pvc's
-if nginx=$(oc -n ${OPENSHIFT_PROJECT} get pvc nginx -o json 2> /dev/null) && [[ $(echo "$nginx" | jq -r '.metadata.annotations."appuio.ch/backup"') != "true" ]]; then
- oc -n ${OPENSHIFT_PROJECT} annotate --overwrite pvc nginx appuio.ch/backup="true";
-fi
-
-# Remove any backupcommand from nginx pods if they exit
-if oc -n ${OPENSHIFT_PROJECT} get deploymentconfig nginx -o json 2> /dev/null | jq -r -e '.spec.template.metadata.annotations."appuio.ch/backupcommand"' &> /dev/null; then
- oc -n ${OPENSHIFT_PROJECT} patch deploymentconfig nginx --type json -p='[{"op": "remove", "path": "/spec/template/metadata/annotations/appuio.ch~1backupcommand"}]'
-fi
-
-# add backupcommand to clis to backup mariadb
-if oc -n ${OPENSHIFT_PROJECT} get deploymentconfig cli &> /dev/null; then
- oc -n ${OPENSHIFT_PROJECT} patch deploymentconfig cli -p '{"spec":{"template":{"metadata":{"annotations":{"appuio.ch/backupcommand":"/bin/sh -c \"if [[ $MARIADB_HOST ]]; then dump=$(mktemp) && mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -h $MARIADB_HOST -u $MARIADB_USERNAME -p$MARIADB_PASSWORD $MARIADB_DATABASE > $dump && cat $dump && rm $dump; fi\"", "backup.appuio.ch/file-extension": ".mysql.sql"}}}}}' || true
-fi
-
-echo "${OPENSHIFT_PROJECT}: done =================================================================="
diff --git a/helpers/k8up-remove-prune.sh b/helpers/k8up-remove-prune.sh
deleted file mode 100755
index 8403870c45..0000000000
--- a/helpers/k8up-remove-prune.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-if [ -z "$OPENSHIFT_PROJECT" ]; then
- echo "OPENSHIFT_PROJECT not set"
- exit 1
-fi
-
-set -e -o pipefail
-
-echo "${OPENSHIFT_PROJECT}: starting =================================================================="
-
-if oc -n "${OPENSHIFT_PROJECT}" patch schedule backup-schedule --type=json -p="[{\"op\": \"remove\", \"path\": \"/spec/prune\"}]" 2>/dev/null; then
- echo "${OPENSHIFT_PROJECT}: patched backup-schedule"
-else
- echo "${OPENSHIFT_PROJECT}: backup-schedule already patched"
-fi
-
-echo "${OPENSHIFT_PROJECT}: done =================================================================="
diff --git a/helpers/label-namespaces.sh b/helpers/label-namespaces.sh
deleted file mode 100755
index e9f1f93c8e..0000000000
--- a/helpers/label-namespaces.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-
-##
-# Label all namespaces with lagoon info
-#
-# Old environments weren't labelled the way that Lagoon expects. This script
-# can be run against a cluster to add the missing labels.
-
-set -euo pipefail
-#set -x
-
-# Loop through all oc projects.
-while read -r project ; do
-
- # Check if lagoon-env configmap exists.
- if oc get configmap -n "$project" lagoon-env >/dev/null 2>&1; then
-
- echo "################################################"
- echo "Annotating project: $project..."
- echo "################################################"
-
- LAGOON_PROJECT=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_PROJECT:/ { print $2 }')
- LAGOON_ENVIRONMENT_TYPE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_ENVIRONMENT_TYPE:/ { print $2 }')
- LAGOON_GIT_SAFE_BRANCH=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_GIT_SAFE_BRANCH:/ { print $2 }')
- MARIADB_DATABASE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_DATABASE:/ { print $2 }')
- MARIADB_USERNAME=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_USERNAME:/ { print $2 }')
-
- oc label namespace "$project" "lagoon.sh/project=$LAGOON_PROJECT" --overwrite
- oc label namespace "$project" "lagoon.sh/environmentType=$LAGOON_ENVIRONMENT_TYPE" --overwrite
- oc label namespace "$project" "lagoon.sh/environment=$LAGOON_GIT_SAFE_BRANCH" --overwrite
- oc label namespace "$project" "lagoon.sh/mariadb-schema=$MARIADB_DATABASE" --overwrite
- oc label namespace "$project" "lagoon.sh/mariadb-username=$MARIADB_USERNAME" --overwrite
- else
-
- echo "No lagoon-env configmap found for $project"
-
- fi
-
-done < <(oc get ns -l '!lagoon.sh/project' | sed '1d' | awk '{print $1}')
diff --git a/helpers/lagoon-sync.sh b/helpers/lagoon-sync.sh
deleted file mode 100644
index 38316b9097..0000000000
--- a/helpers/lagoon-sync.sh
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-
-# to create serviceaccounts:
-# oc -n $namespace create serviceaccount lagoon-sync
-# oc -n $namespace adm policy add-role-to-user edit -z lagoon-sync
-# oc -n $namespace serviceaccounts get-token lagoon-sync
-
-set -eu -o pipefail
-
-#SOURCE_CONSOLE=""
-#SOURCE_NAMESPACE=""
-#SOURCE_SERVICEACCOUNT_TOKEN=""
-
-#DESTINATION_CONSOLE=""
-#DESTINATION_NAMESPACE=""
-#DESTINATION_SERVICEACCOUNT_TOKEN=""
-
-if [ -z "$SOURCE_CONSOLE" ]; then
- echo "SOURCE_CONSOLE not set"
- exit 1
-fi
-
-if [ -z "$DESTINATION_CONSOLE" ]; then
- echo "DESTINATION_CONSOLE not set"
- exit 1
-fi
-
-if [ -z "$SOURCE_SERVICEACCOUNT_TOKEN" ]; then
- echo "SOURCE_SERVICEACCOUNT_TOKEN not set"
- exit 1
-fi
-
-if [ -z "$DESTINATION_SERVICEACCOUNT_TOKEN" ]; then
- echo "DESTINATION_SERVICEACCOUNT_TOKEN not set"
- exit 1
-fi
-
-if [ -z "$SOURCE_NAMESPACE" ]; then
- echo "SOURCE_NAMESPACE not set"
- exit 1
-fi
-
-if [ -z "$DESTINATION_NAMESPACE" ]; then
- echo "DESTINATION_NAMESPACE not set"
- exit 1
-fi
-
-echo "SOURCE_CONSOLE: $SOURCE_CONSOLE"
-echo "SOURCE_NAMESPACE: $SOURCE_NAMESPACE"
-echo "DESTINATION_CONSOLE: $DESTINATION_CONSOLE"
-echo "DESTINATION_NAMESPACE: $DESTINATION_NAMESPACE"
-
-set -v
-
-mkdir -p /tmp/lagoon-sync/backup
-
-oc login $SOURCE_CONSOLE --token=$SOURCE_SERVICEACCOUNT_TOKEN
-source_context=$(oc config current-context)
-
-oc login $DESTINATION_CONSOLE --token=$DESTINATION_SERVICEACCOUNT_TOKEN
-destination_context=$(oc config current-context)
-
-source_api_db_pod=$(oc --context=$source_context -n $SOURCE_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=api-db)
-oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_api_db_pod -- /lagoon/mysql-backup.sh 127.0.0.1 || true
-source_api_db_backup=$(oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_api_db_pod -- sh -c "find . -name \"*.sql.gz\" -print0 | xargs -r -0 ls -1 -t | head -1")
-oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_api_db_pod -- cat $source_api_db_backup > /tmp/lagoon-sync/$source_api_db_backup
-
-
-destination_api_db_pod=$(oc --context=$destination_context -n $DESTINATION_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=api-db)
-oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- sh -c "mkdir -p backup"
-oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- sh -c "cat > $source_api_db_backup" < /tmp/lagoon-sync/$source_api_db_backup
-oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- sh -c "zcat $source_api_db_backup | mysql infrastructure"
-
-
-source_keycloak_db_pod=$(oc --context=$source_context -n $SOURCE_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=keycloak-db)
-oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_keycloak_db_pod -- /lagoon/mysql-backup.sh 127.0.0.1
-source_keycloak_db_backup=$(oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_keycloak_db_pod -- sh -c "find . -name \"*.sql.gz\" -print0 | xargs -r -0 ls -1 -t | head -1")
-oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_keycloak_db_pod -- cat $source_keycloak_db_backup > /tmp/lagoon-sync/$source_keycloak_db_backup
-
-destination_keycloak_db_pod=$(oc --context=$destination_context -n $DESTINATION_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=keycloak-db)
-oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_keycloak_db_pod -- sh -c "mkdir -p backup"
-oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_keycloak_db_pod -- sh -c "cat > $source_keycloak_db_backup" < /tmp/lagoon-sync/$source_keycloak_db_backup
-oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_keycloak_db_pod -- sh -c "zcat $source_keycloak_db_backup | mysql keycloak"
-
-
-oc --context=$destination_context -n $DESTINATION_NAMESPACE rollout latest dc/keycloak
-oc --context=$destination_context -n $DESTINATION_NAMESPACE rollout latest dc/api
-
-oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- /rerun_initdb.sh
-
-
diff --git a/helpers/mariadb-galera2shared.sh b/helpers/mariadb-galera2shared.sh
deleted file mode 100755
index fffab3c708..0000000000
--- a/helpers/mariadb-galera2shared.sh
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-
-
-
-if [ ! "$1" ]; then
- echo "please define openshift project as first argument"
- exit 1;
-fi
-
-set -uo pipefail
-
-which shyaml > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install shyaml (pip3 install shyaml)"
- exit 1
-fi
-
-which jq > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install jq"
- exit 1
-fi
-
-which svcat > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install svcat"
- exit 1
-fi
-
-set -e
-
-PROJECT_NAME=$1
-
-echo "*** Starting mariadb-galera --> mariadb-shared migration in ${PROJECT_NAME}"
-
-SERVICE_NAME=mariadb
-SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:])
-SERVICE_TYPE=mariadb-shared
-
-ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE')
-
-OLD_POD="mariadb-galera-0"
-
-if [[ "$OLD_POD" ]]; then
- echo "found $SERVICE_NAME pod $OLD_POD"
-else
- echo "no running pod found for service '${SERVICE_NAME}'', is it running?"
- exit 1
-fi
-
-echo "*** Pausing nginx and cli"
-NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas')
-CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas')
-oc -n $1 scale dc/nginx --replicas=0
-oc -n $1 scale dc/cli --replicas=0
-
-
-# create service broker
-## taken from build-deploy-docker-compose.sh
-
-OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml"
-SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb"
-SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}"
-OPENSHIFT_PROJECT=$1
-. $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh
-
-# ServiceBrokers take a bit, wait until the credentials secret is available
-until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials
-do
- echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs"
- sleep 10
-done
-
-# Load credentials out of secret
-SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml
-oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS
-
-DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D)
-DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D)
-DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D)
-DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D)
-DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D)
-
-echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST"
-# transfer database between from old to new
-oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db drupal || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME"
-
-CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml"
-echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}"
-oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK
-
-echo "*** updating configmap to point to ${DB_HOST}."
-# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker
-oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \
- -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}"
-
-
-echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later"
-oc -n $1 delete service mariadb
-oc -n $1 scale dc/mariadb-maxscale --replicas=0
-oc -n $1 scale statefulset/mariadb-galera --replicas=0
-
-
-# transfer complete, clean up
-rm -f $SECRETS
-
-oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS
-oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS
-
-oc -n $1 rollout latest dc/nginx
-oc -n $1 rollout latest dc/cli
-oc -n $1 rollout status dc/nginx
-oc -n $1 rollout status dc/cli
-echo "*** done."
diff --git a/helpers/mariadb-single2shared-no-nginx.sh b/helpers/mariadb-single2shared-no-nginx.sh
deleted file mode 100755
index 8b59b98a62..0000000000
--- a/helpers/mariadb-single2shared-no-nginx.sh
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/bash
-
-
-
-if [ ! "$1" ]; then
- echo "please define openshift project as first argument"
- exit 1;
-fi
-
-set -uo pipefail
-
-which shyaml > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install shyaml (pip3 install shyaml)"
- exit 1
-fi
-
-which jq > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install jq"
- exit 1
-fi
-
-which svcat > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install svcat"
- exit 1
-fi
-
-set -e
-
-PROJECT_NAME=$1
-
-echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}"
-
-SERVICE_NAME=mariadb
-SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:])
-SERVICE_TYPE=mariadb-shared
-
-ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE')
-
-MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas')
-
-if [ "$MARIADB_REPLICAS" == "0" ]; then
- oc -n $1 scale dc/mariadb --replicas=1
- oc -n $1 rollout status dc/mariadb
-fi
-
-# export old mariadb pod name
-OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME)
-
-if [[ "$OLD_POD" ]]; then
- echo "found $SERVICE_NAME pod $OLD_POD"
-else
- echo "no running pod found for service '${SERVICE_NAME}'', is it running?"
- exit 1
-fi
-
-# create service broker
-## taken from build-deploy-docker-compose.sh
-
-OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml"
-SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb"
-SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}"
-OPENSHIFT_PROJECT=$1
-. $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh
-
-# ServiceBrokers take a bit, wait until the credentials secret is available
-until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials
-do
- echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs"
- sleep 10
-done
-
-# Load credentials out of secret
-SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml
-oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS
-
-DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D)
-DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D)
-DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D)
-DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D)
-DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D)
-
-echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST"
-# transfer database between from old to new
-oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db \$MARIADB_DATABASE || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME"
-
-CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml"
-echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}"
-oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK
-
-echo "*** updating configmap to point to ${DB_HOST}."
-# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker
-oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \
- -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}"
-
-
-echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later"
-oc -n $1 delete service mariadb
-oc -n $1 scale dc/mariadb --replicas=0
-
-# transfer complete, clean up
-rm -f $SECRETS
-
-echo "*** done."
diff --git a/helpers/mariadb-single2shared-wordpress.sh b/helpers/mariadb-single2shared-wordpress.sh
deleted file mode 100755
index 13461cfbf5..0000000000
--- a/helpers/mariadb-single2shared-wordpress.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash
-
-
-
-if [ ! "$1" ]; then
- echo "please define openshift project as first argument"
- exit 1;
-fi
-
-set -uo pipefail
-
-which shyaml > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install shyaml (pip3 install shyaml)"
- exit 1
-fi
-
-which jq > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install jq"
- exit 1
-fi
-
-which svcat > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install svcat"
- exit 1
-fi
-
-set -e
-
-PROJECT_NAME=$1
-
-echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}"
-
-SERVICE_NAME=mariadb
-SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:])
-SERVICE_TYPE=mariadb-shared
-
-ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE')
-
-MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas')
-
-if [ "$MARIADB_REPLICAS" == "0" ]; then
- oc -n $1 scale dc/mariadb --replicas=1
- oc -n $1 rollout status dc/mariadb
-fi
-
-# export old mariadb pod name
-OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME)
-
-if [[ "$OLD_POD" ]]; then
- echo "found $SERVICE_NAME pod $OLD_POD"
-else
- echo "no running pod found for service '${SERVICE_NAME}'', is it running?"
- exit 1
-fi
-
-echo "*** Pausing nginx and cli"
-NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas')
-CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas')
-oc -n $1 scale dc/nginx --replicas=0
-oc -n $1 scale dc/cli --replicas=0
-
-
-# create service broker
-## taken from build-deploy-docker-compose.sh
-
-OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml"
-SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb"
-SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}"
-OPENSHIFT_PROJECT=$1
-. $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh
-
-# ServiceBrokers take a bit, wait until the credentials secret is available
-until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials
-do
- echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs"
- sleep 10
-done
-
-# Load credentials out of secret
-SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml
-oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS
-
-DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D)
-DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D)
-DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D)
-DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D)
-DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D)
-
-echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST"
-# transfer database between from old to new
-oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db lagoon || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME"
-
-CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml"
-echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}"
-oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK
-
-echo "*** updating configmap to point to ${DB_HOST}."
-# Add credentials to our configmap, prefixed with the uppercased service name of this servicebroker
-oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \
- -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}"
-
-
-echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later"
-oc -n $1 delete service mariadb
-oc -n $1 scale dc/mariadb --replicas=0
-
-# transfer complete, clean up
-rm -f $SECRETS
-
-oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS
-oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS
-
-oc -n $1 rollout latest dc/nginx
-oc -n $1 rollout latest dc/cli
-oc -n $1 rollout status dc/nginx
-oc -n $1 rollout status dc/cli
-echo "*** done."
diff --git a/helpers/mariadb-single2shared.sh b/helpers/mariadb-single2shared.sh
deleted file mode 100755
index 9a281271bd..0000000000
--- a/helpers/mariadb-single2shared.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash
-
-
-
-if [ ! "$1" ]; then
- echo "please define openshift project as first argument"
- exit 1;
-fi
-
-set -uo pipefail
-
-which shyaml > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install shyaml (pip3 install shyaml)"
- exit 1
-fi
-
-which jq > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install jq"
- exit 1
-fi
-
-which svcat > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install svcat"
- exit 1
-fi
-
-set -e
-
-PROJECT_NAME=$1
-
-echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}"
-
-SERVICE_NAME=mariadb
-SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:])
-SERVICE_TYPE=mariadb-shared
-
-ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE')
-
-MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas')
-
-if [ "$MARIADB_REPLICAS" == "0" ]; then
- oc -n $1 scale dc/mariadb --replicas=1
- oc -n $1 rollout status dc/mariadb
-fi
-
-# export old mariadb pod name
-OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME)
-
-if [[ "$OLD_POD" ]]; then
- echo "found $SERVICE_NAME pod $OLD_POD"
-else
- echo "no running pod found for service '${SERVICE_NAME}'', is it running?"
- exit 1
-fi
-
-echo "*** Pausing nginx and cli"
-NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas')
-CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas')
-oc -n $1 scale dc/nginx --replicas=0
-oc -n $1 scale dc/cli --replicas=0
-
-
-# create service broker
-## taken from build-deploy-docker-compose.sh
-
-OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml"
-SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb"
-SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}"
-OPENSHIFT_PROJECT=$1
-. $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh
-
-# ServiceBrokers take a bit, wait until the credentials secret is available
-until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials
-do
- echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs"
- sleep 10
-done
-
-# Load credentials out of secret
-SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml
-oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS
-
-DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D)
-DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D)
-DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D)
-DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D)
-DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D)
-
-echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST"
-# transfer database between from old to new
-oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db drupal || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME"
-
-CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml"
-echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}"
-oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK
-
-echo "*** updating configmap to point to ${DB_HOST}."
-# Add credentials to our configmap, prefixed with the uppercased service name of this servicebroker
-oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \
- -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}"
-
-
-echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later"
-oc -n $1 delete service mariadb
-oc -n $1 scale dc/mariadb --replicas=0
-
-# transfer complete, clean up
-rm -f $SECRETS
-
-oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS
-oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS
-
-oc -n $1 rollout latest dc/nginx
-oc -n $1 rollout latest dc/cli
-oc -n $1 rollout status dc/nginx
-oc -n $1 rollout status dc/cli
-echo "*** done."
diff --git a/helpers/migrate-resize-pv-nginx.sh b/helpers/migrate-resize-pv-nginx.sh
deleted file mode 100755
index d3976d1d25..0000000000
--- a/helpers/migrate-resize-pv-nginx.sh
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/bin/bash
-
-set -e -o pipefail
-
-# use oc
-OC=oc
-
-usage() {
- echo "Usage: ./migrate-resize-pv-nginx.sh -p solr -s 20Gi -d nginx,cli -n solr-namespace -c gp2 -m gluster"
- echo "WARNING: Specify the storageclass(-m) for the migrator pvc to be created in, must be multi-az mountable"
- echo " otherwise loss of data can occur"
- echo "Options:"
- echo " -m #required, should be a storageclass that is multi-az mountable, eg gluster,efs,etc.."
- echo " -p #required"
- echo " -s #optional, set to the size you want to resize it to, defaults to original requested claim"
- echo " -d #required, separate with commas to define multiple deploymentconfigs"
- echo " -n #required"
- echo " -c #optional, change the storage class of the new migrated/resized pv"
- exit 1
-}
-
-if [[ ! $@ =~ ^\-.+ ]]
-then
- usage
-fi
-
-while getopts ":p:d:s:n:c:m:h" opt; do
- case ${opt} in
- p ) # process option p
- PVC=$OPTARG;;
- d ) # process option d
- DC=$OPTARG;;
- s ) # process option s
- PVSIZE=$OPTARG;;
- n ) # process option n
- NS=$OPTARG;;
- c ) # process option c
- SC=$OPTARG;;
- m ) # process option m
- MIGRATOR_SC=$OPTARG;;
- h )
- usage;;
- *)
- usage;;
- esac
-done
-
-# need these, make sure we have them
-if [[ -z "$PVC" || -z "$DC" || -z "$NS" || -z "$MIGRATOR_SC" ]]; then
- usage
-fi
-
-# convert given DC into an array
-IFS=',' read -r -a DC_ARRAY <<< "$DC"
-
-# check if the storage class exists if a request to change is made
-if [ ! -z "$SC" ]; then
- SC_EXIST=$(${OC} -n ${NS} get sc ${SC} -o name --no-headers)
- if [ "$SC_EXIST" = "" ]; then
- exit 1
- fi
-fi
-# check if the migrator storage class exists too
-if [ ! -z "$MIGRATOR_SC" ]; then
- MIGRATOR_SC_EXIST=$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o name --no-headers)
- if [ "$MIGRATOR_SC_EXIST" = "" ]; then
- exit 1
- fi
-fi
-if [ "$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o json | jq -r .provisioner)" == "kubernetes.io/aws-ebs" ]; then
- echo "You are using ${MIGRATOR_SC} which uses aws-ebs. This may result in loss of data if the pvc is created in a different az to the migrator pod."
- read -p "Are you sure? " -n 1 -r
- echo
- if [[ $REPLY =~ ^[Yy]$ ]]
- then
- echo "Proceeding"
- else
- exit 1
- fi
-fi
-
-PVC_EXIST=$(${OC} -n ${NS} get pvc ${PVC} -o name --no-headers)
-if [ "$PVC_EXIST" = "" ]; then
- exit 1
-else
- # get the existing size of the PV
- OLDSIZE=$(${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq -r '.spec.resources.requests.storage')
- if [ -z "$PVSIZE" ]; then
- echo "using existing PV size when migrating - $OLDSIZE"
- #if a new size is not defined, use the original size when creating the new pv
- PVSIZE=$OLDSIZE
- else
- if [ "$PVSIZE" != "$OLDSIZE" ]; then
- echo "migrated PV will be created with the new size $PVSIZE"
- fi
- fi
-
- # cleanup objects in case they already exist.
- ${OC} -n ${NS} adm policy remove-scc-from-user privileged -z pvc-migrator || true
- ${OC} -n ${NS} delete serviceaccount pvc-migrator || true
- ${OC} -n ${NS} delete deploymentconfig/pv-migrator || true
- #${OC} -n ${NS} delete pvc/${PVC}-migrator --wait || true
-
-# create the migrator pvc early and fail if it can't be created
-cat << EOF | ${OC} -n ${NS} apply -f -
- apiVersion: v1
- items:
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: ${PVC}-migrator
- spec:
- storageClassName: ${MIGRATOR_SC}
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: $OLDSIZE
- kind: List
- metadata: {}
-EOF
- MIGRATOR_PVC_EXIST=$(${OC} -n ${NS} get pvc ${PVC}-migrator -o name --no-headers)
- if [ "$PVC_EXIST" = "" ]; then
- exit 1
- fi
-
-
-
- # create a svc account
- ${OC} -n ${NS} create serviceaccount pvc-migrator
- ${OC} -n ${NS} adm policy add-scc-to-user privileged -z pvc-migrator
-
- # run alpine base
- ${OC} -n ${NS} run --image alpine pv-migrator -- sh -c "apk add --no-cache rsync; trap : TERM INT; (while true; do sleep 3600; done) & wait"
- # pause the rollout to allow making multiple changes on the deploymentconfig
- ${OC} -n ${NS} rollout pause deploymentconfig/pv-migrator
-  # change serviceaccount name so it can run as privileged
- ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "pvc-migrator"}}}}'
- # now run as root
- ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}'
- echo "adding ${PVC} to pv-migrator."
- ${OC} -n ${NS} set volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage
- # add migration pvc to migrator
- ${OC} -n ${NS} set volume deploymentconfig/pv-migrator --add --name=${PVC}-migrator --type=persistentVolumeClaim --claim-name=${PVC}-migrator --mount-path=/migrator
- ${OC} -n ${NS} rollout resume deploymentconfig/pv-migrator
- ${OC} -n ${NS} rollout status deploymentconfig/pv-migrator --watch
-
- # check if the migrator pod is actually running
- MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
- if [[ ! $MIGRATOR ]]; then
- echo "No running pod found for migrator"
- exit 1
- fi
-
- echo "copy ${PVC} to ${PVC}-migrator"
- ${OC} -n ${NS} exec $MIGRATOR -- rsync -av -W --inplace --delete --exclude='/css/' --exclude='/js/' --exclude='/advagg_css/' --exclude='/advagg_js/' --exclude='/styles/' --exclude='/php/' --info=progress2 /storage/. /migrator
-
- # update actual production pods with migrator PVC (this allows them to keep running while we migrate a second time)
- for DC in "${DC_ARRAY[@]}"
- do
- ${OC} -n ${NS} set volume deploymentconfig/${DC} --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC}-migrator --overwrite
- done
- for DC in "${DC_ARRAY[@]}"
- do
- ${OC} -n ${NS} rollout status deploymentconfig/${DC} --watch
- done
-
- TMP=$(mktemp temp.${PVC}.json.XXXX)
-
- echo "dumping pvc ${PVC} to ${TMP}."
- ## we can change the storage class instead of using the default
- if [ ! -z "$SC" ]; then
- ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' | jq --arg SC "${SC}" '.spec.storageClassName=$SC' > $TMP
- else
- ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' > $TMP
- fi
-
- # scale down migrator to change the volumes on it
- ${OC} -n ${NS} scale --replicas=0 deploymentconfig/pv-migrator
-  # remove the original PVC now that we have migrated everything to the PVC-migrator; we call `--wait` to make sure the PVC really has been deleted
- ${OC} -n ${NS} delete pvc/${PVC} --wait
-
- # recreate the PVC based on what we dumped before
- ${OC} -n ${NS} create -f $TMP && rm $TMP
-
-  # check if the deploymentconfig has at least 1 ready pod; if not, scale it and check again in 3 seconds.
- while [[ $(${OC} -n ${NS} get deploymentconfig/pv-migrator -o go-template --template='{{.status.readyReplicas}}') = "" ]] || [[ $(${OC} -n ${NS} get deploymentconfig/pv-migrator -o go-template --template='{{.status.readyReplicas}}') = "0" ]]
- do
- # Sending the scaling command while it already scaling is no problem for the Kubernetes API
- ${OC} -n ${NS} scale --replicas=1 deploymentconfig/pv-migrator
- sleep 3
- done
-
- MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
- if [[ ! $MIGRATOR ]]; then
- echo "No running pod found for migrator"
- exit 1
- fi
-
- # copy data from the pvc-migrator to the newly created pvc
- ${OC} -n ${NS} exec $MIGRATOR -- rsync -av -W --inplace --delete --exclude='/css/' --exclude='/js/' --exclude='/advagg_css/' --exclude='/advagg_js/' --info=progress2 --exclude='/styles/' --exclude='/php/' /migrator/. /storage
-
- # updating the production pods with the copied storage again
- for DC in "${DC_ARRAY[@]}"
- do
- ${OC} -n ${NS} set volume deploymentconfig/${DC} --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --overwrite
- done
- for DC in "${DC_ARRAY[@]}"
- do
- ${OC} -n ${NS} rollout status deploymentconfig/${DC} --watch
- done
-
- # delete the migrator DC and PVC
- ${OC} -n ${NS} delete deploymentconfig/pv-migrator
- ${OC} -n ${NS} delete pvc/${PVC}-migrator
-
- # cleanup serviceaccounts
- ${OC} -n ${NS} adm policy remove-scc-from-user privileged -z pvc-migrator
- ${OC} -n ${NS} delete serviceaccount pvc-migrator
-fi
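
Since the data moves twice (old PVC to migrator, then migrator to the recreated PVC) with several regenerable asset directories excluded, a quick size comparison inside the migrator pod is a cheap sanity check before the final cleanup; a sketch, assuming the pod name from the lookup above:

    # the totals should be close, modulo the excluded css/js/styles/php directories
    oc -n "$NS" exec "$MIGRATOR" -- du -s /storage /migrator
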
diff --git a/helpers/migrate-resize-pv.sh b/helpers/migrate-resize-pv.sh
deleted file mode 100755
index 40ff2fc429..0000000000
--- a/helpers/migrate-resize-pv.sh
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/bin/bash
-
-# use oc
-OC=oc
-
-usage() {
- echo "Usage: ./migrate-resize-pv.sh -p solr -s 20Gi -d solr -n solr-namespace -c gp2 -m gluster"
- echo "WARNING: Specify the storageclass(-m) for the migrator pvc to be created in, must be multi-az mountable"
- echo " otherwise loss of data can occur"
- echo "Options:"
- echo " -m #required, should be a storageclass that is multi-az mountable, eg gluster,efs,etc.."
- echo " -p #required"
- echo " -s #optional, set to the size you want to resize it to, defaults to original requested claim"
- echo " -d #required"
- echo " -n #required"
- echo " -c #optional, change the storage class of the new migrated/resized pv"
- exit 1
-}
-
-if [[ ! $@ =~ ^\-.+ ]]
-then
- usage
-fi
-
-while getopts ":p:d:s:n:c:m:h" opt; do
- case ${opt} in
- p ) # process option p
- PVC=$OPTARG;;
- d ) # process option d
- DC=$OPTARG;;
- s ) # process option s
- PVSIZE=$OPTARG;;
- n ) # process option n
- NS=$OPTARG;;
- c ) # process option c
- SC=$OPTARG;;
- m ) # process option m
- MIGRATOR_SC=$OPTARG;;
- h )
- usage;;
- *)
- usage;;
- esac
-done
-
-# echo "Select which storage class is multi-az mountable, or exit:"
-# COLUMNS=1
-# resourcelist=$(${OC} get sc --no-headers | awk '{print $1}')
-# select opt in $(echo ${resourcelist} | tr -s " " "\n") "Q) exit"
-# do
-# if [[ "$opt" == "Q) exit-mach" || $REPLY == [Qq] ]]; then
-# echo "Exiting"
-# exit 1
-# fi
-# MIGRATOR_SC=$opt
-# break
-# done
-
-# need these, make sure we have them
-if [[ -z "$PVC" || -z "$DC" || -z "$NS" || -z "$MIGRATOR_SC" ]]; then
- usage
-fi
-
-# check if the storage class exists if a request to change is made
-if [ ! -z "$SC" ]; then
- SC_EXIST=$(${OC} -n ${NS} get sc ${SC} -o name --no-headers)
- if [ "$SC_EXIST" = "" ]; then
- exit 1
- fi
-fi
-# check if the migrator storage class exists too
-if [ ! -z "$MIGRATOR_SC" ]; then
- MIGRATOR_SC_EXIST=$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o name --no-headers)
- if [ "$MIGRATOR_SC_EXIST" = "" ]; then
- exit 1
- fi
-fi
-if [ "$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o json | jq -r .provisioner)" == "kubernetes.io/aws-ebs" ]; then
- echo "You are using ${MIGRATOR_SC} which uses aws-ebs. This may result in loss of data if the pvc is created in a different az to the migrator pod."
- read -p "Are you sure? " -n 1 -r
- echo
- if [[ $REPLY =~ ^[Yy]$ ]]
- then
- echo "Proceeding"
- else
- exit 1
- fi
-fi
-
-PVC_EXIST=$(${OC} -n ${NS} get pvc ${PVC} -o name --no-headers)
-if [ "$PVC_EXIST" = "" ]; then
- exit 1
-else
- # get the existing size of the PV
- OLDSIZE=$(${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq -r '.spec.resources.requests.storage')
- if [ -z "$PVSIZE" ]; then
- echo "using existing PV size when migrating - $OLDSIZE"
- #if a new size is not defined, use the original size when creating the new pv
- PVSIZE=$OLDSIZE
- else
- if [ "$PVSIZE" != "$OLDSIZE" ]; then
- echo "migrated PV will be created with the new size $PVSIZE"
- fi
- fi
-
-# create the migrator pvc early and fail if it can't be created
-cat << EOF | ${OC} -n ${NS} apply -f -
- apiVersion: v1
- items:
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: migrator
- spec:
- storageClassName: ${MIGRATOR_SC}
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: $OLDSIZE
- kind: List
- metadata: {}
-EOF
- MIGRATOR_PVC_EXIST=$(${OC} -n ${NS} get pvc migrator -o name --no-headers)
- if [ "$PVC_EXIST" = "" ]; then
- exit 1
- fi
-
- # create a svc account
- ${OC} -n ${NS} create serviceaccount pvcreclaim
- ${OC} -n ${NS} adm policy add-scc-to-user privileged -z pvcreclaim
- # scale the DC to 0
- ${OC} -n ${NS} scale --replicas=0 dc/${DC}
- # run alpine base
- ${OC} -n ${NS} run --image alpine pv-migrator -- sh -c "while sleep 3600; do :; done"
-  # change serviceaccount name so it can run as privileged
- ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "pvcreclaim"}}}}'
- # now run as root
- ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}'
- # pause the rollout
- ${OC} -n ${NS} rollout pause deploymentconfig/pv-migrator
- echo "adding ${PVC} to pv-migrator."
- ${OC} -n ${NS} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC}
-
-
-
- ${OC} -n ${NS} volume deploymentconfig/pv-migrator --add --name=migrator --type=persistentVolumeClaim --claim-name=migrator --mount-path=/migrator
- ${OC} -n ${NS} rollout resume deploymentconfig/pv-migrator
- ${OC} -n ${NS} rollout status deploymentconfig/pv-migrator --watch
-
- #
- MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
- if [[ ! $MIGRATOR ]]; then
- echo "No running pod found for migrator"
- exit 1
- fi
-
- echo "copy ${PVC} to storage"
- ${OC} -n ${NS} exec $MIGRATOR -- cp -Rpav /storage/${PVC} /migrator/
-
- TMP=$(mktemp temp.${PVC}.json.XXXX)
-
- echo "dumping pvc ${PVC} to ${TMP}."
- ## we can change the storage class instead of using the default
- if [ ! -z "$SC" ]; then
- ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' | jq --arg SC "${SC}" '.spec.storageClassName=$SC' > $TMP
- else
- ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' > $TMP
- fi
-
- ${OC} -n ${NS} rollout pause deploymentconfig/pv-migrator
-
- ${OC} -n ${NS} volume deploymentconfig/pv-migrator --remove --name=${PVC}
- ${OC} -n ${NS} delete pvc/${PVC}
- ${OC} -n ${NS} create -f $TMP && rm $TMP
- ${OC} -n ${NS} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC}
-
- ${OC} -n ${NS} rollout resume deploymentconfig/pv-migrator
- ${OC} -n ${NS} rollout status deploymentconfig/pv-migrator --watch
-
- MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
-
- ${OC} -n ${NS} exec $MIGRATOR -- cp -Rpav /migrator/${PVC} /storage/
- ${OC} -n ${NS} exec $MIGRATOR -- ls -la /storage/${PVC}
-
- ${OC} -n ${NS} delete deploymentconfig/pv-migrator
- ${OC} -n ${NS} delete pvc/migrator
- ${OC} -n ${NS} scale --replicas=1 dc/${DC}
-
- ${OC} -n ${NS} adm policy remove-scc-from-user privileged -z pvcreclaim
- ${OC} -n ${NS} delete serviceaccount pvcreclaim
-fi
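
Unlike the nginx variant above, this script copies with `cp -Rpav`, which has no differential or delete semantics, so the deploymentconfig stays scaled to zero for the whole window. If downtime matters, the rsync approach from migrate-resize-pv-nginx.sh could be swapped in; a sketch, assuming rsync has been installed in the migrator pod (`apk add --no-cache rsync`):

    # mirror the volume, preserving attributes and removing stale files on retries
    oc -n "$NS" exec "$MIGRATOR" -- rsync -av -W --inplace --delete "/storage/${PVC}" /migrator/
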
diff --git a/helpers/nginx-healthchecks.sh b/helpers/nginx-healthchecks.sh
deleted file mode 100755
index 9dde57a5ab..0000000000
--- a/helpers/nginx-healthchecks.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-if [ -z "$OPENSHIFT_PROJECT" ]; then
- echo "OPENSHIFT_PROJECT not set"
- exit 1
-fi
-
-set -eu -o pipefail
-
-OC="oc"
-
-echo "${OPENSHIFT_PROJECT}: starting =================================================================="
-
-# Replace the php container healthchecks with tcpSocket probes, if the nginx deploymentconfig has a php container
-if oc -n ${OPENSHIFT_PROJECT} get deploymentconfig nginx -o yaml --ignore-not-found | grep -q php &> /dev/null; then
- oc -n ${OPENSHIFT_PROJECT} patch dc/nginx --patch '{"spec":{"template":{"spec":{"containers":[{"name":"php","livenessProbe":{"$patch":"replace","tcpSocket":{"port":9000},"initialDelaySeconds":60,"periodSeconds":10},"readinessProbe":{"$patch":"replace","tcpSocket":{"port":9000},"initialDelaySeconds":2,"periodSeconds":10}}]}}}}' || true
- oc -n ${OPENSHIFT_PROJECT} rollout status --watch dc/nginx
-fi
-
-
-echo "${OPENSHIFT_PROJECT}: done =================================================================="
diff --git a/helpers/reclaim-pv.sh b/helpers/reclaim-pv.sh
deleted file mode 100755
index 53ad5bfba2..0000000000
--- a/helpers/reclaim-pv.sh
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/bin/bash
-
-# written for openshift 3.7; small changes may be required for other versions.
-#
-# usage ./reclaim-pv.sh
-#
-# using the current openshift server and namespace this script will:
-# 1. scale all deployments to zero pods
-# 2. create a pod and attach a temporary migration pvc to it
-# 3. attach all other pvcs currently claimed in the namespace to this pod
-# 4. for each pvc: copy the contents to the temporary pvc, recreate the
-#    claim (this allows the preferred pv to be used), then attach the
-#    newly created pvc and copy the contents back to it
-# 5. clean up
-
-OC=oc
-PVCS=($(${OC} get pvc -o name | sed 's/persistentvolumeclaims\///'))
-
-if [[ $# -gt 0 ]]; then
- unset PVCS
- PVCS=("${BASH_ARGV[@]}")
-fi
-
-if [[ ! ${#PVCS[@]} -gt 0 ]]; then
- echo "no PVCs found."
-
-else
- ${OC} create serviceaccount pvcreclaim
- ${OC} adm policy add-scc-to-user privileged -z pvcreclaim
-
- ${OC} get dc -o name --no-headers | xargs -P3 -n1 ${OC} scale --replicas=0
-
- ${OC} run --image alpine pv-migrator -- sh -c "while sleep 3600; do :; done"
-
-  # change serviceaccount name so it can run as privileged
- ${OC} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "pvcreclaim"}}}}'
- # now run as root
- ${OC} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}'
-
- ${OC} rollout pause deploymentconfig/pv-migrator
-
- for PVC in "${PVCS[@]}"
- do
- echo "adding ${PVC} to pv-migrator."
- ${OC} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC}
- done
-
-cat << EOF | ${OC} apply -f -
- apiVersion: v1
- items:
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: migrator
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 20Gi
- kind: List
- metadata: {}
-EOF
-
-
- ${OC} volume deploymentconfig/pv-migrator --add --name=migrator --type=persistentVolumeClaim --claim-name=migrator --mount-path=/migrator
-
- ${OC} rollout resume deploymentconfig/pv-migrator
- ${OC} rollout status deploymentconfig/pv-migrator --watch
-
- #
- MIGRATOR=$(${OC} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
- #MIGRATOR=$(${OC} get pod -o custom-columns=NAME:.metadata.name --no-headers -l run=pv-migrator)
- if [[ ! $MIGRATOR ]]; then
- echo "No running pod found for migrator"
- exit 1
- fi
-
- for PVC in "${PVCS[@]}"
- do
- echo "copy ${PVC} to storage"
- ${OC} exec $MIGRATOR -- cp -Rpav /storage/${PVC} /migrator/
-
- TMP=$(mktemp temp.${PVC}.json.XXXX)
-
- echo "dumping pvc ${PVC} to ${TMP}."
- ${OC} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' > $TMP
-
-
- ${OC} rollout pause deploymentconfig/pv-migrator
-
- ${OC} volume deploymentconfig/pv-migrator --remove --name=${PVC}
- ${OC} delete pvc/${PVC}
- ${OC} create -f $TMP && rm $TMP
- ${OC} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC}
-
- ${OC} rollout resume deploymentconfig/pv-migrator
- ${OC} rollout status deploymentconfig/pv-migrator --watch
-
-
- MIGRATOR=$(${OC} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
-
- ${OC} exec $MIGRATOR -- cp -Rpav /migrator/${PVC} /storage/
- ${OC} exec $MIGRATOR -- ls -la /storage/${PVC}
-
-
-
-
- done
-
- ${OC} delete deploymentconfig/pv-migrator
- ${OC} delete pvc/migrator
- ${OC} get dc -o name --no-headers | xargs -P3 -n1 ${OC} scale --replicas=1
-
- ${OC} adm policy remove-scc-from-user privileged -z pvcreclaim
- ${OC} delete serviceaccount pvcreclaim
-
-fi
diff --git a/helpers/run-in-all-lagoon-projects.sh b/helpers/run-in-all-lagoon-projects.sh
deleted file mode 100755
index dbb82b969b..0000000000
--- a/helpers/run-in-all-lagoon-projects.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e -o pipefail
-
-oc get configmaps --all-namespaces --no-headers | grep lagoon-env | awk '{ print $1 }' | while read OPENSHIFT_PROJECT; do
- REGEX=${REGEX:-.*}
- if [[ $OPENSHIFT_PROJECT =~ $REGEX ]]; then
- . "$1"
- fi
-done
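
The script sources its first argument once per matching namespace with $OPENSHIFT_PROJECT set, and the optional REGEX environment variable narrows the project list. For example, to apply the healthcheck fix above to production projects only (illustrative invocation):

    REGEX='-production$' ./helpers/run-in-all-lagoon-projects.sh ./helpers/nginx-healthchecks.sh
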
diff --git a/helpers/shared-cleanup.sh b/helpers/shared-cleanup.sh
deleted file mode 100755
index 6dedc3e62d..0000000000
--- a/helpers/shared-cleanup.sh
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env bash
-
-# this script assumes you're logged into an openshift cluster locally
-# and that you can connect directly to the database servers listed in DB_HOST
-# on port 3306 with a .my.cnf that allows you to run
-# non-interactive mysql commands.
-
-# use oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials
-# if the database is not directly connectable, an ssh tunnel can be used:
-# ~/.my.cnf-mysql-development-cluster.cluster-xxx.rds.amazonaws.com
-# [client]
-# host=127.0.0.1
-# port=33007
-# user=root
-# password=af105380aa4a2f034a083daeb9ed27b7a8395a44
-
-# ssh -L 33007:mysql-development-cluster.cluster-xxx.rds.amazonaws.com:3306 infra1.cluster1.amazee.io
-
-# after running this script, the user will be presented with a list of
-# databases that are probably ok to remove.
-
-set -euo pipefail
-
-for util in oc jq mysql; do
- if ! command -v ${util} > /dev/null; then
- echo "please install ${util}"
- exit 1
- fi
-done
-
-# Colours.
-shw_grey () {
- echo $(tput bold)$(tput setaf 0) $@ $(tput sgr 0)
-}
-shw_norm () {
- echo $(tput bold)$(tput setaf 9) $@ $(tput sgr 0)
-}
-shw_info () {
- echo $(tput bold)$(tput setaf 4) $@ $(tput sgr 0)
-}
-shw_warn () {
- echo $(tput bold)$(tput setaf 2) $@ $(tput sgr 0)
-}
-shw_err () {
- echo $(tput bold)$(tput setaf 1) $@ $(tput sgr 0)
-}
-
-# Services with a port are not servicebrokers.
-shw_grey "Getting a list of services for cluster $(oc whoami --show-server)."
-oc get service --all-namespaces -o=jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.externalName}{"\n"}{end}' \
- | awk '$2 ~ /^mariadb-/ {print}' > /tmp/mariadb-services
-# Remove read replica services.
-sed -i.bak '/mariadb-readreplica-/d' /tmp/mariadb-services
-# Remove random database pods.
-sed -i.bak '/mariadb-d7[[:space:]]*$/d' /tmp/mariadb-services
-
-# Get a list of database clusters:
-# - Ignore the dedicated clusters.
-# - Ignore the read replicas.
-SERVERS=$(awk '{print $3}' /tmp/mariadb-services | sort -u | grep -v "^dedicated" | grep -v ".cluster-ro-")
-
-# Ensure you can connect to all database clusters; once connected, list every
-# database that belongs to the Ansible Service Broker.
-for SERVER in $SERVERS; do
- CONFFILE="${HOME}/.my.cnf-${SERVER}"
- if [ -f "$CONFFILE" ]; then
- shw_info "Getting current database list for cluster ${SERVER}..."
- # The ASB will never create a database smaller than 5 characters.
- mysql --defaults-file="$CONFFILE" -se 'show databases;' | grep -Ev "mysql$|_schema$" | grep -E '^.{5,}$' > "/tmp/${SERVER}-databases"
- else
- shw_err "ERROR: please create $CONFFILE so I can know how to connect to $SERVER"
- exit 2
- fi
-done
-
-# For every active project, find out its database name and remove it from the
-# database cluster file (to indicate it has been found).
-ERRORS=()
-for PROJECT in $(awk '$3 ~ /^dedicated/ {next} {print $1}' /tmp/mariadb-services); do
- shw_info "Checking namespace '${PROJECT}'."
-
- # In the case that there are multiple ASB configs for the 1 project, this will
- # return an array with each database in it.
- DATABASES=($(oc -n "${PROJECT}" get configmap lagoon-env -o json | jq -r '.data | with_entries(select(.key|match("_DATABASE";"i")))[]' || :))
-
- if [ ${#DATABASES[@]} -eq 0 ]; then
- shw_err " > Some problem with ${PROJECT}"
- ERRORS+=("${PROJECT}")
- else
- # Iterate over the potential many database names.
- for (( i=0; i<${#DATABASES[@]}; i++ )) ; do
-      # @TODO it would be technically possible to have the 2 databases spread
-      # across multiple database clusters; this code assumes a single project
-      # uses a single database cluster.
- DBHOST=$(grep --max-count=1 "^${PROJECT}[[:space:]]" /tmp/mariadb-services | awk '{print $3}')
- shw_warn " > Found database '${DATABASES[$i]}' on host '${DBHOST}'."
- sed -i.bak -e "/${DATABASES[$i]}/d" "/tmp/${DBHOST}-databases"
- done
- fi
-done
-
-echo; echo
-if [ ${#ERRORS[@]} -gt 0 ]; then
- shw_info "These projects could not adaquately checked:"
- printf "%s\\n" "${ERRORS[@]}"
- echo
-fi
-
-for SERVER in $SERVERS; do
- CONFFILE="${HOME}/.my.cnf-${SERVER}"
- echo
- shw_info "Orphaned databases for '${SERVER}'"
-
-  # List server uptime.
- shw_grey "MySQL uptime (last_update can only ever be this old)"
- mysql --defaults-file="${CONFFILE}" -e "SELECT TIME_FORMAT(SEC_TO_TIME(VARIABLE_VALUE ),'%Hh %im') as Uptime from performance_schema.global_status where VARIABLE_NAME='Uptime';"
-
- rm -f /tmp/${SERVER}-databases-drop
- while IFS= read -r line || [[ -n "$line" ]]; do
- shw_info " $line"
- echo -n " - Last updated: "
- mysql --defaults-file="${CONFFILE}" -se "SELECT from_unixtime(UNIX_TIMESTAMP(MAX(UPDATE_TIME))) as last_update FROM information_schema.tables WHERE TABLE_SCHEMA IN ('$line');"
- echo -n " - Table count: "
- mysql --defaults-file="${CONFFILE}" -se "SELECT COUNT(1) AS TableCount FROM information_schema.tables WHERE table_schema = '$line';"
- echo "DROP DATABASE \`$line\`;" >> /tmp/${SERVER}-databases-drop
- done < "/tmp/${SERVER}-databases"
-
- if [ -f "/tmp/${SERVER}-databases-drop" ]; then
- shw_grey "To remove these databases:"
- cat /tmp/${SERVER}-databases-drop
- fi
-done
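
The script deliberately stops at printing the DROP statements rather than executing them. After manually reviewing a cluster's drop file against the last_update and table-count output, the statements could be applied with the same credentials file; a sketch:

    # irreversible -- review the generated statements first
    mysql --defaults-file="${HOME}/.my.cnf-${SERVER}" < "/tmp/${SERVER}-databases-drop"
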
diff --git a/helpers/shared-to-shared-migrate.sh b/helpers/shared-to-shared-migrate.sh
deleted file mode 100755
index f734e3d663..0000000000
--- a/helpers/shared-to-shared-migrate.sh
+++ /dev/null
@@ -1,281 +0,0 @@
-#!/usr/bin/env bash
-
-#
-# What this script is for
-# =======================
-# This script will migrate a database user, access, database and contents from
-# an existing cluster to a destination cluster.
-#
-# At the moment, this is geared towards the Ansible Service Broker, but likely
-# can be modified in the future to work with the DBaaS operator.
-#
-# It has been used successfully to migrate databases between RDS clusters.
-#
-# There are a whole bunch of checks after the migration to ensure it was a
-# success. Likely you should do additional testing as well.
-#
-# Requirements
-# ============
-# * You are logged into OpenShift CLI and have access to the NAMESPACE you want
-# to migrate.
-# * You have a `.my.cnf` file for the destination database cluster.
-# * If your destination database cluster is not directly accessible, then you
-# have created SSH tunnels to expose them on a local port.
-#
-# How to get your existing ASB root credentials
-# =============================================
-# oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials -o json | jq '.data | map_values(@base64d)'
-#
-# How to create a `.my.cnf` file
-# ==============================
-# ~/.my.cnf-shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com
-# [client]
-# host=127.0.0.1
-# port=33007
-# user=root
-# password=banana2
-#
-# How to create an SSH tunnel through a jump box to your database cluster
-# =======================================================================
-# ssh -L 33007:shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com:3306 jumpbox.aws.amazee.io
-#
-# Example command 1
-# =================
-# ./helpers/shared-to-shared-migrate.sh \
-# --destination shared-cluster.cluster-apple.ap-southeast-2.rds.amazonaws.com \
-# --replica shared-cluster.cluster-r0-apple.ap-southeast-2.rds.amazonaws.com \
-# --namespace NAMESPACE \
-# --dry-run
-#
-# Example command 2
-# =================
-# namespaces="
-# foo-example-com-production
-# bar-example-com-production
-# baz-example-com-production
-# quux-example-com-production
-# "
-# for namespace in $namespaces; do
-# ./helpers/shared-to-shared-migrate.sh \
-# --dry-run \
-# --namespace "$namespace" \
-# --destination shared-mysql-production-1-cluster.cluster-plum.ap-southeast-2.rds.amazonaws.com \
-# --replica shared-mysql-production-1-cluster.cluster-ro-plum.ap-southeast-2.rds.amazonaws.com
-# done
-#
-set -euo pipefail
-
-# Initialize our own variables:
-DESTINATION_CLUSTER=""
-REPLICA_CLUSTER=""
-NAMESPACE=""
-DRY_RUN=""
-TIMESTAMP=$(date +%s)
-
-# Colours.
-shw_grey () {
- tput bold
- tput setaf 0
- echo "$@"
- tput sgr0
-}
-shw_norm () {
- tput bold
- tput setaf 9
- echo "$@"
- tput sgr0
-}
-shw_info () {
- tput bold
- tput setaf 4
- echo "$@"
- tput sgr0
-}
-shw_warn () {
- tput bold
- tput setaf 2
- echo "$@"
- tput sgr0
-}
-shw_err () {
- tput bold
- tput setaf 1
- echo "$@"
- tput sgr0
-}
-
-# Parse input arguments.
-while [[ $# -gt 0 ]] ; do
- case $1 in
- -d|--destination)
- DESTINATION_CLUSTER="$2"
- shift # past argument
- shift # past value
- ;;
- -r|--replica)
- REPLICA_CLUSTER="$2"
- shift # past argument
- shift # past value
- ;;
- -n|--namespace)
- NAMESPACE="$2"
- shift # past argument
- shift # past value
- ;;
- --dry-run)
- DRY_RUN="TRUE"
- shift # past argument
- ;;
- *)
- echo "Invalid Argument: $1"
- exit 3
- ;;
- esac
-done
-
-shw_grey "================================================"
-shw_grey " START_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'"
-shw_grey "================================================"
-shw_grey " DESTINATION_CLUSTER=$DESTINATION_CLUSTER"
-shw_grey " REPLICA_CLUSTER=$REPLICA_CLUSTER"
-shw_grey " NAMESPACE=$NAMESPACE"
-shw_grey "================================================"
-
-for util in oc jq mysql; do
- if ! command -v ${util} > /dev/null; then
- shw_err "Please install ${util}"
- exit 1
- fi
-done
-
-CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER}
-if [ ! -f "$CONF_FILE" ]; then
- shw_err "ERROR: please create $CONF_FILE so I can know how to connect to ${DESTINATION_CLUSTER}"
- exit 2
-fi
-
-if [ "$DRY_RUN" ] ; then
- shw_warn "Dry run is enabled, so no network service changes will take place."
-fi
-
-# Load the DBaaS credentials for the project
-SECRETS=$(oc -n "$NAMESPACE" get secret mariadb-servicebroker-credentials -o json)
-
-DB_NETWORK_SERVICE=$(echo "$SECRETS" | jq -er '.data.DB_HOST | @base64d')
-if echo "$SECRETS" | grep -q DB_READREPLICA_HOSTS ; then
- DB_READREPLICA_HOSTS=$(echo "$SECRETS" | jq -er '.data.DB_READREPLICA_HOSTS | @base64d')
-else
- DB_READREPLICA_HOSTS=""
-fi
-DB_USER=$(echo "$SECRETS" | jq -er '.data.DB_USER | @base64d')
-DB_PASSWORD=$(echo "$SECRETS" | jq -er '.data.DB_PASSWORD | @base64d')
-DB_NAME=$(echo "$SECRETS" | jq -er '.data.DB_NAME | @base64d')
-DB_PORT=$(echo "$SECRETS" | jq -er '.data.DB_PORT | @base64d')
-
-shw_grey "================================================"
-shw_grey " DB_NETWORK_SERVICE=$DB_NETWORK_SERVICE"
-shw_grey " DB_READREPLICA_HOSTS=$DB_READREPLICA_HOSTS"
-shw_grey " DB_USER=$DB_USER"
-shw_grey " DB_PASSWORD=$DB_PASSWORD"
-shw_grey " DB_NAME=$DB_NAME"
-shw_grey " DB_PORT=$DB_PORT"
-shw_grey "================================================"
-
-# Ensure there is a database in the destination.
-shw_info "> Preparing Database, User, and permissions on destination"
-shw_info "================================================"
-CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER}
-mysql --defaults-file="$CONF_FILE" -se "CREATE DATABASE IF NOT EXISTS \`${DB_NAME}\`;"
-mysql --defaults-file="$CONF_FILE" -se "CREATE USER IF NOT EXISTS \`${DB_USER}\`@'%' IDENTIFIED BY '${DB_PASSWORD}';"
-mysql --defaults-file="$CONF_FILE" -se "GRANT ALL ON \`${DB_NAME}\`.* TO \`${DB_USER}\`@'%';"
-mysql --defaults-file="$CONF_FILE" -se "FLUSH PRIVILEGES;"
-
-# Verify access.
-shw_info "> Verify MySQL access for the new user"
-shw_info "================================================"
-mysql --defaults-file="$CONF_FILE" -e "SELECT * FROM mysql.db WHERE Db = '${DB_NAME}'\G;"
-
-# Dump the database inside the CLI pod.
-POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -r '.items[0].metadata.name // empty')
-if [ -z "$POD" ]; then
- shw_warn "No running cli pod in namespace $NAMESPACE"
- shw_warn "Scaling up 1 cli DeploymentConfig pod"
- oc -n "$NAMESPACE" scale dc cli --replicas=1 --timeout=2m
- sleep 32 # hope for timely scheduling
- POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -er '.items[0].metadata.name')
-fi
-shw_info "> Dumping database $DB_NAME on pod $POD on host $DB_NETWORK_SERVICE"
-shw_info "================================================"
-oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysqldump -h '$DB_NETWORK_SERVICE' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' > /tmp/migration.sql"
-oc -n "$NAMESPACE" exec "$POD" -- ls -lh /tmp/migration.sql
-oc -n "$NAMESPACE" exec "$POD" -- head -n 5 /tmp/migration.sql
-oc -n "$NAMESPACE" exec "$POD" -- tail -n 5 /tmp/migration.sql
-shw_norm "> Dump is done"
-shw_norm "================================================"
-
-# Import to new database.
-shw_info "> Importing the dump into ${DESTINATION_CLUSTER}"
-shw_info "================================================"
-oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysql -h '$DESTINATION_CLUSTER' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' < /tmp/migration.sql"
-oc -n "$NAMESPACE" exec "$POD" -- rm /tmp/migration.sql
-
-shw_norm "> Import is done"
-shw_norm "================================================"
-
-# Alter the network service(s).
-shw_info "> Altering the Network Service $DB_NETWORK_SERVICE to point at $DESTINATION_CLUSTER"
-shw_info "================================================"
-ORIGINAL_DB_HOST=$(oc -n "$NAMESPACE" get "svc/$DB_NETWORK_SERVICE" -o json --export | tee "/tmp/$NAMESPACE-svc.json" | jq -er '.spec.externalName')
-if [ "$DRY_RUN" ] ; then
- echo "**DRY RUN**"
-else
- oc -n "$NAMESPACE" patch "svc/$DB_NETWORK_SERVICE" -p "{\"spec\":{\"externalName\": \"$DESTINATION_CLUSTER\"}}"
-fi
-if [ "$DB_READREPLICA_HOSTS" ]; then
- shw_info "> Altering the Network Service $DB_READREPLICA_HOSTS to point at $REPLICA_CLUSTER"
- shw_info "================================================"
- ORIGINAL_DB_READREPLICA_HOSTS=$(oc -n "$NAMESPACE" get "svc/$DB_READREPLICA_HOSTS" -o json --export | tee "/tmp/$NAMESPACE-svc-replica.json" | jq -er '.spec.externalName')
- if [ "$DRY_RUN" ] ; then
- echo "**DRY RUN**"
- else
- oc -n "$NAMESPACE" patch "svc/$DB_READREPLICA_HOSTS" -p "{\"spec\":{\"externalName\": \"$REPLICA_CLUSTER\"}}"
- fi
-fi
-
-# Unsure what delay, if any, there is before this takes effect, but 1 second
-# sounds completely reasonable.
-sleep 1
-
-# Verify the correct RDS cluster.
-shw_info "> Output the RDS cluster that Drush is connecting to"
-shw_info "================================================"
-oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush sqlq 'SELECT @@aurora_server_id;'"
-
-# Drush status.
-shw_info "> Drush status"
-shw_info "================================================"
-oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush status"
-
-# Get routes, and ensure a cache bust works.
-ROUTE=$(oc -n "$NAMESPACE" get routes -o json | jq -er '.items[0].spec.host')
-shw_info "> Testing the route https://${ROUTE}/?${TIMESTAMP}"
-shw_info "================================================"
-curl -skLIXGET "https://${ROUTE}/?${TIMESTAMP}" \
- -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36" \
- --cookie "NO_CACHE=1" | grep -E "HTTP|Cache|Location|LAGOON" || true
-
-shw_grey "================================================"
-shw_grey ""
-shw_grey "In order to rollback this change, edit the Network Service(s) like so:"
-shw_grey ""
-shw_grey "oc -n $NAMESPACE patch svc/$DB_NETWORK_SERVICE -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_HOST\"}}'"
-if [ "$DB_READREPLICA_HOSTS" ]; then
- shw_grey "oc -n $NAMESPACE patch svc/$DB_READREPLICA_HOSTS -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_READREPLICA_HOSTS\"}}'"
-fi
-
-echo ""
-shw_grey "================================================"
-shw_grey " END_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'"
-shw_grey "================================================"
-shw_norm "Done in $SECONDS seconds"
-exit 0
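
Beyond the Drush and route checks built in above, comparing table counts between the source and destination clusters is a cheap way to catch a silently truncated dump; a sketch reusing the credentials the script already loaded:

    # run once against the old host and once against the destination; counts should match
    oc -n "$NAMESPACE" exec "$POD" -- mysql -h "$DESTINATION_CLUSTER" -u "$DB_USER" -p"$DB_PASSWORD" \
      -se "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='$DB_NAME';"
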
diff --git a/helpers/sharedmigrate.sh b/helpers/sharedmigrate.sh
deleted file mode 100755
index 973ea90645..0000000000
--- a/helpers/sharedmigrate.sh
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/bin/bash
-
-for util in oc svcat jq; do
-which ${util} > /dev/null
-if [ $? -gt 0 ]; then
- echo "please install ${util}"
- exit 1
-fi
-done;
-
-usage() {
- cat << EOF
- ${0}: migrate a mariadb servicebroker to another mariadb servicebroker
- This script is useful when needing to change either the class or the plan
-  of an existing service broker.
- By default, it will use:
- 'lagoon-dbaas-mariadb-apb' as the class,
- 'production' as the plan,
- current openshift context as the namespace, and
- first servicebroker in the namespace.
-
- when completed, run with -x to delete migration pvc, dc and serviceaccount.
-
- e.g: $0 -n mysite-devel -c lagoon-dbaas-mariadb-apb -p development -i mariadb
- $0 -n mysite-devel -x
-EOF
-}
-
-# n- namespace
-# c- class ( lagoon-dbaas-mariadb-apb )
-# p- plan ( production / stage )
-
-args=`getopt n:c:p:i:xh $*`
-if [[ $# -eq 0 ]]; then
- usage
- exit
-fi
-
-# set some defaults
-NAMESPACE=$(oc project -q)
-PLAN=production
-CLASS=lagoon-dbaas-mariadb-apb
-
-set -- $args
-for i
-do
- case "$i" in
- -n)
- NAMESPACE="$2"; shift;
- shift;;
- -c)
- CLASS="$2"; shift;
- shift;;
- -p)
- PLAN="$2"; shift;
- shift;;
- -i)
- INSTANCE="$2"; shift;
- shift;;
- -h)
- usage
- exit 0
- shift;;
-
- -x)
- echo "cleaning up "
- oc -n ${NAMESPACE} delete dc/migrator
- oc -n ${NAMESPACE} delete pvc/migrator
- oc -n ${NAMESPACE} adm policy remove-scc-from-user privileged -z migrator
- oc -n ${NAMESPACE} delete serviceaccount migrator
- exit 0
- shift;;
-
- --)
- shift; break;;
- esac
-done
-
-# set a default instance, if not specified.
-if [ -z ${INSTANCE+x} ]; then
- INSTANCE=$(svcat -n ${NAMESPACE} get instance -o json |jq -r '.items[0].metadata.name')
- echo "instance not specified, using $INSTANCE"
-fi
-
-# verify instance exists
-svcat -n ${NAMESPACE} get instance $INSTANCE
-if [ $? -gt 0 ] ;then
- echo "no instance found"
- exit 2
-fi
-
-echo "Verifying secret ${INSTANCE}-servicebroker-credentials "
-oc -n ${NAMESPACE} get --insecure-skip-tls-verify secret ${INSTANCE}-servicebroker-credentials || svcat bind $INSTANCE --name ${INSTANCE}-servicebroker-credentials
-
-# validate $broker
-
-oc -n ${NAMESPACE} create serviceaccount migrator
-oc -n ${NAMESPACE} adm policy add-scc-to-user privileged -z migrator
-
-oc -n ${NAMESPACE} run --image mariadb --env="MYSQL_RANDOM_ROOT_PASSWORD=yes" migrator
-
-# pause and make some changes
-oc -n ${NAMESPACE} rollout pause deploymentconfig/migrator
-
-# We don't care about the database in /var/lib/mysql; just privilege it and let it do its thing.
-oc -n ${NAMESPACE} patch deploymentconfig/migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "migrator"}}}}'
-oc -n ${NAMESPACE} patch deploymentconfig/migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}'
-oc -n ${NAMESPACE} patch deploymentconfig/migrator -p '{"spec":{"strategy":{"type":"Recreate"}}}'
-
-
-# create a volume to store the dump.
-cat << EOF | oc -n ${NAMESPACE} apply -f -
- apiVersion: v1
- items:
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: migrator
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 20Gi
- kind: List
- metadata: {}
-EOF
-
-oc -n ${NAMESPACE} volume deploymentconfig/migrator --add --name=migrator --type=persistentVolumeClaim --claim-name=migrator --mount-path=/migrator
-
-
-# look up the secret from the instance and add it to the new container
-SECRET=$(svcat -n ${NAMESPACE} get binding -o json |jq -r ".items[] | select (.spec.instanceRef.name == \"$INSTANCE\") | .spec.secretName")
-echo secret: $SECRET
-oc -n ${NAMESPACE} set env --from=secret/${SECRET} --prefix=OLD_ dc/migrator
-
-oc -n ${NAMESPACE} rollout resume deploymentconfig/migrator
-oc -n ${NAMESPACE} rollout latest deploymentconfig/migrator
-oc -n ${NAMESPACE} rollout status deploymentconfig/migrator --watch
-
-sleep 20;
-# Do the dump:
-POD=$(oc -n ${NAMESPACE} get pods -o json --show-all=false -l run=migrator | jq -r '.items[].metadata.name')
-
-oc -n ${NAMESPACE} exec $POD -- bash -c 'time mysqldump -h $OLD_DB_HOST -u $OLD_DB_USER -p${OLD_DB_PASSWORD} $OLD_DB_NAME > /migrator/migration.sql'
-
-echo "DUMP IS DONE;"
-oc -n ${NAMESPACE} exec $POD -- ls -al /migrator/migration.sql || exit 1
-oc -n ${NAMESPACE} exec $POD -- head /migrator/migration.sql
-oc -n ${NAMESPACE} exec $POD -- tail /migrator/migration.sql || exit 1
-
-
-printf "\n\n\nLAST CHANCE TO CANCEL BEFORE I DELETE THE OLD SERVICEBROKER.\n\n"
-echo "sleeping 30 seconds..."
-sleep 30
-
-# delete the old servicebroker
-time svcat -n ${NAMESPACE} unbind $INSTANCE
-time svcat -n ${NAMESPACE} deprovision $INSTANCE --wait --interval 2s --timeout=1h
-echo "===== old instance deprovisioned, waiting 30 seconds."
-sleep 30;
-
-echo "===== provisioning new $CLASS of plan $PLAN"
-time svcat -n ${NAMESPACE} provision $INSTANCE --class $CLASS --plan $PLAN --wait
-echo " and binding"
-time svcat -n ${NAMESPACE} bind $INSTANCE --name ${INSTANCE}-servicebroker-credentials --wait
-
-until oc get -n ${NAMESPACE} secret ${INSTANCE}-servicebroker-credentials
-do
- echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 5 secs"
- sleep 5
-done
-
-
-echo "rolling out migrator again so the secrets get propagated."
-oc -n ${NAMESPACE} rollout latest deploymentconfig/migrator
-oc -n ${NAMESPACE} rollout status deploymentconfig/migrator --watch
-
-sleep 10;
-
-# Do the dump:
-POD=$(oc -n ${NAMESPACE} get pods -o json --show-all=false -l run=migrator | jq -r '.items[].metadata.name')
-
-oc -n ${NAMESPACE} exec $POD -- bash -c 'cat /migrator/migration.sql |sed -e "s/DEFINER[ ]*=[ ]*[^*]*\*/\*/" | mysql -h $OLD_DB_HOST -u $OLD_DB_USER -p${OLD_DB_PASSWORD} $OLD_DB_NAME'
-
-
-# Load credentials out of secret
-SECRETS=$(mktemp).yaml
-echo "Exporting ${INSTANCE}-servicebroker-credentials into $SECRETS "
-oc -n ${NAMESPACE} get --insecure-skip-tls-verify secret ${INSTANCE}-servicebroker-credentials -o yaml > $SECRETS
-
-DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D)
-DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D)
-DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D)
-DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D)
-DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D)
-
-SERVICE_NAME_UPPERCASE=$(echo $INSTANCE | tr [:lower:] [:upper:])
-oc -n $NAMESPACE patch configmap lagoon-env \
- -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}"
diff --git a/helpers/update-versions.yml b/helpers/update-versions.yml
deleted file mode 100644
index db508c68fa..0000000000
--- a/helpers/update-versions.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-# Lagoon Version Update Helper
-#
-# Helper to update Version inside Dockerfiles
-# Update versions below in `vars` and execute locally
-#
-# ansible-playbook helpers/update-versions.yml
-- name: update versions
- hosts: 127.0.0.1
- connection: local
- vars:
- # Newrelic - https://docs.newrelic.com/docs/release-notes/agent-release-notes/php-release-notes/
- NEWRELIC_VERSION: '9.12.0.268'
- # Composer - https://getcomposer.org/download/
- COMPOSER_VERSION: '1.10.9'
- COMPOSER_HASH_SHA256: '70d6b9c3e0774b398a372dcb7f89dfe22fc25884e6e09ebf277286dd64cfaf35'
- # Drupal Console Launcher - https://github.com/hechoendrupal/drupal-console-launcher/releases
- DRUPAL_CONSOLE_LAUNCHER_VERSION: 1.9.4
- DRUPAL_CONSOLE_LAUNCHER_SHA: b7759279668caf915b8e9f3352e88f18e4f20659
- # Drush - https://github.com/drush-ops/drush/releases
- DRUSH_VERSION: 8.3.5
- # Drush Launcher Version - https://github.com/drush-ops/drush-launcher/releases
- DRUSH_LAUNCHER_VERSION: 0.6.0
- tasks:
- - name: update NEWRELIC_VERSION
- lineinfile:
- path: "{{ lookup('env', 'PWD') }}/images/php/fpm/Dockerfile"
- regexp: 'ENV NEWRELIC_VERSION='
- line: 'ENV NEWRELIC_VERSION={{ NEWRELIC_VERSION }}'
- - name: update COMPOSER_VERSION
- lineinfile:
- path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile"
- regexp: 'ENV COMPOSER_VERSION='
- line: 'ENV COMPOSER_VERSION={{ COMPOSER_VERSION }} \'
- - name: update COMPOSER_HASH_SHA256
- lineinfile:
- path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile"
- regexp: 'COMPOSER_HASH_SHA256='
- line: ' COMPOSER_HASH_SHA256={{ COMPOSER_HASH_SHA256 }}'
- - name: update DRUPAL_CONSOLE_LAUNCHER_VERSION
- lineinfile:
- path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
- regexp: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION='
- line: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION={{ DRUPAL_CONSOLE_LAUNCHER_VERSION }} \'
- - name: update DRUPAL_CONSOLE_LAUNCHER_SHA
- lineinfile:
- path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
- regexp: 'DRUPAL_CONSOLE_LAUNCHER_SHA='
- line: ' DRUPAL_CONSOLE_LAUNCHER_SHA={{ DRUPAL_CONSOLE_LAUNCHER_SHA }} \'
- - name: update DRUSH_VERSION
- lineinfile:
- path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
- regexp: 'DRUSH_VERSION='
- line: ' DRUSH_VERSION={{ DRUSH_VERSION }} \'
- - name: update DRUSH_LAUNCHER_VERSION
- lineinfile:
- path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile"
- regexp: 'DRUSH_LAUNCHER_VERSION='
- line: ' DRUSH_LAUNCHER_VERSION={{ DRUSH_LAUNCHER_VERSION }} \'
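
Because the playbook edits the Dockerfiles in place via lineinfile, a check-mode run with diff output previews the changes without writing anything:

    ansible-playbook --check --diff helpers/update-versions.yml
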
diff --git a/images/athenapdf-service/Dockerfile b/images/athenapdf-service/Dockerfile
index b67e1e1d11..8fa9ba4fb0 100644
--- a/images/athenapdf-service/Dockerfile
+++ b/images/athenapdf-service/Dockerfile
@@ -1,5 +1,6 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
+ARG UPSTREAM_REPO
+ARG UPSTREAM_TAG
+FROM ${UPSTREAM_REPO:-uselagoon}/commons:${UPSTREAM_TAG:-latest} as commons
FROM arachnysdocker/athenapdf-service:2.13.0
LABEL maintainer="amazee.io"
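
With the switch from IMAGE_REPO to UPSTREAM_REPO/UPSTREAM_TAG, the commons base stage can be pinned to a specific published image at build time; for example (illustrative values):

    docker build \
      --build-arg UPSTREAM_REPO=uselagoon \
      --build-arg UPSTREAM_TAG=latest \
      -t athenapdf-service images/athenapdf-service
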
diff --git a/images/commons/.bashrc b/images/commons/.bashrc
deleted file mode 100644
index c5fb3f4205..0000000000
--- a/images/commons/.bashrc
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-
-# Make sure that new files generated by Docker have group write permission
-source /lagoon/entrypoints/00-umask.sh
-
-# Loading environment variables from .env and friends
-source /lagoon/entrypoints/50-dotenv.sh
-
-# Generate some additional enviornment variables
-source /lagoon/entrypoints/55-generate-env.sh
-
-# a nicer prompt
-if [ "$PS1" ]; then
- NORMAL="\[\e[0m\]"
- RED="\[\e[1;31m\]"
- GREEN="\[\e[0;32m\]"
- YELLOW="\[\e[1;33m\]"
- BLUE="\[\e[1;34m\]"
- WHITE="\[\e[1;37m\]"
- PS1="${YELLOW}\w${NORMAL}$ "
- if [ "$LAGOON" ]; then
- PS1="${GREEN}$LAGOON${NORMAL}:$PS1"
- fi
- if [ "$LAGOON_GIT_BRANCH" ]; then
- # production environments get a red color
- if [ "$LAGOON_ENVIRONMENT_TYPE" == "production" ]; then
- PS1="${RED}$LAGOON_GIT_BRANCH${NORMAL}@$PS1"
- else
- PS1="${BLUE}$LAGOON_GIT_BRANCH${NORMAL}@$PS1"
- fi
- fi
- if [ "$LAGOON_PROJECT" ]; then
- PS1="[${WHITE}$LAGOON_PROJECT${NORMAL}]$PS1"
- fi
-fi
-
-# Helpers
-alias ll="ls -l"
diff --git a/images/commons/Dockerfile b/images/commons/Dockerfile
deleted file mode 100644
index 153be44ea1..0000000000
--- a/images/commons/Dockerfile
+++ /dev/null
@@ -1,41 +0,0 @@
-ARG ALPINE_VERSION
-FROM alpine:${ALPINE_VERSION}
-
-LABEL maintainer="amazee.io"
-
-ENV LAGOON=commons
-
-COPY lagoon/ /lagoon/
-RUN mkdir -p /lagoon/bin
-COPY fix-permissions docker-sleep entrypoint-readiness /bin/
-COPY .bashrc /home/.bashrc
-
-RUN apk update \
- && apk upgrade \
- && apk add --no-cache curl tini \
- && rm -rf /var/cache/apk/* \
- && curl -sLo /bin/ep https://github.com/kreuzwerker/envplate/releases/download/1.0.0-RC1/ep-linux \
- && echo "48e234e067874a57a4d4bb198b5558d483ee37bcc285287fffb3864818b42f2785be0568faacbc054e97ca1c5047ec70382e1ca0e71182c9dba06649ad83a5f6 /bin/ep" | sha512sum -c \
- && chmod +x /bin/ep \
- && curl -sLo /lagoon/bin/cron https://github.com/christophlehmann/go-crond/releases/download/0.6.1-2-g7022a21/go-crond-64-linux \
- && echo "4ecbf269a00416086a855b760b6a691d1b8a6385adb18debec893bdbebccd20822b945c476406e3ca27c784812027c23745048fadc36c4067f12038aff972dce /lagoon/bin/cron" | sha512sum -c \
- && chmod +x /lagoon/bin/cron \
- && mkdir -p /lagoon/crontabs && fix-permissions /lagoon/crontabs \
- && ln -s /home/.bashrc /home/.profile
-
-RUN chmod g+w /etc/passwd
-
-ARG LAGOON_VERSION
-RUN echo $LAGOON_VERSION > /lagoon/version
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["/bin/docker-sleep"]
diff --git a/images/commons/docker-sleep b/images/commons/docker-sleep
deleted file mode 100755
index 426fb9e2a7..0000000000
--- a/images/commons/docker-sleep
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-while sleep 3600; do :; done
\ No newline at end of file
diff --git a/images/commons/entrypoint-readiness b/images/commons/entrypoint-readiness
deleted file mode 100755
index 64f88f0100..0000000000
--- a/images/commons/entrypoint-readiness
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# simple readiness check to see if a late-running entrypoint has completed
-# but only if the entrypoint actually exists
-if [ -f /lagoon/entrypoints/999-readiness.sh ]; then
- test -f /tmp/ready
-fi
diff --git a/images/commons/fix-permissions b/images/commons/fix-permissions
deleted file mode 100755
index 8259c29048..0000000000
--- a/images/commons/fix-permissions
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-# Fix permissions on the given directory to allow group read/write of
-# regular files and execute of directories.
-find -L "$1" -exec chgrp 0 {} \;
-find -L "$1" -exec chmod g+rw {} \;
-find -L "$1" -type d -exec chmod g+x {} +
diff --git a/images/commons/lagoon/cronjob.sh b/images/commons/lagoon/cronjob.sh
deleted file mode 100755
index a558cfc7df..0000000000
--- a/images/commons/lagoon/cronjob.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-exit_trap() {
- rv=$?
- # TODO: Send Lagoon API information about our CronJob Success or Failure
- exit $rv
-}
-
-# on exit, always call exit_trap
-trap exit_trap EXIT
-
-echo "$(date --utc +%FT%TZ) CRONJOB: $@"
-
-sh -c "/lagoon/entrypoints.sh $@"
\ No newline at end of file
diff --git a/images/commons/lagoon/entrypoints.bash b/images/commons/lagoon/entrypoints.bash
deleted file mode 100755
index 8c07b21421..0000000000
--- a/images/commons/lagoon/entrypoints.bash
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-# This script will be the default ENTRYPOINT for all children docker images.
-# It just sources all files within /lagoon/entrypoints/* in an alphabetical order and then runs `exec` on the given parameter.
-
-if [ -d /lagoon/entrypoints ]; then
- for i in /lagoon/entrypoints/*; do
- if [ -r $i ]; then
- . $i
- fi
- done
- unset i
-fi
-
-exec "$@"
\ No newline at end of file
diff --git a/images/commons/lagoon/entrypoints.sh b/images/commons/lagoon/entrypoints.sh
deleted file mode 100755
index 55cedfac83..0000000000
--- a/images/commons/lagoon/entrypoints.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-# This script will be the default ENTRYPOINT for all children docker images.
-# It just sources all files within /lagoon/entrypoints/* in an alphabetical order and then runs `exec` on the given parameter.
-
-if [ -d /lagoon/entrypoints ]; then
- for i in /lagoon/entrypoints/*; do
- if [ -r $i ]; then
- . $i
- fi
- done
- unset i
-fi
-
-exec "$@"
\ No newline at end of file
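Child images hook into this mechanism by shipping numbered scripts into /lagoon/entrypoints/; a minimal drop-in sketch (file name and variable are hypothetical):

    # /lagoon/entrypoints/60-myapp-env.sh -- sourced in alphabetical order before exec
    export MYAPP_CACHE_DIR=${MYAPP_CACHE_DIR:-/tmp/cache}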
diff --git a/images/commons/lagoon/entrypoints/00-umask.sh b/images/commons/lagoon/entrypoints/00-umask.sh
deleted file mode 100755
index 1f70ab745d..0000000000
--- a/images/commons/lagoon/entrypoints/00-umask.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-# Make sure that new files generated by Docker have group write permission
-umask 002
\ No newline at end of file
diff --git a/images/commons/lagoon/entrypoints/10-passwd.sh b/images/commons/lagoon/entrypoints/10-passwd.sh
deleted file mode 100644
index 09aff906c2..0000000000
--- a/images/commons/lagoon/entrypoints/10-passwd.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-if [ -w /etc/passwd ]; then
- # Change root's home folder to /home
- # (we can't use `sed -i` as we sed would create the tempfile in /etc)
- TMPFILE=$(mktemp -p /tmp passwd.XXXXXX)
- sed 's/root:\/root:/root:\/home:/' /etc/passwd > "$TMPFILE"
- cat "$TMPFILE" > /etc/passwd
- rm "$TMPFILE"
-
- # If we don't know who we are (whoami returns false) add a new entry into the users list
- if ! whoami &> /dev/null; then
- echo "${USER_NAME:-user}:x:$(id -u):0:${USER_NAME:-user}:${HOME}:/bin/sh" >> /etc/passwd
- fi
-fi
diff --git a/images/commons/lagoon/entrypoints/50-dotenv.sh b/images/commons/lagoon/entrypoints/50-dotenv.sh
deleted file mode 100644
index a588ce1360..0000000000
--- a/images/commons/lagoon/entrypoints/50-dotenv.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-# dotenv implementation in Bash:
-# We basically search for files within the current working directory (defined by WORKDIR in the Dockerfile).
-# If it exists, we source them, which means their variables will exist as environment variables for all next processes
-# The files are expected to be in this format:
-#
-# var1=value
-# VAR2=value
-#
-# As there can already be env variables defined in either the Dockerfile of during runtime (via docker run), we have an hierarchy of Environment variables:
-# (env variables defined in lower numbers are stronger)
-# 1. Runtime env variables (docker run)
-# 2. Env variables defined in Dockerfile (ENV)
-# 3. Env variables defined in `.lagoon.env.$BRANCHNAME` (if file exists and where $BRANCHNAME is the Branch this Dockerimage has been built for),
-# use this for overwriting variables for only specific branches
-# 4. Env variables defined in `.env`
-# 5. Env variables defined in `.env.defaults`
-
-# first export all current environment variables into a file.
-# We do that in order to keep the hierarchy of environment variables. Already defined ones are probably overwritten
-# via an `docker run -e VAR=VAL` system and they should still be used even they are defined in dotenv files.
-TMPFILE=$(mktemp -t dotenv.XXXXXXXX)
-export -p > $TMPFILE
-
-# set -a is short for `set -o allexport` which will export all variables in a file
-set -a
-[ -f .env.defaults ] && . ./.env.defaults || true
-[ -f .env ] && . ./.env || true
-# Provide a file that adds variables to all Lagoon envs, but is overridable in a specific env below
-[ -f .lagoon.env ] && . ./.lagoon.env || true
-[ -f .lagoon.env.$LAGOON_GIT_BRANCH ] && . ./.lagoon.env.$LAGOON_GIT_BRANCH || true
-# Branch names can have weird special chars in them which are not allowed in File names, so we also try the Branch name with special chars replaced by dashes.
-[ -f .lagoon.env.$LAGOON_GIT_SAFE_BRANCH ] && . ./.lagoon.env.$LAGOON_GIT_SAFE_BRANCH || true
-set +a
-
-# now export all previously existing environments variables so they are stronger than maybe existing ones in the dotenv files
-. $TMPFILE || true
-
-# remove the tmpfile
-rm $TMPFILE
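The export-then-re-source trick above is what enforces the hierarchy; a minimal standalone sketch of the same pattern (variable and file names are hypothetical):

    #!/bin/sh
    export VAR=runtime            # simulates `docker run -e VAR=runtime`
    TMPFILE=$(mktemp)
    export -p > $TMPFILE          # snapshot the current environment first
    set -a                        # auto-export everything sourced from here
    echo 'VAR=dotenv' > /tmp/.env-demo
    . /tmp/.env-demo              # VAR is now "dotenv"...
    set +a
    . $TMPFILE                    # ...until the snapshot restores it
    rm $TMPFILE /tmp/.env-demo
    echo "$VAR"                   # prints "runtime": runtime variables win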
diff --git a/images/commons/lagoon/entrypoints/55-generate-env.sh b/images/commons/lagoon/entrypoints/55-generate-env.sh
deleted file mode 100644
index dc9aaa7783..0000000000
--- a/images/commons/lagoon/entrypoints/55-generate-env.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-# Create a LAGOON_DOMAIN from LAGOON_ROUTE but without the scheme (http:// or https://)
-export LAGOON_DOMAIN=${LAGOON_ROUTE#*://}
\ No newline at end of file
diff --git a/images/commons/lagoon/entrypoints/90-cronjobs.sh b/images/commons/lagoon/entrypoints/90-cronjobs.sh
deleted file mode 100644
index c53b6a7d9a..0000000000
--- a/images/commons/lagoon/entrypoints/90-cronjobs.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-# Only if $CRONJOBS is not empty and /lagoon/crontabs/crontab is not existing yet
-if [ -x /lagoon/bin/cron ] && [ ! -z "$CRONJOBS" ] && [ ! -f /lagoon/crontabs/crontab ]; then
- echo "Setting up Cronjobs:"
- echo "${CRONJOBS}"
- echo "${CRONJOBS}" > /lagoon/crontabs/crontab
- # go-crond does not like if group and others have write access to the crontab
- chmod go-w /lagoon/crontabs/crontab
- /lagoon/bin/cron $(whoami):/lagoon/crontabs/crontab --allow-unprivileged --no-auto -v &
-fi
\ No newline at end of file
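$CRONJOBS is expected to hold plain crontab lines, one per job; for example (the jobs are hypothetical):

    export CRONJOBS='15 * * * * drush cron
    0 2 * * * /app/scripts/nightly-export.sh'
    # written verbatim to /lagoon/crontabs/crontab and executed by go-crond as the current user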
diff --git a/images/commons/lagoon/entrypoints/999-readiness.sh b/images/commons/lagoon/entrypoints/999-readiness.sh
deleted file mode 100644
index 701997993e..0000000000
--- a/images/commons/lagoon/entrypoints/999-readiness.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-touch /tmp/ready
diff --git a/images/curator/Dockerfile b/images/curator/Dockerfile
index a7e771630b..cb5e20721c 100644
--- a/images/curator/Dockerfile
+++ b/images/curator/Dockerfile
@@ -1,5 +1,6 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
+ARG UPSTREAM_REPO
+ARG UPSTREAM_TAG
+FROM ${UPSTREAM_REPO:-uselagoon}/commons:${UPSTREAM_TAG:-latest} as commons
FROM bobrik/curator:5.7.6
USER root
diff --git a/images/docker-host/Dockerfile b/images/docker-host/Dockerfile
index 5587f2bf4b..9cd6d536ba 100644
--- a/images/docker-host/Dockerfile
+++ b/images/docker-host/Dockerfile
@@ -1,6 +1,7 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM docker:19.03.10-dind
+ARG UPSTREAM_REPO
+ARG UPSTREAM_TAG
+FROM ${UPSTREAM_REPO:-uselagoon}/commons:${UPSTREAM_TAG:-latest} as commons
+FROM docker:19.03.14-dind
LABEL maintainer="amazee.io"
ENV LAGOON=docker-host
@@ -27,14 +28,16 @@ RUN apk add --no-cache bash
ENV DOCKER_HOST=docker-host \
REGISTRY=docker-registry.default.svc:5000 \
REPOSITORY_TO_UPDATE=amazeeio \
- BIP=172.16.0.1/16
+ BIP=172.16.0.1/16 \
+ REGISTRY_MIRROR=https://imagecache.amazeeio.cloud
RUN fix-permissions /home
COPY update-push-images.sh /update-push-images.sh
+COPY update-images.sh /update-images.sh
COPY prune-images.sh /prune-images.sh
COPY remove-exited.sh /remove-exited.sh
ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["sh", "-c", "sh /usr/local/bin/dind /usr/local/bin/dockerd --host=tcp://0.0.0.0:2375 --host=unix:///var/run/docker.sock --insecure-registry=${REGISTRY} --insecure-registry=172.17.0.1:8084 --bip=${BIP} --storage-driver=overlay2 --storage-opt=overlay2.override_kernel_check=1"]
+CMD ["sh", "-c", "sh /usr/local/bin/dind /usr/local/bin/dockerd --host=tcp://0.0.0.0:2375 --host=unix:///var/run/docker.sock --insecure-registry=${REGISTRY} --insecure-registry=harbor-harbor-core.harbor.svc.cluster.local:80 --bip=${BIP} --storage-driver=overlay2 --storage-opt=overlay2.override_kernel_check=1 --registry-mirror=${REGISTRY_MIRROR}"]
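To verify that dockerd picked up the new mirror, a quick smoke test against a running docker-host (namespace and workload name are assumptions):

    kubectl -n lagoon exec deploy/docker-host -- docker info | grep -A 2 'Registry Mirrors'
    # Registry Mirrors:
    #  https://imagecache.amazeeio.cloud/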
diff --git a/images/docker-host/update-images.sh b/images/docker-host/update-images.sh
new file mode 100755
index 0000000000..0de8fbba86
--- /dev/null
+++ b/images/docker-host/update-images.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -e
+set -x
+
+if ! docker -H ${DOCKER_HOST} info &> /dev/null; then
+ echo "could not connect to ${DOCKER_HOST}"; exit 1
+fi
+
+# Iterate over all local images whose name contains the repository we are interested in
+for FULL_IMAGE in $(docker image ls --format "{{.Repository}}:{{.Tag}}" | grep -E "${REPOSITORY_TO_UPDATE}/" | grep -v none); do
+ # pull newest version of found image
+ docker pull ${FULL_IMAGE} | cat
+done
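The script picks up $DOCKER_HOST and $REPOSITORY_TO_UPDATE from the Dockerfile defaults above; run manually it might look like this (values are illustrative):

    DOCKER_HOST=docker-host REPOSITORY_TO_UPDATE=amazeeio ./update-images.sh
    # re-pulls every local amazeeio/* image:tag, e.g. amazeeio/php:7.4-cli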
diff --git a/images/elasticsearch/Dockerfile6 b/images/elasticsearch/Dockerfile6
deleted file mode 100644
index 4ecb16fede..0000000000
--- a/images/elasticsearch/Dockerfile6
+++ /dev/null
@@ -1,57 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-# Defining Versions - https://www.elastic.co/guide/en/elasticsearch/reference/6.8/docker.html
-FROM docker.elastic.co/elasticsearch/elasticsearch:6.8.2
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=elasticsearch
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /home /home
-
-RUN curl -sL https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini
-
-COPY docker-entrypoint.sh.6 /lagoon/entrypoints/90-elasticsearch.sh
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-# Reproduce behavior of Alpine: Run Bash as sh
-RUN rm -f /bin/sh && ln -s /bin/bash /bin/sh
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN sed -i 's/discovery.zen.minimum_master_nodes: 1//' config/elasticsearch.yml
-
-RUN echo $'xpack.security.enabled: false\n\
-\n\
-node.name: "${HOSTNAME}"\n\
-node.master: "${NODE_MASTER}"\n\
-cluster.routing.allocation.disk.threshold_enabled: "true"\n\
-discovery.zen.minimum_master_nodes: "${DISCOVERY_ZEN_MINIMUM_MASTER_NODES}"' >> config/elasticsearch.yml
-
-RUN fix-permissions config
-
-ENV ES_JAVA_OPTS="-Xms200m -Xmx200m" \
- DISCOVERY_ZEN_MINIMUM_MASTER_NODES=1 \
- NODE_MASTER=true
-
-# Copy es-curl wrapper
-COPY es-curl /usr/share/elasticsearch/bin/es-curl
-
-VOLUME [ "/usr/share/elasticsearch/data" ]
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash"]
-
-CMD ["/usr/local/bin/docker-entrypoint.sh"]
diff --git a/images/elasticsearch/Dockerfile7 b/images/elasticsearch/Dockerfile7
deleted file mode 100644
index 4013e5dbde..0000000000
--- a/images/elasticsearch/Dockerfile7
+++ /dev/null
@@ -1,70 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-# Defining Versions - https://www.elastic.co/guide/en/elasticsearch/reference/7.6/docker.html
-FROM docker.elastic.co/elasticsearch/elasticsearch:7.6.1
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=elasticsearch
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /home /home
-
-RUN curl -sL https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini
-
-COPY docker-entrypoint.sh.7 /lagoon/entrypoints/90-elasticsearch.sh
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-# Reproduce behavior of Alpine: Run Bash as sh
-RUN rm -f /bin/sh && ln -s /bin/bash /bin/sh
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN echo $'\n\
-node.name: "${HOSTNAME}"\n\
-node.master: "${NODE_MASTER}"\n\
-node.data: "${NODE_DATA}"\n\
-node.ingest: "${NODE_INGEST}"\n\
-node.ml: "${NODE_ML}"\n\
-xpack.ml.enabled: "${XPACK_ML_ENABLED}"\n\
-xpack.watcher.enabled: "${XPACK_WATCHER_ENABLED}"\n\
-xpack.security.enabled: "${XPACK_SECURITY_ENABLED}"\n\
-processors: "${PROCESSORS}"\n\
-cluster.routing.allocation.disk.threshold_enabled: "true"\n\
-cluster.remote.connect: "${CLUSTER_REMOTE_CONNECT}"' >> config/elasticsearch.yml
-
-RUN fix-permissions config
-
-ENV ES_JAVA_OPTS="-Xms400m -Xmx400m" \
- NODE_MASTER=true \
- NODE_DATA=true \
- NODE_INGEST=true \
- NODE_ML=true \
- XPACK_ML_ENABLED=true \
- XPACK_WATCHER_ENABLED=true \
- XPACK_SECURITY_ENABLED=false \
- PROCESSORS=2 \
- CLUSTER_REMOTE_CONNECT=true \
- EXTRA_OPTS=""
-
-# Copy es-curl wrapper
-COPY es-curl /usr/share/elasticsearch/bin/es-curl
-
-
-VOLUME [ "/usr/share/elasticsearch/data" ]
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash"]
-
-CMD ["/usr/local/bin/docker-entrypoint.sh"]
diff --git a/images/elasticsearch/docker-entrypoint.sh.6 b/images/elasticsearch/docker-entrypoint.sh.6
deleted file mode 100755
index 06acef3331..0000000000
--- a/images/elasticsearch/docker-entrypoint.sh.6
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-ep /usr/share/elasticsearch/config/elasticsearch.yml
-
-if [ ! -z "$EXTRA_OPTS" ]; then
- echo -e "${EXTRA_OPTS}" >> /usr/share/elasticsearch/config/elasticsearch.yml
-fi
-
-if [ -z "$POD_NAMESPACE" ]; then
- # Single container runs in docker
- echo "POD_NAMESPACE not set, spin up single node"
-else
- # Is running in Kubernetes/OpenShift, so find all other pods
- # belonging to the namespace
- echo "Elasticsearch: Running in Kubernetes, setting up for clustering"
- K8S_SVC_NAME=$(hostname -f | cut -d"." -f2)
- echo "Using service name: ${K8S_SVC_NAME}"
- echo "discovery.zen.ping.unicast.hosts: ${K8S_SVC_NAME}" >> /usr/share/elasticsearch/config/elasticsearch.yml
-fi
diff --git a/images/elasticsearch/docker-entrypoint.sh.7 b/images/elasticsearch/docker-entrypoint.sh.7
deleted file mode 100755
index 92391dd7df..0000000000
--- a/images/elasticsearch/docker-entrypoint.sh.7
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-ep /usr/share/elasticsearch/config/elasticsearch.yml
-
-if [ ! -z "$EXTRA_OPTS" ]; then
- echo -e "${EXTRA_OPTS}" >> /usr/share/elasticsearch/config/elasticsearch.yml
-fi
-
-if [ -z "$POD_NAMESPACE" ]; then
- # Single container runs in docker
- echo "POD_NAMESPACE not set, spin up single node"
- sed -i 's/cluster.initial_master_nodes:.*//' /usr/share/elasticsearch/config/elasticsearch.yml
- echo "cluster.initial_master_nodes: ${HOSTNAME}" >> /usr/share/elasticsearch/config/elasticsearch.yml
-else
- # Is running in Kubernetes/OpenShift, so find all other pods
- # belonging to the namespace
- echo "Elasticsearch: Running in Kubernetes, setting up for clustering"
- K8S_SVC_NAME=$(hostname -f | cut -d"." -f2)
- echo "Using service name: ${K8S_SVC_NAME}"
- sed -i 's/discovery.seed_hosts:.*//' /usr/share/elasticsearch/config/elasticsearch.yml
- sed -i 's/cluster.initial_master_nodes:.*//' /usr/share/elasticsearch/config/elasticsearch.yml
- echo "discovery.seed_hosts: ${K8S_SVC_NAME}" >> /usr/share/elasticsearch/config/elasticsearch.yml
- echo "cluster.initial_master_nodes: ${K8S_SVC_NAME}-0" >> /usr/share/elasticsearch/config/elasticsearch.yml
-fi
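The `cut -d"." -f2` works because a StatefulSet pod FQDN carries the governing service name as its second label; e.g. (namespace is hypothetical):

    hostname -f                    # elasticsearch-0.elasticsearch.myproject.svc.cluster.local
    hostname -f | cut -d"." -f2    # elasticsearch -> used for discovery.seed_hosts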
diff --git a/images/elasticsearch/es-curl b/images/elasticsearch/es-curl
deleted file mode 100755
index 168b531372..0000000000
--- a/images/elasticsearch/es-curl
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-if [ -z "$1" ] || [ -z "$2" ]; then
- echo "Usage: es-curl.sh VERB COMMAND"
- exit 1
-fi
-
-VERB=$1
-COMMAND=$2
-
-shift
-shift
-
-if [ "$LOGSDB_ADMIN_PASSWORD" ]; then
- curl -X $VERB -k -u "admin:$LOGSDB_ADMIN_PASSWORD" "http://localhost:9200/$COMMAND" -H 'Content-Type: application/json' "$@"
-else
- curl -X $VERB -k "http://localhost:9200/$COMMAND" -H 'Content-Type: application/json' "$@"
-fi
diff --git a/images/kibana/Dockerfile6 b/images/kibana/Dockerfile6
deleted file mode 100644
index f7f2c63c23..0000000000
--- a/images/kibana/Dockerfile6
+++ /dev/null
@@ -1,42 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM docker.elastic.co/kibana/kibana:6.8.2
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=kibana
-
-USER root
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /home /home
-
-RUN curl -sL https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-# Reproduce behavior of Alpine: Run Bash as sh
-RUN rm -f /bin/sh && ln -s /bin/bash /bin/sh
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN fix-permissions /usr/share/kibana
-
-# tells the local development environment on which port we are running
-# ENV LAGOON_LOCALDEV_HTTP_PORT=5601
-
-ENV NODE_OPTIONS="--max-old-space-size=200"
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash"]
-CMD ["/bin/bash", "/usr/local/bin/kibana-docker"]
diff --git a/images/kibana/Dockerfile7 b/images/kibana/Dockerfile7
deleted file mode 100644
index 983ca496c2..0000000000
--- a/images/kibana/Dockerfile7
+++ /dev/null
@@ -1,42 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM docker.elastic.co/kibana/kibana:7.6.1
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=kibana
-
-USER root
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /home /home
-
-RUN curl -sL https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-# Reproduce behavior of Alpine: Run Bash as sh
-RUN rm -f /bin/sh && ln -s /bin/bash /bin/sh
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN fix-permissions /usr/share/kibana
-
-# tells the local development environment on which port we are running
-# ENV LAGOON_LOCALDEV_HTTP_PORT=5601
-
-ENV NODE_OPTIONS="--max-old-space-size=200"
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash"]
-CMD ["/bin/bash", "/usr/local/bin/kibana-docker"]
diff --git a/images/kubectl-build-deploy-dind/Dockerfile b/images/kubectl-build-deploy-dind/Dockerfile
index fb0c4da20b..7135f04685 100644
--- a/images/kubectl-build-deploy-dind/Dockerfile
+++ b/images/kubectl-build-deploy-dind/Dockerfile
@@ -6,7 +6,6 @@ RUN rm -rf /root && ln -s /home /root
ENV LAGOON=kubectl-build-deploy-dind
RUN mkdir -p /kubectl-build-deploy/git
-RUN mkdir -p /kubectl-build-deploy/tug
RUN mkdir -p /kubectl-build-deploy/lagoon
WORKDIR /kubectl-build-deploy/git
@@ -14,11 +13,11 @@ WORKDIR /kubectl-build-deploy/git
COPY docker-entrypoint.sh /lagoon/entrypoints/100-docker-entrypoint.sh
COPY build-deploy.sh /kubectl-build-deploy/build-deploy.sh
COPY build-deploy-docker-compose.sh /kubectl-build-deploy/build-deploy-docker-compose.sh
-COPY tug.sh /kubectl-build-deploy/tug.sh
-COPY tug /kubectl-build-deploy/tug
COPY scripts /kubectl-build-deploy/scripts
COPY helmcharts /kubectl-build-deploy/helmcharts
+ENV IMAGECACHE_REGISTRY=imagecache.amazeeio.cloud
+
CMD ["/kubectl-build-deploy/build-deploy.sh"]
diff --git a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh
index 9792f967c4..f712854742 100755
--- a/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh
+++ b/images/kubectl-build-deploy-dind/build-deploy-docker-compose.sh
@@ -21,6 +21,10 @@ function cronScheduleMoreOftenThan30Minutes() {
fi
}
+function contains() {
+ [[ $1 =~ (^|[[:space:]])$2($|[[:space:]]) ]] && return 0 || return 1
+}
+
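contains() is a whitespace-delimited membership test over a flat list; a usage sketch (values are hypothetical):

    SECRETS="fastly-api-examplecom fastly-api-example2com"
    contains "$SECRETS" "fastly-api-examplecom" && echo "known secret"   # prints
    contains "$SECRETS" "fastly-api-other" || echo "not defined"         # prints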
##############################################
### PREPARATION
##############################################
@@ -72,7 +76,24 @@ if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
LAGOON_SERVICE_TYPES=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_SERVICE_TYPES") | "\(.value)"'))
fi
if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
- LAGOON_SERVICE_TYPES=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_SERVICE_TYPES") | "\(.value)"'))
+ TEMP_LAGOON_SERVICE_TYPES=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_SERVICE_TYPES") | "\(.value)"'))
+ if [ ! -z $TEMP_LAGOON_SERVICE_TYPES ]; then
+ LAGOON_SERVICE_TYPES=$TEMP_LAGOON_SERVICE_TYPES
+ fi
+fi
+# Allow the dbaas environment type to be overridden by the lagoon API
+# This accepts colon separated values like so `SERVICE_NAME:DBAAS_ENVIRONMENT_TYPE`, and multiple overrides
+# separated by commas
+# Example 1: mariadb:production <- tells any docker-compose service named mariadb to use the production dbaas environment type
+# Example 2: mariadb:production,mariadb-test:development
+if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ LAGOON_DBAAS_ENVIRONMENT_TYPES=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_DBAAS_ENVIRONMENT_TYPES") | "\(.value)"'))
+fi
+if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
+ TEMP_LAGOON_DBAAS_ENVIRONMENT_TYPES=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_DBAAS_ENVIRONMENT_TYPES") | "\(.value)"'))
+ if [ ! -z $TEMP_LAGOON_DBAAS_ENVIRONMENT_TYPES ]; then
+ LAGOON_DBAAS_ENVIRONMENT_TYPES=$TEMP_LAGOON_DBAAS_ENVIRONMENT_TYPES
+ fi
fi
set -x
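For reference, a `SERVICE_NAME:DBAAS_ENVIRONMENT_TYPE` override in that comma-separated format splits like this (values are hypothetical; the consuming code further below uses the same read/IFS pattern):

    LAGOON_DBAAS_ENVIRONMENT_TYPES="mariadb:production,mariadb-test:development"
    IFS=',' read -ra TYPES <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPES"
    for T in "${TYPES[@]}"; do
      IFS=':' read -ra PAIR <<< "$T"
      echo "service=${PAIR[0]} dbaas-environment=${PAIR[1]}"
    done
    # service=mariadb dbaas-environment=production
    # service=mariadb-test dbaas-environment=development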
@@ -104,6 +125,11 @@ do
done
fi
+ # Previous versions of Lagoon used "python-ckandatapusher"; map it to "python"
+ if [[ "$SERVICE_TYPE" == "python-ckandatapusher" ]]; then
+ SERVICE_TYPE="python"
+ fi
+
# "mariadb" is a meta service, which allows lagoon to decide itself which of the services to use:
# - mariadb-single (a single mariadb pod)
# - mariadb-dbaas (use the dbaas shared operator)
@@ -112,7 +138,7 @@ do
# mariadb-single deployed (probably from the past where there was no mariadb-shared yet, or mariadb-dbaas) and use that one
if kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get service "$SERVICE_NAME" &> /dev/null; then
SERVICE_TYPE="mariadb-single"
- # heck if this cluster supports the default one, if not we assume that this cluster is not capable of shared mariadbs and we use a mariadb-single
+ # check if this cluster supports the default one, if not we assume that this cluster is not capable of shared mariadbs and we use a mariadb-single
# real basic check to see if the mariadbconsumer exists as a kind
elif [[ "${CAPABILITIES[@]}" =~ "mariadb.amazee.io/v1/MariaDBConsumer" ]]; then
SERVICE_TYPE="mariadb-dbaas"
@@ -137,20 +163,103 @@ do
DBAAS_ENVIRONMENT=$ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE
fi
+ # If we have a dbaas environment type override in the api, consume it here
+ if [ ! -z "$LAGOON_DBAAS_ENVIRONMENT_TYPES" ]; then
+ IFS=',' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPES"
+ for LAGOON_DBAAS_ENVIRONMENT_TYPE in "${LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT[@]}"
+ do
+ IFS=':' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPE"
+ if [ "${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[0]}" == "$SERVICE_NAME" ]; then
+ DBAAS_ENVIRONMENT=${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[1]}
+ fi
+ done
+ fi
+
MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]="${DBAAS_ENVIRONMENT}"
fi
- if [ "$SERVICE_TYPE" == "mongodb-shared" ]; then
- MONGODB_SHARED_CLASS=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.mongo-shared\\.class "${MONGODB_SHARED_DEFAULT_CLASS}")
- MONGODB_SHARED_PLAN=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.mongo-shared\\.plan "${ENVIRONMENT_TYPE}")
+ # "postgres" is a meta service, which allows lagoon to decide itself which of the services to use:
+ # - postgres-single (a single postgres pod)
+ # - postgres-dbaas (use the dbaas shared operator)
+ if [ "$SERVICE_TYPE" == "postgres" ]; then
+ # if there is already a service existing with the service_name we assume that for this project there has been a
+ # postgres-single deployed (probably from the past where there was no postgres-shared yet, or postgres-dbaas) and use that one
+ if kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get service "$SERVICE_NAME" &> /dev/null; then
+ SERVICE_TYPE="postgres-single"
+ # check if this cluster supports the default one, if not we assume that this cluster is not capable of shared PostgreSQL and we use a postgres-single
+ # real basic check to see if the postgreSQLConsumer exists as a kind
+ elif [[ "${CAPABILITIES[@]}" =~ "postgres.amazee.io/v1/PostgreSQLConsumer" ]]; then
+ SERVICE_TYPE="postgres-dbaas"
+ else
+ SERVICE_TYPE="postgres-single"
+ fi
+
+ fi
+
+ # Previous versions of Lagoon supported "postgres-shared"; this has been superseded by "postgres-dbaas"
+ if [[ "$SERVICE_TYPE" == "postgres-shared" ]]; then
+ SERVICE_TYPE="postgres-dbaas"
+ fi
+
+ if [[ "$SERVICE_TYPE" == "postgres-dbaas" ]]; then
+ # The default dbaas environment is the Lagoon environment type
+ DBAAS_ENVIRONMENT=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.postgres-dbaas\\.environment "${ENVIRONMENT_TYPE}")
+
+ # Allow the dbaas environment to be overridden per environment in .lagoon.yml
+ ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$SERVICE_NAME.postgres-dbaas\\.environment false)
+ if [ ! $ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE == "false" ]; then
+ DBAAS_ENVIRONMENT=$ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE
+ fi
+
+ MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]="${DBAAS_ENVIRONMENT}"
+ fi
- # Check if the defined service broker plan exists
- if svcat --scope cluster get plan --class "${MONGODB_SHARED_CLASS}" "${MONGODB_SHARED_PLAN}" > /dev/null; then
- MAP_SERVICE_NAME_TO_SERVICEBROKER_PLAN["${SERVICE_NAME}"]="${MONGODB_SHARED_PLAN}"
+ # "mongo" is a meta service, which allows lagoon to decide itself which of the services to use:
+ # - mongodb-single (a single mongodb pod)
+ # - mongodb-dbaas (use the dbaas shared operator)
+ if [ "$SERVICE_TYPE" == "mongo" ]; then
+ # if there is already a service existing with the service_name we assume that for this project there has been a
+ # mongodb-single deployed (probably from the past where there was no mongodb-shared yet, or mongodb-dbaas) and use that one
+ if kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get service "$SERVICE_NAME" &> /dev/null; then
+ SERVICE_TYPE="mongodb-single"
+ # check if this cluster supports the default one, if not we assume that this cluster is not capable of shared MongoDB and we use a mongodb-single
+ # real basic check to see if the MongoDBConsumer exists as a kind
+ elif [[ "${CAPABILITIES[@]}" =~ "mongodb.amazee.io/v1/MongoDBConsumer" ]]; then
+ SERVICE_TYPE="mongodb-dbaas"
else
- echo "defined service broker plan '${MONGODB_SHARED_PLAN}' for service '$SERVICE_NAME' and service broker '$MONGODB_SHARED_CLASS' not found in cluster";
- exit 1
+ SERVICE_TYPE="mongodb-single"
fi
+
+ fi
+
+ # Previous versions of Lagoon supported "mongo-shared"; this has been superseded by "mongodb-dbaas"
+ if [[ "$SERVICE_TYPE" == "mongo-shared" ]]; then
+ SERVICE_TYPE="mongodb-dbaas"
+ fi
+
+ if [[ "$SERVICE_TYPE" == "mongodb-dbaas" ]]; then
+ # The default dbaas environment is the Lagoon environment type
+ DBAAS_ENVIRONMENT=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.mongodb-dbaas\\.environment "${ENVIRONMENT_TYPE}")
+
+ # Allow the dbaas environment to be overridden per environment in .lagoon.yml
+ ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.overrides.$SERVICE_NAME.mongodb-dbaas\\.environment false)
+ if [ ! $ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE == "false" ]; then
+ DBAAS_ENVIRONMENT=$ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE
+ fi
+
+ # If we have a dbaas environment type override in the api, consume it here
+ if [ ! -z "$LAGOON_DBAAS_ENVIRONMENT_TYPES" ]; then
+ IFS=',' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPES"
+ for LAGOON_DBAAS_ENVIRONMENT_TYPE in "${LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT[@]}"
+ do
+ IFS=':' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPE"
+ if [ "${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[0]}" == "$SERVICE_NAME" ]; then
+ DBAAS_ENVIRONMENT=${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[1]}
+ fi
+ done
+ fi
+
+ MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]="${DBAAS_ENVIRONMENT}"
fi
if [ "$SERVICE_TYPE" == "none" ]; then
@@ -167,8 +276,16 @@ do
# The ImageName is the same as the Name of the Docker Compose ServiceName
IMAGE_NAME=$COMPOSE_SERVICE
- # Generate List of Images to build
- IMAGES+=("${IMAGE_NAME}")
+ # Do not handle images for shared services
+ if [[ "$SERVICE_TYPE" != "mariadb-dbaas" ]] &&
+ [[ "$SERVICE_TYPE" != "mariadb-shared" ]] &&
+ [[ "$SERVICE_TYPE" != "postgres-shared" ]] &&
+ [[ "$SERVICE_TYPE" != "postgres-dbaas" ]] &&
+ [[ "$SERVICE_TYPE" != "mongodb-dbaas" ]] &&
+ [[ "$SERVICE_TYPE" != "mongodb-shared" ]]; then
+ # Generate List of Images to build
+ IMAGES+=("${IMAGE_NAME}")
+ fi
# Map Deployment ServiceType to the ImageName
MAP_DEPLOYMENT_SERVICETYPE_TO_IMAGENAME["${SERVICE_NAME}:${DEPLOYMENT_SERVICETYPE}"]="${IMAGE_NAME}"
@@ -196,8 +313,8 @@ done
### BUILD IMAGES
##############################################
-# we only need to build images for pullrequests and branches, but not during a TUG build
-if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $THIS_IS_TUG == "true" ]]; then
+# we only need to build images for pullrequests and branches
+if [[ "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ]]; then
BUILD_ARGS=()
@@ -240,6 +357,7 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
BUILD_ARGS+=(--build-arg IMAGE_REPO="${CI_OVERRIDE_IMAGE_REPO}")
BUILD_ARGS+=(--build-arg LAGOON_PROJECT="${PROJECT}")
BUILD_ARGS+=(--build-arg LAGOON_ENVIRONMENT="${ENVIRONMENT}")
+ BUILD_ARGS+=(--build-arg LAGOON_ENVIRONMENT_TYPE="${ENVIRONMENT_TYPE}")
BUILD_ARGS+=(--build-arg LAGOON_BUILD_TYPE="${BUILD_TYPE}")
BUILD_ARGS+=(--build-arg LAGOON_GIT_SOURCE_REPOSITORY="${SOURCE_REPOSITORY}")
@@ -253,6 +371,7 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
fi
if [ "$BUILD_TYPE" == "pullrequest" ]; then
+ BUILD_ARGS+=(--build-arg LAGOON_GIT_SHA="${LAGOON_GIT_SHA}")
BUILD_ARGS+=(--build-arg LAGOON_PR_HEAD_BRANCH="${PR_HEAD_BRANCH}")
BUILD_ARGS+=(--build-arg LAGOON_PR_HEAD_SHA="${PR_HEAD_SHA}")
BUILD_ARGS+=(--build-arg LAGOON_PR_BASE_BRANCH="${PR_BASE_BRANCH}")
@@ -294,6 +413,12 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
PULL_IMAGE=$(echo "${OVERRIDE_IMAGE}" | envsubst)
fi
+ # if the image is just a bare image name (like "alpine") we prefix it with `library/`, as the imagecache
+ # does not understand the magic short names of official images
+ if [[ ! "$PULL_IMAGE" =~ "/" ]]; then
+ PULL_IMAGE="library/$PULL_IMAGE"
+ fi
+
# Add the images we should pull to the IMAGES_PULL array, they will later be tagged from dockerhub
IMAGES_PULL["${IMAGE_NAME}"]="${PULL_IMAGE}"
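The prefixing rule in action (bash; image names are illustrative):

    PULL_IMAGE="alpine"
    if [[ ! "$PULL_IMAGE" =~ "/" ]]; then PULL_IMAGE="library/$PULL_IMAGE"; fi
    echo $PULL_IMAGE   # library/alpine; "amazeeio/php-cli" would pass through unchanged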
@@ -332,18 +457,6 @@ if [[ ( "$BUILD_TYPE" == "pullrequest" || "$BUILD_TYPE" == "branch" ) && ! $TH
fi
-# if $DEPLOY_TYPE is tug we just push the images to the defined docker registry and create a clone
-# of ourselves and push it into `lagoon-tug` image which is then executed in the destination openshift
-# If though this is the actual tug deployment in the destination openshift, we don't run this
-if [[ $DEPLOY_TYPE == "tug" && ! $THIS_IS_TUG == "true" ]]; then
-echo "TODO: lagoon-tug is not implemented yet in kubernetes"
-exit 1
- . /kubectl-build-deploy/tug/tug-build-push.sh
-
- # exit here, we are done
- exit
-fi
-
##############################################
### RUN PRE-ROLLOUT tasks defined in .lagoon.yml
##############################################
@@ -393,14 +506,14 @@ else
ROUTES_AUTOGENERATE_INSECURE=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.insecure Allow)
fi
-ROUTES_AUTOGENERATE_ENABLED=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true)
-ROUTES_AUTOGENERATE_ALLOW_PRS=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED)
+ROUTES_AUTOGENERATE_ENABLED=$(set -o pipefail; cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true | tr '[:upper:]' '[:lower:]')
+ROUTES_AUTOGENERATE_ALLOW_PRS=$(set -o pipefail; cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED | tr '[:upper:]' '[:lower:]')
if [[ "$TYPE" == "pullrequest" && "$ROUTES_AUTOGENERATE_ALLOW_PRS" == "true" ]]; then
ROUTES_AUTOGENERATE_ENABLED=true
fi
## fail silently if the key autogenerateRoutes doesn't exist and default to whatever ROUTES_AUTOGENERATE_ENABLED is set to
-ROUTES_AUTOGENERATE_BRANCH=$(cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED)
-if [ "$ROUTES_AUTOGENERATE_BRANCH" =~ [Tt]rue ]; then
+ROUTES_AUTOGENERATE_BRANCH=$(set -o pipefail; cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED | tr '[:upper:]' '[:lower:]')
+if [[ "$ROUTES_AUTOGENERATE_BRANCH" == "true" ]]; then
ROUTES_AUTOGENERATE_ENABLED=true
fi
@@ -417,6 +530,7 @@ yq write -i -- /kubectl-build-deploy/values.yaml 'buildType' $BUILD_TYPE
yq write -i -- /kubectl-build-deploy/values.yaml 'routesAutogenerateInsecure' $ROUTES_AUTOGENERATE_INSECURE
yq write -i -- /kubectl-build-deploy/values.yaml 'routesAutogenerateEnabled' $ROUTES_AUTOGENERATE_ENABLED
yq write -i -- /kubectl-build-deploy/values.yaml 'routesAutogenerateSuffix' $ROUTER_URL
+yq write -i -- /kubectl-build-deploy/values.yaml 'routesAutogenerateShortSuffix' $SHORT_ROUTER_URL
for i in $ROUTES_AUTOGENERATE_PREFIXES; do yq write -i -- /kubectl-build-deploy/values.yaml 'routesAutogeneratePrefixes[+]' $i; done
yq write -i -- /kubectl-build-deploy/values.yaml 'kubernetes' $KUBERNETES
yq write -i -- /kubectl-build-deploy/values.yaml 'lagoonVersion' $LAGOON_VERSION
@@ -488,20 +602,21 @@ do
helm template ${SERVICE_NAME} /kubectl-build-deploy/helmcharts/${SERVICE_TYPE} -s $HELM_SERVICE_TEMPLATE -f /kubectl-build-deploy/values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${SERVICE_NAME}.yaml
fi
- HELM_INGRESS_TEMPLATE="templates/ingress.yaml"
- if [ -f /kubectl-build-deploy/helmcharts/${SERVICE_TYPE}/$HELM_INGRESS_TEMPLATE ]; then
+ if [ $ROUTES_AUTOGENERATE_ENABLED == "true" ]; then
+ HELM_INGRESS_TEMPLATE="templates/ingress.yaml"
+ if [ -f /kubectl-build-deploy/helmcharts/${SERVICE_TYPE}/$HELM_INGRESS_TEMPLATE ]; then
- # The very first generated route is set as MAIN_GENERATED_ROUTE
- if [ -z "${MAIN_GENERATED_ROUTE+x}" ]; then
- MAIN_GENERATED_ROUTE=$SERVICE_NAME
- fi
+ # The very first generated route is set as MAIN_GENERATED_ROUTE
+ if [ -z "${MAIN_GENERATED_ROUTE+x}" ]; then
+ MAIN_GENERATED_ROUTE=$SERVICE_NAME
+ fi
- helm template ${SERVICE_NAME} /kubectl-build-deploy/helmcharts/${SERVICE_TYPE} -s $HELM_INGRESS_TEMPLATE -f /kubectl-build-deploy/values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${SERVICE_NAME}.yaml
+ helm template ${SERVICE_NAME} /kubectl-build-deploy/helmcharts/${SERVICE_TYPE} -s $HELM_INGRESS_TEMPLATE -f /kubectl-build-deploy/values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${SERVICE_NAME}.yaml
+ fi
fi
HELM_DBAAS_TEMPLATE="templates/dbaas.yaml"
if [ -f /kubectl-build-deploy/helmcharts/${SERVICE_TYPE}/$HELM_DBAAS_TEMPLATE ]; then
- # cat $KUBERNETES_SERVICES_TEMPLATE
# Load the requested class and plan for this service
DBAAS_ENVIRONMENT="${MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]}"
yq write -i -- /kubectl-build-deploy/${SERVICE_NAME}-values.yaml 'environment' $DBAAS_ENVIRONMENT
@@ -513,6 +628,132 @@ done
TEMPLATE_PARAMETERS=()
+
+##############################################
+### CUSTOM FASTLY API SECRETS .lagoon.yml
+##############################################
+
+# if a customer is using their own fastly configuration, then they can define their api token and platform tls configuration ID in the .lagoon.yml file
+# this will get created as a `kind: Secret` in kubernetes so that created ingresses will be able to use this secret to talk to the fastly api.
+#
+# in this example, the customer needs to add a build envvar called `FASTLY_API_TOKEN` and then populate the .lagoon.yml file with something like this
+#
+# fastly:
+# api-secrets:
+# - name: customer
+# apiTokenVariableName: FASTLY_API_TOKEN
+# platformTLSConfiguration: A1bcEdFgH12eD242Sds
+#
+# then the build process will attempt to check the lagoon variables for one called `FASTLY_API_TOKEN` and will use the value of this variable when creating the
+# `kind: Secret` in kubernetes
+#
+# support for multiple api-secrets is possible, in case a customer uses two separate Fastly services in different accounts within one project
+
+
+## any fastly api secrets will be prefixed with this, so that the prefix is always added to whatever name the customer provides
+FASTLY_API_SECRET_PREFIX="fastly-api-"
+
+FASTLY_API_SECRETS_COUNTER=0
+FASTLY_API_SECRETS=()
+if [ -n "$(cat .lagoon.yml | shyaml keys fastly.api-secrets.$FASTLY_API_SECRETS_COUNTER 2> /dev/null)" ]; then
+ while [ -n "$(cat .lagoon.yml | shyaml get-value fastly.api-secrets.$FASTLY_API_SECRETS_COUNTER 2> /dev/null)" ]; do
+ FASTLY_API_SECRET_NAME=$FASTLY_API_SECRET_PREFIX$(cat .lagoon.yml | shyaml get-value fastly.api-secrets.$FASTLY_API_SECRETS_COUNTER.name 2> /dev/null)
+ if [ -z "$FASTLY_API_SECRET_NAME" ]; then
+ echo -e "A fastly api secret was defined in the .lagoon.yml file, but no name could be found the .lagoon.yml\n\nPlease check if the name has been set correctly."
+ exit 1
+ fi
+ FASTLY_API_TOKEN_VALUE=$(cat .lagoon.yml | shyaml get-value fastly.api-secrets.$FASTLY_API_SECRETS_COUNTER.apiTokenVariableName false)
+ if [[ $FASTLY_API_TOKEN_VALUE == "false" ]]; then
+ echo "No 'apiTokenVariableName' defined for fastly secret $FASTLY_API_SECRET_NAME"; exit 1;
+ fi
+ # if we have everything we need, we can proceed to look up the token
+ if [ $FASTLY_API_TOKEN_VALUE != "false" ]; then
+ FASTLY_API_TOKEN=""
+ # check if we have a token defined anywhere in the api first
+ if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ FASTLY_API_TOKEN=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.scope == "build" and .name == "'$FASTLY_API_TOKEN_VALUE'") | "\(.value)"'))
+ fi
+ if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
+ TEMP_FASTLY_API_TOKEN=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.scope == "build" and .name == "'$FASTLY_API_TOKEN_VALUE'") | "\(.value)"'))
+ if [ ! -z "$TEMP_FASTLY_API_TOKEN" ]; then
+ FASTLY_API_TOKEN=$TEMP_FASTLY_API_TOKEN
+ fi
+ fi
+ if [ -z "$FASTLY_API_TOKEN" ]; then
+ echo -e "A fastly api secret was defined in the .lagoon.yml file, but no token could be found in the Lagoon API matching the variable name provided\n\nPlease check if the token has been set correctly."
+ exit 1
+ fi
+ fi
+ FASTLY_API_PLATFORMTLS_CONFIGURATION=$(cat .lagoon.yml | shyaml get-value fastly.api-secrets.$FASTLY_API_SECRETS_COUNTER.platformTLSConfiguration "")
+ if [ -z "$FASTLY_API_PLATFORMTLS_CONFIGURATION" ]; then
+ echo -e "A fastly api secret was defined in the .lagoon.yml file, but no platform tls configuration id could be found in the .lagoon.yml\n\nPlease check if the platform tls configuration id has been set correctly."
+ exit 1
+ fi
+
+ # run the script to create the secrets
+ . /kubectl-build-deploy/scripts/exec-fastly-api-secrets.sh
+
+ let FASTLY_API_SECRETS_COUNTER=FASTLY_API_SECRETS_COUNTER+1
+ done
+fi
+
+# FASTLY API SECRETS FROM LAGOON API VARIABLE
+# Allow for defining fastly api secrets using lagoon api variables
+# This accepts colon separated values like so `SECRET_NAME:FASTLY_API_TOKEN:FASTLY_PLATFORMTLS_CONFIGURATION_ID`, and multiple overrides
+# separated by commas
+# Example 1: examplecom:x1s8asfafasf7ssf:fa23rsdgsdgas
+# ^^^ will create a kubernetes secret called `${FASTLY_API_SECRET_PREFIX}examplecom` with 2 data fields (one for the api token, the other for the platform tls id)
+# populated with `x1s8asfafasf7ssf` and `fa23rsdgsdgas` for whichever field it should be
+# and the name will get created with the prefix defined in `FASTLY_API_SECRET_PREFIX`
+# Example 2: examplecom:x1s8asfafasf7ssf:fa23rsdgsdgas,example2com:fa23rsdgsdgas:x1s8asfafasf7ssf,example3com:fa23rsdgsdgas:x1s8asfafasf7ssf:example3com
+if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ LAGOON_FASTLY_API_SECRETS=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_FASTLY_API_SECRETS") | "\(.value)"'))
+fi
+if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
+ TEMP_LAGOON_FASTLY_API_SECRETS=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_FASTLY_API_SECRETS") | "\(.value)"'))
+ if [ ! -z $TEMP_LAGOON_FASTLY_API_SECRETS ]; then
+ LAGOON_FASTLY_API_SECRETS=$TEMP_LAGOON_FASTLY_API_SECRETS
+ fi
+fi
+if [ ! -z "$LAGOON_FASTLY_API_SECRETS" ]; then
+ IFS=',' read -ra LAGOON_FASTLY_API_SECRETS_SPLIT <<< "$LAGOON_FASTLY_API_SECRETS"
+ for LAGOON_FASTLY_API_SECRETS_DATA in "${LAGOON_FASTLY_API_SECRETS_SPLIT[@]}"
+ do
+ IFS=':' read -ra LAGOON_FASTLY_API_SECRET_SPLIT <<< "$LAGOON_FASTLY_API_SECRETS_DATA"
+ if [ -z "${LAGOON_FASTLY_API_SECRET_SPLIT[0]}" ] || [ -z "${LAGOON_FASTLY_API_SECRET_SPLIT[1]}" ] || [ -z "${LAGOON_FASTLY_API_SECRET_SPLIT[2]}" ]; then
+ echo -e "An override was defined in the lagoon API with LAGOON_FASTLY_API_SECRETS but was not structured correctly, the format should be NAME:FASTLY_API_TOKEN:FASTLY_PLATFORMTLS_CONFIGURATION_ID and comma separated for multiples"
+ exit 1
+ fi
+ # the fastly api secret name will be created with the prefix that is defined above
+ FASTLY_API_SECRET_NAME=$FASTLY_API_SECRET_PREFIX${LAGOON_FASTLY_API_SECRET_SPLIT[0]}
+ FASTLY_API_TOKEN=${LAGOON_FASTLY_API_SECRET_SPLIT[1]}
+ FASTLY_API_PLATFORMTLS_CONFIGURATION=${LAGOON_FASTLY_API_SECRET_SPLIT[2]}
+ # run the script to create the secrets
+ . /kubectl-build-deploy/scripts/exec-fastly-api-secrets.sh
+ done
+fi
+
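To make the naming concrete: given the (hypothetical) API value below, the loop creates two secrets, each holding the api token and the platform TLS configuration id:

    LAGOON_FASTLY_API_SECRETS="examplecom:token-aaaa:tlsid-1111,example2com:token-bbbb:tlsid-2222"
    # -> kind: Secret "fastly-api-examplecom"   (token-aaaa / tlsid-1111)
    # -> kind: Secret "fastly-api-example2com"  (token-bbbb / tlsid-2222)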
+# FASTLY SERVICE ID PER INGRESS OVERRIDE FROM LAGOON API VARIABLE
+# Allow the fastly serviceid for specific ingress to be overridden by the lagoon API
+# This accepts colon separated values like so `INGRESS_DOMAIN:FASTLY_SERVICE_ID:WATCH_STATUS:SECRET_NAME(OPTIONAL)`, and multiple overrides
+# separated by commas
+# Example 1: www.example.com:x1s8asfafasf7ssf:true
+# ^^^ tells the ingress creation to use the service id x1s8asfafasf7ssf for ingress www.example.com, with the watch status of true
+# Example 2: www.example.com:x1s8asfafasf7ssf:true,www.not-example.com:fa23rsdgsdgas:false
+# ^^^ same as above, but also tells the ingress creation to use the service id fa23rsdgsdgas for ingress www.not-example.com, with the watch status of false
+# Example 3: www.example.com:x1s8asfafasf7ssf:true:examplecom
+# ^^^ tells the ingress creation to use the service id x1s8asfafasf7ssf for ingress www.example.com, with the watch status of true
+# but it will also be annotated to be told to use the secret named `examplecom` that could be defined elsewhere
+if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ LAGOON_FASTLY_SERVICE_IDS=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_FASTLY_SERVICE_IDS") | "\(.value)"'))
+fi
+if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
+ TEMP_LAGOON_FASTLY_SERVICE_IDS=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_FASTLY_SERVICE_IDS") | "\(.value)"'))
+ if [ ! -z $TEMP_LAGOON_FASTLY_SERVICE_IDS ]; then
+ LAGOON_FASTLY_SERVICE_IDS=$TEMP_LAGOON_FASTLY_SERVICE_IDS
+ fi
+fi
+
##############################################
### CUSTOM ROUTES FROM .lagoon.yml
##############################################
@@ -533,10 +774,22 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
# Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
+ ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {})
+ # get the fastly configuration values from .lagoon.yml
+ if cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly &> /dev/null; then
+ ROUTE_FASTLY_SERVICE_ID=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.service-id "")
+ ROUTE_FASTLY_SERVICE_API_SECRET=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.api-secret-name "")
+ ROUTE_FASTLY_SERVICE_WATCH=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.watch false | tr '[:upper:]' '[:lower:]')
+ else
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -544,21 +797,66 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_MIGRATE=true
ROUTE_INSECURE=Redirect
ROUTE_HSTS=null
+ MONITORING_PATH="/"
+ ROUTE_ANNOTATIONS="{}"
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
+
+ # work out if there are any lagoon api variable overrides for the annotations that are being added
+ . /kubectl-build-deploy/scripts/exec-fastly-annotations.sh
+ # if any of the steps in exec-fastly-annotations.sh populated a service id override,
+ # make it available to the ingress creation here by overriding what may be defined in the .lagoon.yml
+ if [ ! -z "$LAGOON_FASTLY_SERVICE_ID" ]; then
+ ROUTE_FASTLY_SERVICE_ID=$LAGOON_FASTLY_SERVICE_ID
+ ROUTE_FASTLY_SERVICE_WATCH=$LAGOON_FASTLY_SERVICE_WATCH
+ if [ ! -z $LAGOON_FASTLY_SERVICE_API_SECRET ]; then
+ ROUTE_FASTLY_SERVICE_API_SECRET=$LAGOON_FASTLY_SERVICE_API_SECRET
+ fi
+ fi
+
+ FASTLY_ARGS=()
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_ID" ]; then
+ FASTLY_ARGS+=(--set fastly.serviceId=${ROUTE_FASTLY_SERVICE_ID})
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_API_SECRET" ]; then
+ if contains $FASTLY_API_SECRETS "${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET}"; then
+ FASTLY_ARGS+=(--set fastly.apiSecretName=${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET})
+ else
+ echo "$ROUTE_FASTLY_SERVICE_API_SECRET requested, but not found in .lagoon.yml file"; exit 1;
+ fi
+ fi
fi
touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
echo "$ROUTE_ANNOTATIONS" | yq p - annotations > /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+ # ${ROUTE_DOMAIN} is used as a helm release name, which may be at most 53 characters long.
+ # So we need some logic to make sure it never exceeds 53 characters
+ if [[ ${#ROUTE_DOMAIN} -gt 53 ]] ; then
+ # Trim the route domain to 47 characters and append a 5-character hash of the domain;
+ # together with the separating dash this gives a total of 53 characters
+ INGRESS_NAME=${ROUTE_DOMAIN:0:47}-$(echo ${ROUTE_DOMAIN} | md5sum | cut -f 1 -d " " | cut -c 1-5)
+ else
+ INGRESS_NAME=${ROUTE_DOMAIN}
+ fi
+
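A worked example of the truncation (61-character domain, hypothetical):

    D=this-is-an-unusually-long-subdomain.customer-example-site.com
    echo "${D:0:47}-$(echo $D | md5sum | cut -c 1-5)"
    # 47 chars + "-" + 5-char hash = 53, the maximum helm allows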
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
- MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+ MAIN_CUSTOM_ROUTE=$INGRESS_NAME
+
+ # if we are in production we enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
- helm template ${ROUTE_DOMAIN} \
+ helm template ${INGRESS_NAME} \
/kubectl-build-deploy/helmcharts/custom-ingress \
--set host="${ROUTE_DOMAIN}" \
--set service="${ROUTE_SERVICE}" \
@@ -566,8 +864,15 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
--set insecure="${ROUTE_INSECURE}" \
--set hsts="${ROUTE_HSTS}" \
--set routeMigrate="${ROUTE_MIGRATE}" \
+ --set ingressmonitorcontroller.enabled="${MONITORING_ENABLED}" \
+ --set ingressmonitorcontroller.path="${MONITORING_PATH}" \
+ --set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \
+ --set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \
+ "${FASTLY_ARGS[@]}" --set fastly.watch="${ROUTE_FASTLY_SERVICE_WATCH}" \
-f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
+ MONITORING_ENABLED="false" # disabling a possible enabled monitoring again
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -587,10 +892,22 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
# Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
+ MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
+ ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {})
+ # get the fastly configuration values from .lagoon.yml
+ if cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly &> /dev/null; then
+ ROUTE_FASTLY_SERVICE_ID=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.service-id "")
+ ROUTE_FASTLY_SERVICE_API_SECRET=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.api-secret-name "")
+ ROUTE_FASTLY_SERVICE_WATCH=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.watch false | tr '[:upper:]' '[:lower:]')
+ else
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -598,21 +915,68 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_MIGRATE=true
ROUTE_INSECURE=Redirect
ROUTE_HSTS=null
+ MONITORING_PATH="/"
+ ROUTE_ANNOTATIONS="{}"
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
+
+ # work out if there are any lagoon api variable overrides for the annotations that are being added
+ . /kubectl-build-deploy/scripts/exec-fastly-annotations.sh
+ # if we get any other populated service id overrides in any of the steps in exec-fastly-annotations.sh
+ # make it available to the ingress creation here by overriding what may be defined in the lagoon.yml
+ if [ ! -z "$LAGOON_FASTLY_SERVICE_ID" ]; then
+ ROUTE_FASTLY_SERVICE_ID=$LAGOON_FASTLY_SERVICE_ID
+ ROUTE_FASTLY_SERVICE_WATCH=$LAGOON_FASTLY_SERVICE_WATCH
+    if [ ! -z "$LAGOON_FASTLY_SERVICE_API_SECRET" ]; then
+ ROUTE_FASTLY_SERVICE_API_SECRET=$LAGOON_FASTLY_SERVICE_API_SECRET
+ fi
+ fi
+
+ # Create the fastly values required
+ FASTLY_ARGS=()
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_ID" ]; then
+ FASTLY_ARGS+=(--set fastly.serviceId=${ROUTE_FASTLY_SERVICE_ID})
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_API_SECRET" ]; then
+ if contains $FASTLY_API_SECRETS "${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET}"; then
+ FASTLY_ARGS+=(--set fastly.apiSecretName=${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET})
+ else
+ echo "$ROUTE_FASTLY_SERVICE_API_SECRET requested, but not found in .lagoon.yml file"; exit 1;
+ fi
+ fi
+ ROUTE_FASTLY_SERVICE_WATCH=true
fi
touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
echo "$ROUTE_ANNOTATIONS" | yq p - annotations > /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+      # ${ROUTE_DOMAIN} is used as a helm release name, which can be a maximum of 53 characters long.
+      # So we need some logic to make sure it never exceeds 53 characters
+ if [[ ${#ROUTE_DOMAIN} -gt 53 ]] ; then
+        # Trim the route domain to 47 characters and append a dash plus a 5 character hash of the full domain,
+        # giving a total of 53 characters
+ INGRESS_NAME=${ROUTE_DOMAIN:0:47}-$(echo ${ROUTE_DOMAIN} | md5sum | cut -f 1 -d " " | cut -c 1-5)
+ else
+ INGRESS_NAME=${ROUTE_DOMAIN}
+ fi
+
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
- MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+ MAIN_CUSTOM_ROUTE=$INGRESS_NAME
+
+      # if we are in production, enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
- helm template ${ROUTE_DOMAIN} \
+ helm template ${INGRESS_NAME} \
/kubectl-build-deploy/helmcharts/custom-ingress \
--set host="${ROUTE_DOMAIN}" \
--set service="${ROUTE_SERVICE}" \
@@ -620,8 +984,15 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
--set insecure="${ROUTE_INSECURE}" \
--set hsts="${ROUTE_HSTS}" \
--set routeMigrate="${ROUTE_MIGRATE}" \
+ --set ingressmonitorcontroller.enabled="${MONITORING_ENABLED}" \
+ --set ingressmonitorcontroller.path="${MONITORING_PATH}" \
+ --set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \
+ --set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \
+ "${FASTLY_ARGS[@]}" --set fastly.watch="${ROUTE_FASTLY_SERVICE_WATCH}" \
-f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
+      MONITORING_ENABLED="false" # disable monitoring again in case it was enabled for the main route
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -631,13 +1002,7 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
fi
fi
-# set some monitoring defaults
-if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
- MONITORING_ENABLED="true"
-else
- MONITORING_ENABLED="false"
-
-fi
+MONITORING_ENABLED="false" # monitoring is disabled by default; it is re-enabled below for the first route of a production environment
# Two while loops as we have multiple services that want routes and each service has multiple routes
ROUTES_SERVICE_COUNTER=0
@@ -652,12 +1017,22 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
      # Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {})
+ # get the fastly configuration values from .lagoon.yml
+ if cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly &> /dev/null; then
+      ROUTE_FASTLY_SERVICE_ID=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.service-id "")
+ ROUTE_FASTLY_SERVICE_API_SECRET=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.api-secret-name "")
+ ROUTE_FASTLY_SERVICE_WATCH=$(set -o pipefail; cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.watch false | tr '[:upper:]' '[:lower:]')
+ else
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -665,7 +1040,47 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
ROUTE_MIGRATE=false
ROUTE_INSECURE=Redirect
ROUTE_HSTS=null
+ MONITORING_PATH="/"
ROUTE_ANNOTATIONS="{}"
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
+
+ # work out if there are any lagoon api variable overrides for the annotations that are being added
+ . /kubectl-build-deploy/scripts/exec-fastly-annotations.sh
+ # if we get any other populated service id overrides in any of the steps in exec-fastly-annotations.sh
+ # make it available to the ingress creation here by overriding what may be defined in the lagoon.yml
+ if [ ! -z "$LAGOON_FASTLY_SERVICE_ID" ]; then
+ ROUTE_FASTLY_SERVICE_ID=$LAGOON_FASTLY_SERVICE_ID
+ ROUTE_FASTLY_SERVICE_WATCH=$LAGOON_FASTLY_SERVICE_WATCH
+    if [ ! -z "$LAGOON_FASTLY_SERVICE_API_SECRET" ]; then
+ ROUTE_FASTLY_SERVICE_API_SECRET=$LAGOON_FASTLY_SERVICE_API_SECRET
+ fi
+ fi
+
+ # Create the fastly values required
+ FASTLY_ARGS=()
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_ID" ]; then
+ FASTLY_ARGS+=(--set fastly.serviceId=${ROUTE_FASTLY_SERVICE_ID})
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_API_SECRET" ]; then
+ if contains $FASTLY_API_SECRETS "${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET}"; then
+ FASTLY_ARGS+=(--set fastly.apiSecretName=${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET})
+ else
+ echo "$ROUTE_FASTLY_SERVICE_API_SECRET requested, but not found in .lagoon.yml file"; exit 1;
+ fi
+ fi
+ ROUTE_FASTLY_SERVICE_WATCH=true
+ fi
+
+  # ${ROUTE_DOMAIN} is used as a helm release name, which can be a maximum of 53 characters long.
+  # So we need some logic to make sure it never exceeds 53 characters
+ if [[ ${#ROUTE_DOMAIN} -gt 53 ]] ; then
+    # Trim the route domain to 47 characters and append a dash plus a 5 character hash of the full domain,
+    # giving a total of 53 characters
+ INGRESS_NAME=${ROUTE_DOMAIN:0:47}-$(echo ${ROUTE_DOMAIN} | md5sum | cut -f 1 -d " " | cut -c 1-5)
+ else
+ INGRESS_NAME=${ROUTE_DOMAIN}
fi
touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
@@ -673,14 +1088,20 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
- MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+ MAIN_CUSTOM_ROUTE=$INGRESS_NAME
+
+    # if we are in production, enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
- helm template ${ROUTE_DOMAIN} \
+ helm template ${INGRESS_NAME} \
/kubectl-build-deploy/helmcharts/custom-ingress \
--set host="${ROUTE_DOMAIN}" \
--set service="${ROUTE_SERVICE}" \
@@ -692,8 +1113,11 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
--set ingressmonitorcontroller.path="${MONITORING_PATH}" \
--set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \
--set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \
+ "${FASTLY_ARGS[@]}" --set fastly.watch="${ROUTE_FASTLY_SERVICE_WATCH}" \
-f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
+    MONITORING_ENABLED="false" # disable monitoring again in case it was enabled for the main route
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -710,12 +1134,22 @@ else
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
      # Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
ROUTE_ANNOTATIONS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.annotations {})
+ # get the fastly configuration values from .lagoon.yml
+ if cat .lagoon.yml | shyaml keys environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly &> /dev/null; then
+      ROUTE_FASTLY_SERVICE_ID=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.service-id "")
+ ROUTE_FASTLY_SERVICE_API_SECRET=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.api-secret-name "")
+ ROUTE_FASTLY_SERVICE_WATCH=$(set -o pipefail; cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.fastly.watch false | tr '[:upper:]' '[:lower:]')
+ else
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
else
# Only a value given, assuming some defaults
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
@@ -723,22 +1157,68 @@ else
ROUTE_MIGRATE=false
ROUTE_INSECURE=Redirect
ROUTE_HSTS=null
+ MONITORING_PATH="/"
ROUTE_ANNOTATIONS="{}"
+ ROUTE_FASTLY_SERVICE_ID=""
+ ROUTE_FASTLY_SERVICE_API_SECRET=""
+ ROUTE_FASTLY_SERVICE_WATCH=false
+ fi
+
+ # work out if there are any lagoon api variable overrides for the annotations that are being added
+ . /kubectl-build-deploy/scripts/exec-fastly-annotations.sh
+ # if we get any other populated service id overrides in any of the steps in exec-fastly-annotations.sh
+ # make it available to the ingress creation here by overriding what may be defined in the lagoon.yml
+ if [ ! -z "$LAGOON_FASTLY_SERVICE_ID" ]; then
+ ROUTE_FASTLY_SERVICE_ID=$LAGOON_FASTLY_SERVICE_ID
+ ROUTE_FASTLY_SERVICE_WATCH=$LAGOON_FASTLY_SERVICE_WATCH
+    if [ ! -z "$LAGOON_FASTLY_SERVICE_API_SECRET" ]; then
+ ROUTE_FASTLY_SERVICE_API_SECRET=$LAGOON_FASTLY_SERVICE_API_SECRET
+ fi
+ fi
+
+ # Create the fastly values required
+ FASTLY_ARGS=()
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_ID" ]; then
+ FASTLY_ARGS+=(--set fastly.serviceId=${ROUTE_FASTLY_SERVICE_ID})
+ if [ ! -z "$ROUTE_FASTLY_SERVICE_API_SECRET" ]; then
+ if contains $FASTLY_API_SECRETS "${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET}"; then
+ FASTLY_ARGS+=(--set fastly.apiSecretName=${FASTLY_API_SECRET_PREFIX}${ROUTE_FASTLY_SERVICE_API_SECRET})
+ else
+ echo "$ROUTE_FASTLY_SERVICE_API_SECRET requested, but not found in .lagoon.yml file"; exit 1;
+ fi
+ fi
+ ROUTE_FASTLY_SERVICE_WATCH=true
fi
touch /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
echo "$ROUTE_ANNOTATIONS" | yq p - annotations > /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
+  # ${ROUTE_DOMAIN} is used as a helm release name, which can be a maximum of 53 characters long.
+  # So we need some logic to make sure it never exceeds 53 characters
+ if [[ ${#ROUTE_DOMAIN} -gt 53 ]] ; then
+    # Trim the route domain to 47 characters and append a dash plus a 5 character hash of the full domain,
+    # giving a total of 53 characters
+ INGRESS_NAME=${ROUTE_DOMAIN:0:47}-$(echo ${ROUTE_DOMAIN} | md5sum | cut -f 1 -d " " | cut -c 1-5)
+ else
+ INGRESS_NAME=${ROUTE_DOMAIN}
+ fi
+
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
- MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+ MAIN_CUSTOM_ROUTE=$INGRESS_NAME
+
+    # if we are in production, enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
cat /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml
- helm template ${ROUTE_DOMAIN} \
+ helm template ${INGRESS_NAME} \
/kubectl-build-deploy/helmcharts/custom-ingress \
--set host="${ROUTE_DOMAIN}" \
--set service="${ROUTE_SERVICE}" \
@@ -750,8 +1230,11 @@ else
--set ingressmonitorcontroller.path="${MONITORING_PATH}" \
--set ingressmonitorcontroller.alertContacts="${MONITORING_ALERTCONTACT}" \
--set ingressmonitorcontroller.statuspageId="${MONITORING_STATUSPAGEID}" \
+ "${FASTLY_ARGS[@]}" --set fastly.watch="${ROUTE_FASTLY_SERVICE_WATCH}" \
-f /kubectl-build-deploy/values.yaml -f /kubectl-build-deploy/${ROUTE_DOMAIN}-values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${ROUTE_DOMAIN}.yaml
+    MONITORING_ENABLED="false" # disable monitoring again in case it was enabled for the main route
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -771,24 +1254,65 @@ if [[ "${CAPABILITIES[@]}" =~ "backup.appuio.ch/v1alpha1/Schedule" ]]; then
TEMPLATE_PARAMETERS=()
+ # Check for custom baas bucket name
+ if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ BAAS_BUCKET_NAME=$(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_BUCKET_NAME") | "\(.value)"')
+ fi
+  if [ -z "$BAAS_BUCKET_NAME" ]; then
+ BAAS_BUCKET_NAME=baas-${PROJECT}
+ fi
+
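The jq lookup above can be checked against a hypothetical LAGOON_PROJECT_VARIABLES payload (the real value is injected into the build by Lagoon):

```bash
LAGOON_PROJECT_VARIABLES='[{"name":"LAGOON_BAAS_BUCKET_NAME","value":"my-custom-bucket","scope":"build"}]'
echo "$LAGOON_PROJECT_VARIABLES" | jq -r '.[] | select(.name == "LAGOON_BAAS_BUCKET_NAME") | "\(.value)"'
# -> my-custom-bucket
```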
+ # Pull in .lagoon.yml variables
+ PRODUCTION_MONTHLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.monthly "")
+ PRODUCTION_WEEKLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.weekly "")
+ PRODUCTION_DAILY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.daily "")
+
+ # Set template parameters for retention values (prefer .lagoon.yml values over supplied defaults after ensuring they are valid integers via "-eq" comparison)
+  if [ ! -z "$PRODUCTION_MONTHLY_BACKUP_RETENTION" ] && [ "$PRODUCTION_MONTHLY_BACKUP_RETENTION" -eq "$PRODUCTION_MONTHLY_BACKUP_RETENTION" ] && [ "$ENVIRONMENT_TYPE" = 'production' ]; then
+    MONTHLY_BACKUP_RETENTION=${PRODUCTION_MONTHLY_BACKUP_RETENTION}
+  else
+    MONTHLY_BACKUP_RETENTION=${MONTHLY_BACKUP_DEFAULT_RETENTION}
+  fi
+  if [ ! -z "$PRODUCTION_WEEKLY_BACKUP_RETENTION" ] && [ "$PRODUCTION_WEEKLY_BACKUP_RETENTION" -eq "$PRODUCTION_WEEKLY_BACKUP_RETENTION" ] && [ "$ENVIRONMENT_TYPE" = 'production' ]; then
+    WEEKLY_BACKUP_RETENTION=${PRODUCTION_WEEKLY_BACKUP_RETENTION}
+  else
+    WEEKLY_BACKUP_RETENTION=${WEEKLY_BACKUP_DEFAULT_RETENTION}
+  fi
+  if [ ! -z "$PRODUCTION_DAILY_BACKUP_RETENTION" ] && [ "$PRODUCTION_DAILY_BACKUP_RETENTION" -eq "$PRODUCTION_DAILY_BACKUP_RETENTION" ] && [ "$ENVIRONMENT_TYPE" = 'production' ]; then
+    DAILY_BACKUP_RETENTION=${PRODUCTION_DAILY_BACKUP_RETENTION}
+  else
+    DAILY_BACKUP_RETENTION=${DAILY_BACKUP_DEFAULT_RETENTION}
+  fi
+
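The self-`-eq` comparison above works as an integer check because test(1) errors out on non-integer operands; a standalone sketch (with stderr silenced, which the script above deliberately does not do):

```bash
is_valid_retention() {
  # test(1) errors on non-integer operands, so the self-comparison only
  # succeeds for well-formed integers
  [ ! -z "$1" ] && [ "$1" -eq "$1" ] 2> /dev/null
}
is_valid_retention 12        && echo "12 is used as-is"
is_valid_retention "monthly" || echo "'monthly' falls back to the default"
```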
# Run Backups every day at 2200-0200
BACKUP_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(22-2) * * *")
- TEMPLATE_PARAMETERS+=(-p BACKUP_SCHEDULE="${BACKUP_SCHEDULE}")
- # TODO: -p == --set in helm
- # Run Checks on Sunday at 0300-0600
- CHECK_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 0")
- TEMPLATE_PARAMETERS+=(-p CHECK_SCHEDULE="${CHECK_SCHEDULE}")
- # Run Prune on Saturday at 0300-0600
- PRUNE_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 6")
- TEMPLATE_PARAMETERS+=(-p PRUNE_SCHEDULE="${PRUNE_SCHEDULE}")
+  if [ ! -z "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" ] && [ "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" = 'enabled' ]; then
+ # Let the controller deduplicate checks (will run weekly at a random time throughout the week)
+ CHECK_SCHEDULE="@weekly-random"
+ else
+ # Run Checks on Sunday at 0300-0600
+ CHECK_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 0")
+ fi
+
+  if [ ! -z "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" ] && [ "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" = 'enabled' ]; then
+ # Let the controller deduplicate prunes (will run weekly at a random time throughout the week)
+ PRUNE_SCHEDULE="@weekly-random"
+ else
+ # Run Prune on Saturday at 0300-0600
+ PRUNE_SCHEDULE=$( /kubectl-build-deploy/scripts/convert-crontab.sh "${NAMESPACE}" "M H(3-6) * * 6")
+ fi
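A sketch of the idea behind convert-crontab.sh, assuming it maps the `M`/`H(22-2)` placeholders to deterministic per-namespace values so schedules are spread across namespaces but stay stable between builds (the real script may differ):

```bash
NAMESPACE="example-project-main"            # hypothetical namespace
SEED=$(echo "${NAMESPACE}" | cksum | cut -f 1 -d " ")
MINUTE=$((SEED % 60))                       # stand-in for "M"
HOUR=$(( (22 + SEED % 5) % 24 ))            # one of the 22,23,0,1,2 slots for "H(22-2)"
echo "${MINUTE} ${HOUR} * * *"
```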
OPENSHIFT_TEMPLATE="/kubectl-build-deploy/openshift-templates/backup-schedule.yml"
helm template k8up-lagoon-backup-schedule /kubectl-build-deploy/helmcharts/k8up-schedule \
-f /kubectl-build-deploy/values.yaml \
--set backup.schedule="${BACKUP_SCHEDULE}" \
--set check.schedule="${CHECK_SCHEDULE}" \
- --set prune.schedule="${PRUNE_SCHEDULE}" "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/k8up-lagoon-backup-schedule.yaml
+ --set prune.schedule="${PRUNE_SCHEDULE}" "${HELM_ARGUMENTS[@]}" \
+    --set baasBucketName="${BAAS_BUCKET_NAME}" \
+    --set prune.retention.keepMonthly=$MONTHLY_BACKUP_RETENTION \
+    --set prune.retention.keepWeekly=$WEEKLY_BACKUP_RETENTION \
+    --set prune.retention.keepDaily=$DAILY_BACKUP_RETENTION > $YAML_FOLDER/k8up-lagoon-backup-schedule.yaml
fi
if [ "$(ls -A $YAML_FOLDER/)" ]; then
@@ -887,6 +1411,14 @@ do
. /kubectl-build-deploy/scripts/exec-kubectl-mariadb-dbaas.sh
;;
+ postgres-dbaas)
+ . /kubectl-build-deploy/scripts/exec-kubectl-postgres-dbaas.sh
+ ;;
+
+ mongodb-dbaas)
+ . /kubectl-build-deploy/scripts/exec-kubectl-mongodb-dbaas.sh
+ ;;
+
*)
echo "DBAAS Type ${SERVICE_TYPE} not implemented"; exit 1;
@@ -906,37 +1438,51 @@ yq write -i -- /kubectl-build-deploy/values.yaml 'configMapSha' $CONFIG_MAP_SHA
### PUSH IMAGES TO OPENSHIFT REGISTRY
##############################################
-if [[ $THIS_IS_TUG == "true" ]]; then
- # TODO: lagoon-tug is not implemented yet in kubernetes
- echo "lagoon-tug is not implemented yet in kubernetes"
- exit 1
- # Allow to disable registry auth
- if [ ! "${TUG_SKIP_REGISTRY_AUTH}" == "true" ]; then
- # This adds the defined credentials to the serviceaccount/default so that the deployments can pull from the remote registry
- if kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get secret tug-registry 2> /dev/null; then
- kubectl --insecure-skip-tls-verify -n ${NAMESPACE} delete secret tug-registry
- fi
-
- kubectl --insecure-skip-tls-verify -n ${NAMESPACE} secrets new-dockercfg tug-registry --docker-server="${TUG_REGISTRY}" --docker-username="${TUG_REGISTRY_USERNAME}" --docker-password="${TUG_REGISTRY_PASSWORD}" --docker-email="${TUG_REGISTRY_USERNAME}"
- kubectl --insecure-skip-tls-verify -n ${NAMESPACE} secrets add serviceaccount/default secrets/tug-registry --for=pull
- fi
-
- # Import all remote Images into ImageStreams
- readarray -t TUG_IMAGES < /kubectl-build-deploy/tug/images
- for TUG_IMAGE in "${TUG_IMAGES[@]}"
- do
- kubectl --insecure-skip-tls-verify -n ${NAMESPACE} tag --source=docker "${TUG_REGISTRY}/${TUG_REGISTRY_REPOSITORY}/${TUG_IMAGE_PREFIX}${TUG_IMAGE}:${SAFE_BRANCH}" "${TUG_IMAGE}:latest"
- done
-
-elif [ "$BUILD_TYPE" == "pullrequest" ] || [ "$BUILD_TYPE" == "branch" ]; then
+if [ "$BUILD_TYPE" == "pullrequest" ] || [ "$BUILD_TYPE" == "branch" ]; then
- # All images that should be pulled are tagged as Images directly in OpenShift Registry
+  # All images that should be pulled are copied to the Harbor registry
for IMAGE_NAME in "${!IMAGES_PULL[@]}"
do
PULL_IMAGE="${IMAGES_PULL[${IMAGE_NAME}]}"
- # . /kubectl-build-deploy/scripts/exec-kubernetes-tag-dockerhub.sh
- # TODO: check if we can download and push the images to harbour (e.g. how artifactory does this)
- IMAGE_HASHES[${IMAGE_NAME}]=$(skopeo inspect docker://${PULL_IMAGE} --tls-verify=false | jq ".Name + \"@\" + .Digest" -r)
+
+ # Try to handle private registries first
+ if [ $PRIVATE_REGISTRY_COUNTER -gt 0 ]; then
+    if [ "$PRIVATE_EXTERNAL_REGISTRY" -eq 1 ]; then
+ EXTERNAL_REGISTRY=0
+ for EXTERNAL_REGISTRY_URL in "${PRIVATE_REGISTRY_URLS[@]}"
+ do
+ # strip off "http://" or "https://" from registry url if present
+        bare_url="${EXTERNAL_REGISTRY_URL#http://}"
+        bare_url="${bare_url#https://}"
+
+        # Test whether the image is hosted on this external registry, rather than on a private Docker Hub account
+        case $PULL_IMAGE in
+          "$bare_url"*)
+            EXTERNAL_REGISTRY=1
+            ;;
+        esac
+ done
+
+ # If this image is hosted in an external registry, pull it from there
+ if [ $EXTERNAL_REGISTRY -eq 1 ]; then
+ skopeo copy --dest-tls-verify=false docker://${PULL_IMAGE} docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
+      # If this image is not from an external registry but Docker Hub credentials were supplied, pull it straight from Docker Hub
+ elif [ $PRIVATE_DOCKER_HUB_REGISTRY -eq 1 ]; then
+ skopeo copy --dest-tls-verify=false docker://${PULL_IMAGE} docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
+      # If the image is not from an external registry and no Docker Hub credentials were supplied, pull it from the image cache
+ else
+ skopeo copy --dest-tls-verify=false docker://${IMAGECACHE_REGISTRY}/${PULL_IMAGE} docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
+ fi
+      # If private registries are defined but none of them are external, a private Docker Hub account must have been specified
+ else
+ skopeo copy --dest-tls-verify=false docker://${PULL_IMAGE} docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
+ fi
+ # If no private registries, use the imagecache
+ else
+ skopeo copy --dest-tls-verify=false docker://${IMAGECACHE_REGISTRY}/${PULL_IMAGE} docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
+ fi
+
+ IMAGE_HASHES[${IMAGE_NAME}]=$(skopeo inspect docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} --tls-verify=false | jq ".Name + \"@\" + .Digest" -r)
done
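The copy-then-pin pattern above, with hypothetical image and registry names: copy the upstream image into the project's registry, then resolve the pushed tag to an immutable name@digest reference:

```bash
# Hypothetical source image and target registry
skopeo copy --dest-tls-verify=false \
  docker://docker.io/library/nginx:1.19 \
  docker://registry.example.com/myproject/main/nginx:latest
# Resolve the pushed tag to an immutable name@digest reference
skopeo inspect --tls-verify=false \
  docker://registry.example.com/myproject/main/nginx:latest | jq -r '.Name + "@" + .Digest'
# -> registry.example.com/myproject/main/nginx@sha256:...
```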
for IMAGE_NAME in "${!IMAGES_BUILD[@]}"
@@ -960,12 +1506,13 @@ elif [ "$BUILD_TYPE" == "pullrequest" ] || [ "$BUILD_TYPE" == "branch" ]; then
IMAGE_HASHES[${IMAGE_NAME}]=$(docker inspect ${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} --format '{{json .RepoDigests}}' | "${JQ_QUERY[@]}")
done
-# elif [ "$BUILD_TYPE" == "promote" ]; then
+elif [ "$BUILD_TYPE" == "promote" ]; then
-# for IMAGE_NAME in "${IMAGES[@]}"
-# do
-# . /kubectl-build-deploy/scripts/exec-kubernetes-tag.sh
-# done
+ for IMAGE_NAME in "${IMAGES[@]}"
+ do
+ . /kubectl-build-deploy/scripts/exec-kubernetes-promote.sh
+ IMAGE_HASHES[${IMAGE_NAME}]=$(skopeo inspect docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} --tls-verify=false | jq ".Name + \"@\" + .Digest" -r)
+ done
fi
@@ -1071,36 +1618,7 @@ do
yq write -i --tag '!!str' -- /kubectl-build-deploy/${SERVICE_NAME}-values.yaml 'inPodCronjobs' ''
fi
- #OVERRIDE_TEMPLATE=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.template false)
- #ENVIRONMENT_OVERRIDE_TEMPLATE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.templates.$SERVICE_NAME false)
- #if [[ "${OVERRIDE_TEMPLATE}" == "false" && "${ENVIRONMENT_OVERRIDE_TEMPLATE}" == "false" ]]; then # No custom template defined in docker-compose or .lagoon.yml, using the given service ones
- # Generate deployment if service type defines it
- . /kubectl-build-deploy/scripts/exec-kubectl-resources-with-images.sh
-
- # # Generate statefulset if service type defines it
- # OPENSHIFT_STATEFULSET_TEMPLATE="/kubectl-build-deploy/openshift-templates/${SERVICE_TYPE}/statefulset.yml"
- # if [ -f $OPENSHIFT_STATEFULSET_TEMPLATE ]; then
- # OPENSHIFT_TEMPLATE=$OPENSHIFT_STATEFULSET_TEMPLATE
- # . /kubectl-build-deploy/scripts/exec-kubernetes-resources-with-images.sh
- # fi
- # elif [[ "${ENVIRONMENT_OVERRIDE_TEMPLATE}" != "false" ]]; then # custom template defined for this service in .lagoon.yml, trying to use it
-
- # OPENSHIFT_TEMPLATE=$ENVIRONMENT_OVERRIDE_TEMPLATE
- # if [ ! -f $OPENSHIFT_TEMPLATE ]; then
- # echo "defined template $OPENSHIFT_TEMPLATE for service $SERVICE_TYPE in .lagoon.yml not found"; exit 1;
- # else
- # . /kubectl-build-deploy/scripts/exec-kubernetes-resources-with-images.sh
- # fi
- # elif [[ "${OVERRIDE_TEMPLATE}" != "false" ]]; then # custom template defined for this service in docker-compose, trying to use it
-
- # OPENSHIFT_TEMPLATE=$OVERRIDE_TEMPLATE
- # if [ ! -f $OPENSHIFT_TEMPLATE ]; then
- # echo "defined template $OPENSHIFT_TEMPLATE for service $SERVICE_TYPE in $DOCKER_COMPOSE_YAML not found"; exit 1;
- # else
- # . /kubectl-build-deploy/scripts/exec-kubernetes-resources-with-images.sh
- # fi
- #fi
-
+ . /kubectl-build-deploy/scripts/exec-kubectl-resources-with-images.sh
done
@@ -1141,34 +1659,18 @@ do
SERVICE_ROLLOUT_TYPE=$ENVIRONMENT_SERVICE_ROLLOUT_TYPE
fi
- if [ $SERVICE_TYPE == "elasticsearch-cluster" ]; then
-
- STATEFULSET="${SERVICE_NAME}"
- . /kubectl-build-deploy/scripts/exec-monitor-statefulset.sh
+ if [ $SERVICE_TYPE == "mariadb-dbaas" ]; then
- elif [ $SERVICE_TYPE == "rabbitmq-cluster" ]; then
-
- STATEFULSET="${SERVICE_NAME}"
- . /kubectl-build-deploy/scripts/exec-monitor-statefulset.sh
-
- elif [ $SERVICE_ROLLOUT_TYPE == "statefulset" ]; then
-
- STATEFULSET="${SERVICE_NAME}"
- . /kubectl-build-deploy/scripts/exec-monitor-statefulset.sh
+ echo "nothing to monitor for $SERVICE_TYPE"
- elif [ $SERVICE_ROLLOUT_TYPE == "deamonset" ]; then
+ elif [ $SERVICE_TYPE == "postgres-dbaas" ]; then
- DAEMONSET="${SERVICE_NAME}"
- . /kubectl-build-deploy/scripts/exec-monitor-deamonset.sh
+ echo "nothing to monitor for $SERVICE_TYPE"
- elif [ $SERVICE_TYPE == "mariadb-dbaas" ]; then
+ elif [ $SERVICE_TYPE == "mongodb-dbaas" ]; then
echo "nothing to monitor for $SERVICE_TYPE"
- elif [ $SERVICE_TYPE == "postgres" ]; then
- # TODO: Remove
- echo "nothing to monitor for $SERVICE_TYPE - for now"
-
elif [ ! $SERVICE_ROLLOUT_TYPE == "false" ]; then
. /kubectl-build-deploy/scripts/exec-monitor-deploy.sh
fi
@@ -1225,3 +1727,15 @@ if [ "${LAGOON_POSTROLLOUT_DISABLED}" != "true" ]; then
else
echo "post-rollout tasks are currently disabled LAGOON_POSTROLLOUT_DISABLED is set to true"
fi
+
+##############################################
+### PUSH the latest .lagoon.yml into lagoon-yaml configmap
+##############################################
+
+if kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get configmap lagoon-yaml &> /dev/null; then
+ # replace it
+  kubectl --insecure-skip-tls-verify -n ${NAMESPACE} create configmap lagoon-yaml --from-file=.lagoon.yml -o yaml --dry-run | kubectl --insecure-skip-tls-verify -n ${NAMESPACE} replace -f -
+else
+ # create it
+ kubectl --insecure-skip-tls-verify -n ${NAMESPACE} create configmap lagoon-yaml --from-file=.lagoon.yml
+fi
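The replace-or-create pair above could also be collapsed into the create-or-update form already used for the registry secrets in this PR; a possible one-liner:

```bash
kubectl --insecure-skip-tls-verify -n ${NAMESPACE} create configmap lagoon-yaml \
  --from-file=.lagoon.yml -o yaml --dry-run | kubectl --insecure-skip-tls-verify -n ${NAMESPACE} apply -f -
```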
diff --git a/images/kubectl-build-deploy-dind/build-deploy.sh b/images/kubectl-build-deploy-dind/build-deploy.sh
index ff7e1a557a..5100032dd1 100755
--- a/images/kubectl-build-deploy-dind/build-deploy.sh
+++ b/images/kubectl-build-deploy-dind/build-deploy.sh
@@ -44,6 +44,9 @@ fi
REGISTRY_SECRETS=()
PRIVATE_REGISTRY_COUNTER=0
+PRIVATE_REGISTRY_URLS=()
+PRIVATE_DOCKER_HUB_REGISTRY=0
+PRIVATE_EXTERNAL_REGISTRY=0
set +x
@@ -58,13 +61,7 @@ if [ ! -z ${INTERNAL_REGISTRY_URL} ] && [ ! -z ${INTERNAL_REGISTRY_USERNAME} ] &
echo "docker login -u '${INTERNAL_REGISTRY_USERNAME}' -p '${INTERNAL_REGISTRY_PASSWORD}' ${INTERNAL_REGISTRY_URL}" | /bin/bash
kubectl create secret docker-registry lagoon-internal-registry-secret --docker-server=${INTERNAL_REGISTRY_URL} --docker-username=${INTERNAL_REGISTRY_USERNAME} --docker-password=${INTERNAL_REGISTRY_PASSWORD} --dry-run -o yaml | kubectl apply -f -
REGISTRY_SECRETS+=("lagoon-internal-registry-secret")
- #docker login "-u '{$INTERNAL_REGISTRY_USERNAME}' -p '{$INTERNAL_REGISTRY_PASSWORD}' '{$INTERNAL_REGISTRY_URL}'"
REGISTRY=$INTERNAL_REGISTRY_URL # This will handle pointing Lagoon at the correct registry for non local builds
- #REGISTRY_REPOSITORY=$NAMESPACE
- # If we go with a different naming scheme, we can inject that here?
-#else
-# DOCKER_REGISTRY_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
-# docker login -u=jenkins -p="${DOCKER_REGISTRY_TOKEN}" ${REGISTRY}
fi
##############################################
@@ -97,23 +94,34 @@ do
PRIVATE_REGISTRY_CREDENTIAL=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.scope == "container_registry" and .name == "'$PRIVATE_CONTAINER_REGISTRY_PASSWORD'") | "\(.value)"'))
fi
if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
- PRIVATE_REGISTRY_CREDENTIAL=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.scope == "container_registry" and .name == "'$PRIVATE_CONTAINER_REGISTRY_PASSWORD'") | "\(.value)"'))
+ TEMP_PRIVATE_REGISTRY_CREDENTIAL=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.scope == "container_registry" and .name == "'$PRIVATE_CONTAINER_REGISTRY_PASSWORD'") | "\(.value)"'))
+ if [ ! -z "$TEMP_PRIVATE_REGISTRY_CREDENTIAL" ]; then
+ PRIVATE_REGISTRY_CREDENTIAL=$TEMP_PRIVATE_REGISTRY_CREDENTIAL
+ fi
fi
if [ -z $PRIVATE_REGISTRY_CREDENTIAL ]; then
#if no password defined in the lagoon api, pass the one in `.lagoon.yml` as a password
PRIVATE_REGISTRY_CREDENTIAL=$PRIVATE_CONTAINER_REGISTRY_PASSWORD
fi
+ if [ -z "$PRIVATE_REGISTRY_CREDENTIAL" ]; then
+    echo -e "A private container registry was defined in the .lagoon.yml file, but no password could be found in either the .lagoon.yml file or the Lagoon API.\n\nPlease check that the password has been set correctly."
+ exit 1
+ fi
if [ $PRIVATE_CONTAINER_REGISTRY_URL != "false" ]; then
echo "Attempting to log in to $PRIVATE_CONTAINER_REGISTRY_URL with user $PRIVATE_CONTAINER_REGISTRY_USERNAME - $PRIVATE_CONTAINER_REGISTRY_PASSWORD"
docker login --username $PRIVATE_CONTAINER_REGISTRY_USERNAME --password $PRIVATE_REGISTRY_CREDENTIAL $PRIVATE_CONTAINER_REGISTRY_URL
kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server=$PRIVATE_CONTAINER_REGISTRY_URL --docker-username=$PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_CREDENTIAL --dry-run -o yaml | kubectl apply -f -
REGISTRY_SECRETS+=("lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret")
+ PRIVATE_REGISTRY_URLS+=($PRIVATE_CONTAINER_REGISTRY_URL)
+ PRIVATE_EXTERNAL_REGISTRY=1
let ++PRIVATE_REGISTRY_COUNTER
else
echo "Attempting to log in to docker hub with user $PRIVATE_CONTAINER_REGISTRY_USERNAME - $PRIVATE_CONTAINER_REGISTRY_PASSWORD"
docker login --username $PRIVATE_CONTAINER_REGISTRY_USERNAME --password $PRIVATE_REGISTRY_CREDENTIAL
kubectl create secret docker-registry "lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret" --docker-server="https://index.docker.io/v1/" --docker-username=$PRIVATE_CONTAINER_REGISTRY_USERNAME --docker-password=$PRIVATE_REGISTRY_CREDENTIAL --dry-run -o yaml | kubectl apply -f -
REGISTRY_SECRETS+=("lagoon-private-registry-${PRIVATE_REGISTRY_COUNTER}-secret")
+ PRIVATE_REGISTRY_URLS+=("")
+ PRIVATE_DOCKER_HUB_REGISTRY=1
let ++PRIVATE_REGISTRY_COUNTER
fi
fi
@@ -121,23 +129,4 @@ done
set -x
-ADDITIONAL_YAMLS=($(cat .lagoon.yml | shyaml keys additional-yaml || echo ""))
-
-for ADDITIONAL_YAML in "${ADDITIONAL_YAMLS[@]}"
-do
- ADDITIONAL_YAML_PATH=$(cat .lagoon.yml | shyaml get-value additional-yaml.$ADDITIONAL_YAML.path false)
- if [ $ADDITIONAL_YAML_PATH == "false" ]; then
- echo "No 'path' defined for additional yaml $ADDITIONAL_YAML"; exit 1;
- fi
-
- if [ ! -f $ADDITIONAL_YAML_PATH ]; then
- echo "$ADDITIONAL_YAML_PATH for additional yaml $ADDITIONAL_YAML not found"; exit 1;
- fi
-
- ADDITIONAL_YAML_COMMAND=$(cat .lagoon.yml | shyaml get-value additional-yaml.$ADDITIONAL_YAML.command apply)
- ADDITIONAL_YAML_IGNORE_ERROR=$(cat .lagoon.yml | shyaml get-value additional-yaml.$ADDITIONAL_YAML.ignore_error false)
- ADDITIONAL_YAML_IGNORE_ERROR="${ADDITIONAL_YAML_IGNORE_ERROR,,}" # convert to lowercase, as shyaml returns "True" if the yaml is set to "true"
- . /kubectl-build-deploy/scripts/exec-additional-yaml.sh
-done
-
. /kubectl-build-deploy/build-deploy-docker-compose.sh
diff --git a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/_helpers.tpl
index 5685ea7827..af8b1a79ac 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/_helpers.tpl
@@ -66,4 +66,4 @@ lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
{{- end }}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml
index 9504bc6258..58ae3b9d6f 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/templates/ingress.yaml
@@ -36,6 +36,15 @@ metadata:
# haproxy.router.openshift.io/hsts_header: {{ .Values.route_hsts }}
{{ end -}}
kubernetes.io/tls-acme: {{ .Values.tls_acme | quote }}
+ # use a specific fastly service
+ {{- if .Values.fastly.serviceId }}
+ fastly.amazee.io/service-id: "{{ .Values.fastly.serviceId }}"
+ {{- end }}
+ fastly.amazee.io/watch: "{{ .Values.fastly.watch }}"
+ # use a custom secret for this ingress (customer supplied fastly integration)
+ {{- if .Values.fastly.apiSecretName }}
+ fastly.amazee.io/api-secret-name: "{{ .Values.fastly.apiSecretName }}"
+ {{- end }}
{{- include "custom-ingress.annotations" . | nindent 4 }}
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
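A quick way to eyeball the annotations this block produces, with hypothetical values and assuming the chart's remaining inputs come from the usual -f values files as in the build script:

```bash
helm template example-ingress /kubectl-build-deploy/helmcharts/custom-ingress \
  --set host="www.example.com" --set service="nginx" \
  --set fastly.serviceId="7abcdefexampleid" --set fastly.watch="true" \
  -f /kubectl-build-deploy/values.yaml | grep 'fastly.amazee.io'
# fastly.amazee.io/service-id: "7abcdefexampleid"
# fastly.amazee.io/watch: "true"
```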
diff --git a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml
index 59eec1e5ee..ff22a39dce 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/custom-ingress/values.yaml
@@ -14,3 +14,9 @@ ingressmonitorcontroller:
enabled: 'false'
interval: '60'
alertContacts: 'unconfigured'
+
+## example fastly block
+fastly:
+ watch: false
+# serviceId: ''
+# apiSecretName: ''
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/.helmignore b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/.helmignore
new file mode 100644
index 0000000000..50af031725
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/Chart.yaml
new file mode 100644
index 0000000000..5763d9a892
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v2
+name: elasticsearch-persistent
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/_helpers.tpl
new file mode 100644
index 0000000000..b1b3147864
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/_helpers.tpl
@@ -0,0 +1,99 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "elasticsearch.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "elasticsearch.fullname" -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "elasticsearch.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create full hostname for autogenerated hosts
+*/}}
+{{- define "elasticsearch.autogeneratedHost" -}}
+{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Generate name of Persistent Storage
+Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
+*/}}
+{{- define "elasticsearch.persistentStorageName" -}}
+{{- default .Release.Name .Values.persistentStorage.name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "elasticsearch.labels" -}}
+helm.sh/chart: {{ include "elasticsearch.chart" . }}
+{{ include "elasticsearch.selectorLabels" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "elasticsearch.lagoonLabels" . }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "elasticsearch.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "elasticsearch.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create a PriorityClassName.
+(this is based on the Lagoon Environment Type).
+*/}}
+{{- define "elasticsearch.lagoonPriority" -}}
+{{- printf "lagoon-priority-%s" .Values.environmentType }}
+{{- end -}}
+
+{{/*
+Lagoon Labels
+*/}}
+{{- define "elasticsearch.lagoonLabels" -}}
+lagoon.sh/service: {{ .Release.Name }}
+lagoon.sh/service-type: {{ .Chart.Name }}
+lagoon.sh/project: {{ .Values.project }}
+lagoon.sh/environment: {{ .Values.environment }}
+lagoon.sh/environmentType: {{ .Values.environmentType }}
+lagoon.sh/buildType: {{ .Values.buildType }}
+{{- end -}}
+
+{{/*
+Datadog Admission Controller label
+*/}}
+{{- define "elasticsearch.datadogLabels" -}}
+{{- if eq .Values.environmentType "production" -}}
+admission.datadoghq.com/enabled: "true"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Annotations
+*/}}
+{{- define "elasticsearch.annotations" -}}
+lagoon.sh/version: {{ .Values.lagoonVersion | quote }}
+{{- if .Values.branch }}
+lagoon.sh/branch: {{ .Values.branch | quote }}
+{{- end }}
+{{- if .Values.prNumber }}
+lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
+lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
+lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
+{{- end }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/cronjob.yaml b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/cronjob.yaml
new file mode 100644
index 0000000000..5abfbf9083
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/cronjob.yaml
@@ -0,0 +1,71 @@
+{{- range $cronjobName, $cronjobConfig := .Values.nativeCronjobs }}
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ labels:
+ {{- include "elasticsearch.labels" $ | nindent 4 }}
+ annotations:
+ {{- include "elasticsearch.annotations" . | nindent 4 }}
+spec:
+ schedule: {{ $cronjobConfig.schedule | quote }}
+ concurrencyPolicy: Forbid
+ successfulJobsHistoryLimit: 0
+ failedJobsHistoryLimit: 1
+ jobTemplate:
+ metadata:
+ labels:
+ {{- include "elasticsearch.labels" $ | nindent 8 }}
+ annotations:
+ {{- include "elasticsearch.annotations" $ | nindent 8 }}
+ spec:
+ backoffLimit: 0
+ template:
+ metadata:
+ labels:
+ {{- include "elasticsearch.labels" $ | nindent 12 }}
+ annotations:
+ {{- include "elasticsearch.annotations" $ | nindent 12 }}
+ spec:
+ {{- with $.Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ priorityClassName: {{ include "elasticsearch.lagoonPriority" $ }}
+ enableServiceLinks: false
+ securityContext:
+ {{- toYaml $.Values.podSecurityContext | nindent 12 }}
+ containers:
+ - image: {{ $.Values.image | quote }}
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ securityContext:
+ {{- toYaml $.Values.securityContext | nindent 16 }}
+ imagePullPolicy: {{ $.Values.imagePullPolicy }}
+ command:
+ - /lagoon/cronjob.sh
+ - {{ $cronjobConfig.command }}
+ env:
+ - name: LAGOON_GIT_SHA
+ value: {{ $.Values.gitSha | quote }}
+ - name: SERVICE_NAME
+ value: {{ $.Release.Name | quote }}
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ resources:
+ {{- toYaml $.Values.resources | nindent 16 }}
+ restartPolicy: Never
+ {{- with $.Values.nodeSelector }}
+ nodeSelector:
+        {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.affinity }}
+ affinity:
+        {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.tolerations }}
+ tolerations:
+        {{- toYaml . | nindent 12 }}
+ {{- end }}
+{{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/deployment.yaml
new file mode 100644
index 0000000000..61658e2980
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/deployment.yaml
@@ -0,0 +1,76 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "elasticsearch.fullname" . }}
+ labels:
+ {{- include "elasticsearch.labels" . | nindent 4 }}
+ annotations:
+ {{- include "elasticsearch.annotations" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ strategy:
+ type: Recreate
+ selector:
+ matchLabels:
+ {{- include "elasticsearch.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "elasticsearch.labels" . | nindent 8 }}
+ {{- include "elasticsearch.datadogLabels" . | nindent 8 }}
+ annotations:
+ {{- include "elasticsearch.annotations" . | nindent 8 }}
+ lagoon.sh/configMapSha: {{ .Values.configMapSha | quote }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ include "elasticsearch.persistentStorageName" . }}
+ persistentVolumeClaim:
+ claimName: {{ include "elasticsearch.persistentStorageName" . }}
+ priorityClassName: {{ include "elasticsearch.lagoonPriority" . }}
+ enableServiceLinks: false
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ initContainers:
+ # This init container sets the appropriate limits for mmap counts on the hosting node.
+ # https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
+ - name: set-max-map-count
+ image: ubuntu:20.04
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ securityContext:
+ privileged: true
+ command:
+ - /bin/bash
+ - -c
+ - 'if [[ "$(sysctl vm.max_map_count --values)" -lt 262144 ]]; then sysctl -w vm.max_map_count=262144; fi'
+ containers:
+ - image: {{ .Values.image | quote }}
+ name: {{ .Chart.Name }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ ports:
+ - containerPort: 9200
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /_cluster/health?local=true
+ port: 9200
+ initialDelaySeconds: 20
+ livenessProbe:
+ httpGet:
+ path: /_cluster/health?local=true
+ port: 9200
+ initialDelaySeconds: 120
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ env:
+ - name: CRONJOBS
+ value: {{ .Values.inPodCronjobs | quote }}
+ volumeMounts:
+ - name: {{ include "elasticsearch.persistentStorageName" . }}
+ mountPath: {{ .Values.persistentStorage.path | quote }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/prebackuppod.yaml b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/prebackuppod.yaml
similarity index 51%
rename from images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/prebackuppod.yaml
rename to images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/prebackuppod.yaml
index 83a39f7899..c2b90366ad 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/prebackuppod.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/prebackuppod.yaml
@@ -2,19 +2,19 @@
apiVersion: backup.appuio.ch/v1alpha1
kind: PreBackupPod
metadata:
- name: {{ include "redis-persistent.fullname" . }}-prebackuppod
+ name: {{ include "elasticsearch.fullname" . }}-prebackuppod
labels:
- {{- include "redis-persistent.labels" . | nindent 4 }}
+ {{- include "elasticsearch.labels" . | nindent 4 }}
annotations:
- {{- include "redis-persistent.annotations" . | nindent 4 }}
+ {{- include "elasticsearch.annotations" . | nindent 4 }}
spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path }} ."
- fileExtension: .{{ include "redis-persistent.fullname" . }}.tar
+ backupCommand: /bin/sh -c "tar -cf - -C {{ .Values.persistentStorage.path }} ."
+ fileExtension: .{{ include "elasticsearch.fullname" . }}.tar
pod:
metadata:
labels:
- prebackuppod: {{ include "redis-persistent.fullname" . }}
- {{- include "redis-persistent.labels" . | nindent 8 }}
+ prebackuppod: {{ include "elasticsearch.fullname" . }}
+ {{- include "elasticsearch.labels" . | nindent 8 }}
spec:
affinity:
podAffinity:
@@ -25,25 +25,24 @@ spec:
- key: lagoon.sh/service
operator: In
values:
- - {{ include "redis-persistent.fullname" . }}
+ - {{ include "elasticsearch.fullname" . }}
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- args:
- sleep
- - '3600'
+ - infinity
envFrom:
- configMapRef:
name: lagoon-env
- image: alpine
+ image: imagecache.amazeeio.cloud/library/alpine
imagePullPolicy: Always
- name: {{ include "redis-persistent.fullname" . }}-prebackuppod
+ name: {{ include "elasticsearch.fullname" . }}-prebackuppod
volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "elasticsearch.persistentStorageName" . }}
mountPath: {{ .Values.persistentStorage.path | quote }}
volumes:
- - name: {{ .Values.persistentStorage.name }}
+ - name: {{ include "elasticsearch.persistentStorageName" . }}
persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
+ claimName: {{ include "elasticsearch.persistentStorageName" . }}
{{ end }}
-
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/pvc.yaml
new file mode 100644
index 0000000000..bfeab78176
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/pvc.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "elasticsearch.persistentStorageName" . }}
+ labels:
+ {{- include "elasticsearch.labels" . | nindent 4 }}
+ annotations:
+ k8up.syn.tools/backup: "false"
+ {{- include "elasticsearch.annotations" . | nindent 4 }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.persistentStorage.size | quote }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/service.yaml
new file mode 100644
index 0000000000..f5d37ba3b2
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "elasticsearch.fullname" . }}
+ labels:
+ {{- include "elasticsearch.labels" . | nindent 4 }}
+ annotations:
+ {{- include "elasticsearch.annotations" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: 9200
+ protocol: TCP
+ name: 9200-tcp
+ selector:
+ {{- include "elasticsearch.selectorLabels" . | nindent 4 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/values.yaml
new file mode 100644
index 0000000000..6e55a8f9dd
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/elasticsearch/values.yaml
@@ -0,0 +1,51 @@
+# Default values for elasticsearch.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image: ""
+
+environmentType: production
+persistentStorage:
+ size: 5Gi
+ path: '/usr/share/elasticsearch/data'
+
+imagePullPolicy: Always
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 9200
+
+resources:
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+inPodCronjobs: ""
+
+configMapSha: ""
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/.helmignore b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/.helmignore
new file mode 100644
index 0000000000..fbe01f88f2
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/Chart.yaml
new file mode 100644
index 0000000000..78f90c0a9d
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v2
+name: fastly-api-secret
+description: A Helm chart for Kubernetes that creates the fastly-api-secret Secret
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/templates/_helpers.tpl
new file mode 100644
index 0000000000..04bbb10c04
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/templates/_helpers.tpl
@@ -0,0 +1,69 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "fastly-api-secret.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fastly-api-secret.fullname" -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "fastly-api-secret.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "fastly-api-secret.labels" -}}
+helm.sh/chart: {{ include "fastly-api-secret.chart" . }}
+{{ include "fastly-api-secret.selectorLabels" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "fastly-api-secret.lagoonLabels" . }}
+
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "fastly-api-secret.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "fastly-api-secret.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Lagoon Labels
+*/}}
+{{- define "fastly-api-secret.lagoonLabels" -}}
+lagoon.sh/service: {{ .Release.Name }}
+lagoon.sh/service-type: {{ .Chart.Name }}
+lagoon.sh/project: {{ .Values.project }}
+lagoon.sh/environment: {{ .Values.environment }}
+lagoon.sh/environmentType: {{ .Values.environmentType }}
+lagoon.sh/buildType: {{ .Values.buildType }}
+{{- end -}}
+
+{{/*
+Annotations
+*/}}
+{{- define "fastly-api-secret.annotations" -}}
+lagoon.sh/version: {{ .Values.lagoonVersion | quote }}
+{{- if .Values.branch }}
+lagoon.sh/branch: {{ .Values.branch | quote }}
+{{- end }}
+{{- if .Values.prNumber }}
+lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
+lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
+lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
+{{- end }}
+{{- end -}}
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/templates/secret.yaml b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/templates/secret.yaml
new file mode 100644
index 0000000000..155623f80a
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/templates/secret.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "fastly-api-secret.fullname" . }}
+ labels:
+ {{- include "fastly-api-secret.labels" . | nindent 4 }}
+ annotations:
+ {{- include "fastly-api-secret.annotations" . | nindent 4 }}
+ {{- with .Values.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+stringData:
+  api-token: {{ .Values.fastly.apiToken | quote }}
+  platform-tls-configuration: {{ .Values.fastly.platformTLSConfiguration | quote }}
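+# Minimal illustrative rendering, assuming fastly.apiToken "example-token" and
+# fastly.platformTLSConfiguration "service123" are supplied at deploy time;
+# the | quote above keeps numeric-looking IDs valid in stringData:
+#   stringData:
+#     api-token: "example-token"
+#     platform-tls-configuration: "service123"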
diff --git a/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/values.yaml
new file mode 100644
index 0000000000..5e79ee471f
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/fastly-api-secret/values.yaml
@@ -0,0 +1,7 @@
+# Default values for fastly-api-secret.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# fastly:
+# apiToken: ''
+# platformTLSConfiguration: ''
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/Chart.yaml
index 4dae308ab1..6ca616e8bd 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/Chart.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/Chart.yaml
@@ -14,4 +14,4 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
-version: 0.1.0
\ No newline at end of file
+version: 0.2.0
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/templates/schedule.yaml b/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/templates/schedule.yaml
index 3849cd2f57..e76c51b08e 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/templates/schedule.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/templates/schedule.yaml
@@ -12,13 +12,14 @@ spec:
key: repo-pw
name: baas-repo-pw
s3:
- bucket: baas-{{ .Values.project }}
+ bucket: '{{ .Values.baasBucketName }}'
backup:
schedule: '{{ .Values.backup.schedule }}'
check:
schedule: '{{ .Values.check.schedule }}'
prune:
retention:
- keepDaily: 7
- keepWeekly: 6
+ keepDaily: {{ .Values.prune.retention.keepDaily }}
+ keepWeekly: {{ .Values.prune.retention.keepWeekly }}
+ keepMonthly: {{ .Values.prune.retention.keepMonthly }}
schedule: '{{ .Values.prune.schedule }}'
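+# Illustrative rendering with the chart defaults from values.yaml
+# (keepDaily: 7, keepWeekly: 6, keepMonthly: 1):
+#   prune:
+#     retention:
+#       keepDaily: 7
+#       keepWeekly: 6
+#       keepMonthly: 1
+#     schedule: '0 * * * 6'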
diff --git a/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/values.yaml
index c809a8d6ad..611a4a9d16 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/k8up-schedule/values.yaml
@@ -11,4 +11,8 @@ check:
schedule: '0 * * * 0'
prune:
+ retention:
+ keepDaily: 7
+ keepWeekly: 6
+ keepMonthly: 1
schedule: '0 * * * 6'
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mariadb-dbaas/templates/prebackuppod.yaml b/images/kubectl-build-deploy-dind/helmcharts/mariadb-dbaas/templates/prebackuppod.yaml
index 56fd7d8d7f..e41d00d0d9 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/mariadb-dbaas/templates/prebackuppod.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/mariadb-dbaas/templates/prebackuppod.yaml
@@ -38,7 +38,7 @@ spec:
containers:
- args:
- sleep
- - '3600'
+ - infinity
env:
- name: BACKUP_DB_HOST
valueFrom:
@@ -60,7 +60,7 @@ spec:
configMapKeyRef:
key: {{ include "mariadb-dbaas.fullnameUppercase" . }}_DATABASE
name: lagoon-env
- image: amazeeio/alpine-mysql-client
+ image: imagecache.amazeeio.cloud/amazeeio/alpine-mysql-client
imagePullPolicy: Always
name: {{ include "mariadb-dbaas.fullname" . }}-prebackuppod
{{ end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/deployment.yaml
index 6b80482742..b50bb073e7 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/deployment.yaml
@@ -5,9 +5,7 @@ metadata:
labels:
{{- include "mariadb-single.labels" . | nindent 4 }}
annotations:
- {{- include "mariadb-single.annotations" . | nindent 4 -}}
- appuio.ch/backupcommand: /bin/sh -c "mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --all-databases"
- backup.appuio.ch/file-extension: .{{ include "mariadb-single.fullname" . }}.sql
+ {{- include "mariadb-single.annotations" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
@@ -21,6 +19,8 @@ spec:
{{- include "mariadb-single.labels" . | nindent 8 }}
annotations:
{{- include "mariadb-single.annotations" . | nindent 8 }}
+ k8up.syn.tools/backupcommand: /bin/sh -c "mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --all-databases"
+ k8up.syn.tools/file-extension: .{{ include "mariadb-single.fullname" . }}.sql
lagoon.sh/configMapSha: {{ .Values.configMapSha | quote }}
spec:
{{- with .Values.imagePullSecrets }}
@@ -32,11 +32,9 @@ spec:
persistentVolumeClaim:
claimName: {{ include "mariadb-single.fullname" . }}
securityContext:
- fsGroup: 0
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
- securityContext:
- runAsGroup: 0
image: "{{ .Values.image }}"
imagePullPolicy: {{ .Values.imagePullPolicy }}
env:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/pvc.yaml
index 8cf7a2a68e..2de9304bb3 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/mariadb-single/templates/pvc.yaml
@@ -7,7 +7,7 @@ metadata:
annotations:
{{- include "mariadb-single.annotations" . | nindent 4 }}
annotations:
- appuio.ch/backup: "false"
+ k8up.syn.tools/backup: "false"
spec:
accessModes:
- ReadWriteOnce
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/.helmignore b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/.helmignore
new file mode 100644
index 0000000000..50af031725
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/Chart.yaml
new file mode 100644
index 0000000000..a9c381b641
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v2
+name: mongodb-dbaas
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/_helpers.tpl
new file mode 100644
index 0000000000..a8487815ac
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/_helpers.tpl
@@ -0,0 +1,79 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "mongodb-dbaas.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "mongodb-dbaas.fullname" -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "mongodb-dbaas.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create full hostname for autogenerated hosts
+*/}}
+{{- define "mongodb-dbaas.autogeneratedHost" -}}
+{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "mongodb-dbaas.fullnameUppercase" -}}
+{{ include "mongodb-dbaas.fullname" . | upper | replace "-" "_" }}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "mongodb-dbaas.labels" -}}
+helm.sh/chart: {{ include "mongodb-dbaas.chart" . }}
+{{ include "mongodb-dbaas.selectorLabels" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "mongodb-dbaas.lagoonLabels" . }}
+
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "mongodb-dbaas.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "mongodb-dbaas.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Lagoon Labels
+*/}}
+{{- define "mongodb-dbaas.lagoonLabels" -}}
+lagoon.sh/service: {{ .Release.Name }}
+lagoon.sh/service-type: {{ .Chart.Name }}
+lagoon.sh/project: {{ .Values.project }}
+lagoon.sh/environment: {{ .Values.environment }}
+lagoon.sh/environmentType: {{ .Values.environmentType }}
+lagoon.sh/buildType: {{ .Values.buildType }}
+{{- end -}}
+
+{{/*
+Annotations
+*/}}
+{{- define "mongodb-dbaas.annotations" -}}
+lagoon.sh/version: {{ .Values.lagoonVersion | quote }}
+{{- if .Values.branch }}
+lagoon.sh/branch: {{ .Values.branch | quote }}
+{{- end }}
+{{- if .Values.prNumber }}
+lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
+lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
+lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
+{{- end }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/dbaas.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/dbaas.yaml
new file mode 100644
index 0000000000..84a1b3e447
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/dbaas.yaml
@@ -0,0 +1,11 @@
+apiVersion: mongodb.amazee.io/v1
+kind: MongoDBConsumer
+metadata:
+ name: {{ include "mongodb-dbaas.fullname" . }}
+ labels:
+ {{- include "mongodb-dbaas.labels" . | nindent 4 }}
+ annotations:
+ {{- include "mongodb-dbaas.annotations" . | nindent 4 }}
+
+spec:
+  environment: {{ .Values.environment }}
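+# Minimal sketch of the rendered resource, assuming release "mongodb" and
+# environment "main"; the dbaas operator watches MongoDBConsumer objects and
+# provisions a shared-database user for this environment:
+#   apiVersion: mongodb.amazee.io/v1
+#   kind: MongoDBConsumer
+#   metadata:
+#     name: mongodb
+#   spec:
+#     environment: main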
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/prebackuppod.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/prebackuppod.yaml
new file mode 100644
index 0000000000..4f893420ac
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/templates/prebackuppod.yaml
@@ -0,0 +1,67 @@
+{{ if .Capabilities.APIVersions.Has "backup.appuio.ch/v1alpha1/PreBackupPod" }}
+apiVersion: backup.appuio.ch/v1alpha1
+kind: PreBackupPod
+metadata:
+ name: {{ include "mongodb-dbaas.fullname" . }}-prebackuppod
+ labels:
+ {{- include "mongodb-dbaas.labels" . | nindent 4 }}
+ annotations:
+ {{- include "mongodb-dbaas.annotations" . | nindent 4 }}
+spec:
+ backupCommand: /bin/sh -c "mongodump --uri=mongodb://${BACKUP_DB_USER}:${BACKUP_DB_PASSWORD}@${BACKUP_DB_HOST}:${BACKUP_DB_PORT}/${BACKUP_DB_NAME}?ssl=true&sslInsecure=true&tls=true&tlsInsecure=true --archive"
+ fileExtension: .{{ include "mongodb-dbaas.fullname" . }}.bson
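+  # Illustrative expansion with assumed env values (user "lagoon", host
+  # "mongo.example.com", port 27017, db "appdb"); the escaped quotes keep the
+  # ampersands in the URI from being parsed as shell control operators:
+  #   mongodump --uri="mongodb://lagoon:***@mongo.example.com:27017/appdb?ssl=true&sslInsecure=true&tls=true&tlsInsecure=true" --archive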
+ pod:
+ metadata:
+ labels:
+ prebackuppod: {{ include "mongodb-dbaas.fullname" . }}
+ {{- include "mongodb-dbaas.labels" . | nindent 8 }}
+ spec:
+ containers:
+ - args:
+ - sleep
+ - infinity
+ env:
+ - name: BACKUP_DB_HOST
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_HOST
+ name: lagoon-env
+ - name: BACKUP_DB_USER
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_USER
+ name: lagoon-env
+ - name: BACKUP_DB_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_PASSWORD
+ name: lagoon-env
+ - name: BACKUP_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_DB_NAME
+ name: lagoon-env
+ - name: BACKUP_DB_PORT
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_DB_PORT
+ name: lagoon-env
+ - name: BACKUP_DB_AUTHSOURCE
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_DB_AUTHSOURCE
+ name: lagoon-env
+ - name: BACKUP_DB_AUTHMECHANISM
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_DB_AUTHMECHANISM
+ name: lagoon-env
+ - name: BACKUP_DB_AUTHTLS
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "mongodb-dbaas.fullnameUppercase" . }}_DB_AUTHTLS
+ name: lagoon-env
+ image: imagecache.amazeeio.cloud/uselagoon/php-8.0-cli
+ imagePullPolicy: Always
+ name: {{ include "mongodb-dbaas.fullname" . }}-prebackuppod
+{{ end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/values.yaml
new file mode 100644
index 0000000000..da55d1ce5c
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-dbaas/values.yaml
@@ -0,0 +1,5 @@
+# Default values for mongodb-dbaas.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+environment: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/.helmignore b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/.helmignore
new file mode 100644
index 0000000000..50af031725
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/Chart.yaml
new file mode 100644
index 0000000000..2b2cce9222
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: mongodb-single
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application.
+appVersion: 1.16.0
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/_helpers.tpl
new file mode 100644
index 0000000000..b52874eba9
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/_helpers.tpl
@@ -0,0 +1,91 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "mongodb-single.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "mongodb-single.fullname" -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "mongodb-single.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create full hostname for autogenerated hosts
+*/}}
+{{- define "mongodb-single.autogeneratedHost" -}}
+{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "mongodb-single.labels" -}}
+helm.sh/chart: {{ include "mongodb-single.chart" . }}
+{{ include "mongodb-single.selectorLabels" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "mongodb-single.lagoonLabels" . }}
+{{- end -}}
+
+{{/*
+Add annotations
+*/}}
+{{- define "mongodb-single.annotations" -}}
+{{ if .Values.annotations }}
+{{- toYaml .Values.annotations }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "mongodb-single.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "mongodb-single.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create a PriorityClassName.
+(this is based on the Lagoon Environment Type).
+*/}}
+{{- define "mongodb-single.lagoonPriority" -}}
+{{- printf "lagoon-priority-%s" .Values.environmentType }}
+{{- end -}}
+
+{{/*
+Lagoon Labels
+*/}}
+{{- define "mongodb-single.lagoonLabels" -}}
+lagoon.sh/service: {{ .Release.Name }}
+lagoon.sh/service-type: {{ .Chart.Name }}
+lagoon.sh/project: {{ .Values.project }}
+lagoon.sh/environment: {{ .Values.environment }}
+lagoon.sh/environmentType: {{ .Values.environmentType }}
+lagoon.sh/buildType: {{ .Values.buildType }}
+{{- end -}}
+
+{{/*
+Annotations
+*/}}
+{{- define "mongodb-single" -}}
+lagoon.sh/version: {{ .Values.lagoonVersion | quote }}
+{{- if .Values.branch }}
+lagoon.sh/branch: {{ .Values.branch | quote }}
+{{- end }}
+{{- if .Values.prNumber }}
+lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
+lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
+lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
+{{- end }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/cronjob.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/cronjob.yaml
new file mode 100644
index 0000000000..8859436802
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/cronjob.yaml
@@ -0,0 +1,78 @@
+{{- range $cronjobName, $cronjobConfig := .Values.nativeCronjobs }}
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ labels:
+ {{- include "mongodb-single.labels" $ | nindent 4 }}
+ annotations:
+ {{- include "mongodb-single.annotations" $ | nindent 4 }}
+spec:
+ schedule: {{ $cronjobConfig.schedule | quote }}
+ concurrencyPolicy: Forbid
+ successfulJobsHistoryLimit: 0
+ failedJobsHistoryLimit: 1
+ jobTemplate:
+ metadata:
+ labels:
+ {{- include "mongodb-single.labels" $ | nindent 8 }}
+ annotations:
+ {{- include "mongodb-single.annotations" $ | nindent 8 }}
+ spec:
+ backoffLimit: 0
+ template:
+ metadata:
+ labels:
+ {{- include "mongodb-single.labels" $ | nindent 12 }}
+ annotations:
+ {{- include "mongodb-single.annotations" $ | nindent 12 }}
+ spec:
+ {{- with $.Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ priorityClassName: {{ include "mongodb-single.lagoonPriority" $ }}
+ enableServiceLinks: false
+ securityContext:
+ {{- toYaml $.Values.podSecurityContext | nindent 12 }}
+ volumes:
+ - name: {{ include "mongodb-single.fullname" $ }}
+ persistentVolumeClaim:
+ claimName: {{ include "mongodb-single.fullname" $ }}
+ containers:
+ - image: {{ $.Values.image | quote }}
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ securityContext:
+ {{- toYaml $.Values.securityContext | nindent 16 }}
+ imagePullPolicy: {{ $.Values.imagePullPolicy }}
+ command:
+ - /lagoon/cronjob.sh
+ - {{ $cronjobConfig.command }}
+ env:
+ - name: LAGOON_GIT_SHA
+ value: {{ $.Values.gitSha | quote }}
+ - name: SERVICE_NAME
+ value: {{ $.Release.Name | quote }}
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ resources:
+ {{- toYaml $.Values.resources | nindent 16 }}
+ volumeMounts:
+ - name: {{ include "mongodb-single.fullname" $ }}
+ mountPath: {{ $.Values.persistentStorage.path | quote }}
+ restartPolicy: Never
+          {{- with $.Values.nodeSelector }}
+          nodeSelector:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with $.Values.affinity }}
+          affinity:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- with $.Values.tolerations }}
+          tolerations:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+{{- end }}
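+# Illustrative values driving this range (assumed); each map key renders one
+# CronJob named cronjob-<release>-<key> running /lagoon/cronjob.sh "<command>":
+#   nativeCronjobs:
+#     nightly-task:
+#       schedule: '0 2 * * *'
+#       command: 'echo nightly'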
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/deployment.yaml
new file mode 100644
index 0000000000..3e29659f2d
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/deployment.yaml
@@ -0,0 +1,72 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "mongodb-single.fullname" . }}
+ labels:
+ {{- include "mongodb-single.labels" . | nindent 4 }}
+ annotations:
+ {{- include "mongodb-single.annotations" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ strategy:
+ type: Recreate
+ selector:
+ matchLabels:
+ {{- include "mongodb-single.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "mongodb-single.labels" . | nindent 8 }}
+ annotations:
+ {{- include "mongodb-single.annotations" . | nindent 8 }}
+ k8up.syn.tools/backupcommand: /bin/sh -c '/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path | quote }} .'
+ k8up.syn.tools/file-extension: .{{ include "mongodb-single.fullname" . }}.tar
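+        # Illustrative rendering, assuming release "mongodb" and the default
+        # persistentStorage.path "/data/db"; k8up streams the data dir as tar:
+        #   k8up.syn.tools/backupcommand: /bin/sh -c '/bin/busybox tar -cf - -C "/data/db" .'
+        #   k8up.syn.tools/file-extension: .mongodb.tar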
+ lagoon.sh/configMapSha: {{ .Values.configMapSha | quote }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ include "mongodb-single.fullname" . }}
+ persistentVolumeClaim:
+ claimName: {{ include "mongodb-single.fullname" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image }}"
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ env:
+ - name: CRONJOBS
+ value: {{ .Values.inPodCronjobs | quote }}
+ - name: LAGOON_GIT_SHA
+ value: {{ .Values.gitSha | quote }}
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ ports:
+ - containerPort: 27017
+ protocol: TCP
+ readinessProbe:
+ tcpSocket:
+ port: 27017
+ initialDelaySeconds: 1
+ timeoutSeconds: 1
+ volumeMounts:
+ - name: {{ include "mongodb-single.fullname" . }}
+ mountPath: {{ .Values.persistentStorage.path | quote }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/pvc.yaml
new file mode 100644
index 0000000000..f918b19a9e
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/pvc.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: "{{ include "mongodb-single.fullname" . }}"
+ labels:
+ {{- include "mongodb-single.labels" . | nindent 4 }}
+  annotations:
+    k8up.syn.tools/backup: "false"
+    {{- include "mongodb-single.annotations" . | nindent 4 }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.persistentStorage.size | quote }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/service.yaml
new file mode 100644
index 0000000000..e0f8d76dbc
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "mongodb-single.fullname" . }}
+ labels:
+ {{- include "mongodb-single.labels" . | nindent 4 }}
+ annotations:
+ {{- include "mongodb-single.annotations" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: 27017
+ protocol: TCP
+ name: 27017-tcp
+ selector:
+ {{- include "mongodb-single.selectorLabels" . | nindent 4 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/values.yaml
new file mode 100644
index 0000000000..f458c5dde2
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/mongodb-single/values.yaml
@@ -0,0 +1,75 @@
+# Default values for mongodb-single.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image: ""
+environmentType: production
+persistentStorage:
+ size: 5Gi
+ path: "/data/db"
+
+imagePullPolicy: Always
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 27017
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: mongodb-single
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths: []
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+annotations: {}
+
+resources:
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+inPodCronjobs: ""
+
+tls_acme: false
+routesAutogenerateInsecure: Allow
+
+configMapSha: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl
index 7783be5ffc..7a247eea11 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/_helpers.tpl
@@ -32,6 +32,14 @@ Create full hostname for autogenerated hosts
{{ end }}
{{- end -}}
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "nginx-php-persistent.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
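+{{/*
+Illustrative rendering (assumed values): with Release.Name "nginx-php" and
+routesAutogenerateShortSuffix "abc1234.example.com" this yields
+"nginx-php.abc1234.example.com". The ingress template adds this short host to
+the TLS hosts only when the full autogenerated host exceeds 63 characters.
+*/}}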
+
{{/*
Generate name of Persistent Storage
Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/ingress.yaml
index 0bbdf51538..b52ad50766 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/ingress.yaml
@@ -25,6 +25,9 @@ spec:
tls:
- hosts:
{{- $host := include "nginx-php-persistent.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "nginx-php-persistent.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
- {{ $host | quote }}
{{- if $.Values.routesAutogeneratePrefixes }}
{{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml
index 286c758d02..46fef5dc39 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/templates/pvc.yaml
@@ -7,7 +7,7 @@ metadata:
annotations:
{{- include "nginx-php-persistent.annotations" . | nindent 4 }}
annotations:
- appuio.ch/backup: "true"
+ k8up.syn.tools/backup: "true"
spec:
accessModes:
- ReadWriteMany
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/values.yaml
index 003caaf163..e524d379e2 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php-persistent/values.yaml
@@ -2,7 +2,7 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
-replicaCount:
+replicaCount: 1
images:
nginx: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/_helpers.tpl
index 7562523676..553f541623 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/_helpers.tpl
@@ -32,6 +32,14 @@ Create full hostname for autogenerated hosts
{{ end }}
{{- end -}}
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "nginx-php.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/ingress.yaml
index d993810ee6..4042a541d3 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx-php/templates/ingress.yaml
@@ -25,6 +25,9 @@ spec:
tls:
- hosts:
{{- $host := include "nginx-php.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "nginx-php.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
- {{ $host | quote }}
{{- if $.Values.routesAutogeneratePrefixes }}
{{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/_helpers.tpl
index 623d96ec7f..5925a431cf 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/_helpers.tpl
@@ -32,6 +32,14 @@ Create full hostname for autogenerated hosts
{{ end }}
{{- end -}}
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "nginx.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/ingress.yaml
index e241b8e1ed..35f0f41237 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/nginx/templates/ingress.yaml
@@ -25,6 +25,9 @@ spec:
tls:
- hosts:
{{- $host := include "nginx.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "nginx.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
- {{ $host | quote }}
{{- if $.Values.routesAutogeneratePrefixes }}
{{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl
index 738ebe437b..6f69143d58 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/_helpers.tpl
@@ -32,6 +32,14 @@ Create full hostname for autogenerated hosts
{{ end }}
{{- end -}}
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "node-persistent.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
+
{{/*
Generate name of Persistent Storage
Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/ingress.yaml
index 90d7ccbea5..8368f4e6ac 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/ingress.yaml
@@ -25,6 +25,9 @@ spec:
tls:
- hosts:
{{- $host := include "node-persistent.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "node-persistent.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
- {{ $host | quote }}
{{- if $.Values.routesAutogeneratePrefixes }}
{{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml
index 97adc712a7..beea490c13 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/templates/pvc.yaml
@@ -5,11 +5,12 @@ metadata:
labels:
{{- include "node-persistent.labels" . | nindent 4 }}
annotations:
- appuio.ch/backup: "true"
+ k8up.syn.tools/backup: "true"
{{- include "node-persistent.annotations" . | nindent 4 }}
spec:
accessModes:
- - ReadWriteOnce
+ - ReadWriteMany
+ storageClassName: bulk
resources:
requests:
storage: {{ .Values.persistentStorage.size | quote }}
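+# ReadWriteMany plus the "bulk" storage class (assumed to be provided by the
+# target cluster) lets scaled deployments share one volume. Illustrative
+# rendered spec, assuming persistentStorage.size "5Gi":
+#   accessModes:
+#     - ReadWriteMany
+#   storageClassName: bulk
+#   resources:
+#     requests:
+#       storage: "5Gi"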
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/values.yaml
index 2de3dc9f82..474ee70e55 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node-persistent/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/node-persistent/values.yaml
@@ -2,7 +2,7 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
-replicaCount:
+replicaCount: 1
image: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/node/templates/_helpers.tpl
index 13495ff66d..2dfa04cb6c 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/node/templates/_helpers.tpl
@@ -32,6 +32,14 @@ Create full hostname for autogenerated hosts
{{ end }}
{{- end -}}
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "node.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/node/templates/ingress.yaml
index 8e3e3a89ac..19b586ec0a 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/node/templates/ingress.yaml
@@ -25,6 +25,9 @@ spec:
tls:
- hosts:
{{- $host := include "node.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "node.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
- {{ $host | quote }}
{{- if $.Values.routesAutogeneratePrefixes }}
{{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/node/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/node/values.yaml
index a98c59f0cf..46c94380e2 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/node/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/node/values.yaml
@@ -2,7 +2,7 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
-replicaCount:
+replicaCount: 1
image: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/.helmignore b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/.helmignore
new file mode 100644
index 0000000000..50af031725
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/Chart.yaml
new file mode 100644
index 0000000000..d14919043e
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: postgres-dbaas
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application.
+appVersion: 1.16.0
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/_helpers.tpl
new file mode 100644
index 0000000000..d8680f0a1b
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/_helpers.tpl
@@ -0,0 +1,87 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "postgres-dbaas.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgres-dbaas.fullname" -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "postgres-dbaas.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create full hostname for autogenerated hosts
+*/}}
+{{- define "postgres-dbaas.autogeneratedHost" -}}
+{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
+{{- end -}}
+
+{{- define "postgres-dbaas.fullnameUppercase" -}}
+{{ include "postgres-dbaas.fullname" . | upper | replace "-" "_" }}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "postgres-dbaas.labels" -}}
+helm.sh/chart: {{ include "postgres-dbaas.chart" . }}
+{{ include "postgres-dbaas.selectorLabels" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "postgres-dbaas.lagoonLabels" . }}
+{{- end -}}
+
+{{/*
+Add annotations
+*/}}
+{{- define "postgres-dbaas.annotations" -}}
+{{ if .Values.annotations }}
+{{- toYaml .Values.annotations }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "postgres-dbaas.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "postgres-dbaas.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Lagoon Labels
+*/}}
+{{- define "postgres-dbaas.lagoonLabels" -}}
+lagoon.sh/service: {{ .Release.Name }}
+lagoon.sh/service-type: {{ .Chart.Name }}
+lagoon.sh/project: {{ .Values.project }}
+lagoon.sh/environment: {{ .Values.environment }}
+lagoon.sh/environmentType: {{ .Values.environmentType }}
+lagoon.sh/buildType: {{ .Values.buildType }}
+{{- end -}}
+
+{{/*
+Annotations
+*/}}
+{{- define "postgres-dbaas" -}}
+lagoon.sh/version: {{ .Values.lagoonVersion | quote }}
+{{- if .Values.branch }}
+lagoon.sh/branch: {{ .Values.branch | quote }}
+{{- end }}
+{{- if .Values.prNumber }}
+lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
+lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
+lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
+{{- end }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/dbaas.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/dbaas.yaml
new file mode 100644
index 0000000000..231c05a628
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/dbaas.yaml
@@ -0,0 +1,11 @@
+apiVersion: postgres.amazee.io/v1
+kind: PostgreSQLConsumer
+metadata:
+ name: {{ include "postgres-dbaas.fullname" . }}
+ labels:
+ {{- include "postgres-dbaas.labels" . | nindent 4 }}
+ annotations:
+ {{- include "postgres-dbaas.annotations" . | nindent 4 }}
+
+spec:
+  environment: {{ .Values.environment }}
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/prebackuppod.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/prebackuppod.yaml
new file mode 100644
index 0000000000..39f509e896
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/templates/prebackuppod.yaml
@@ -0,0 +1,52 @@
+{{ if .Capabilities.APIVersions.Has "backup.appuio.ch/v1alpha1/PreBackupPod" }}
+apiVersion: backup.appuio.ch/v1alpha1
+kind: PreBackupPod
+metadata:
+ name: {{ include "postgres-dbaas.fullname" . }}-prebackuppod
+ labels:
+ {{- include "postgres-dbaas.labels" . | nindent 4 }}
+ annotations:
+ {{- include "postgres-dbaas.annotations" . | nindent 4 }}
+spec:
+ backupCommand: /bin/sh -c "PGPASSWORD=$BACKUP_DB_PASSWORD pg_dump --host=$BACKUP_DB_HOST --port=$BACKUP_DB_PORT --dbname=$BACKUP_DB_NAME --format=t -w"
+ fileExtension: .{{ include "postgres-dbaas.fullname" . }}.tar
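+  # Illustrative expansion with assumed env values (host "postgres.example.com",
+  # port 5432, db "appdb"); --format=t emits a tar archive and -w never prompts
+  # for a password, relying on PGPASSWORD instead:
+  #   PGPASSWORD=*** pg_dump --host=postgres.example.com --port=5432 --dbname=appdb --format=t -w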
+ pod:
+ metadata:
+ labels:
+ prebackuppod: {{ include "postgres-dbaas.fullname" . }}
+ {{- include "postgres-dbaas.labels" . | nindent 8 }}
+ spec:
+ containers:
+ - args:
+ - sleep
+ - infinity
+ env:
+ - name: BACKUP_DB_HOST
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "postgres-dbaas.fullnameUppercase" . }}_HOST
+ name: lagoon-env
+ - name: BACKUP_DB_PORT
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "postgres-dbaas.fullnameUppercase" . }}_PORT
+ name: lagoon-env
+ - name: BACKUP_DB_USERNAME
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "postgres-dbaas.fullnameUppercase" . }}_USERNAME
+ name: lagoon-env
+ - name: BACKUP_DB_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "postgres-dbaas.fullnameUppercase" . }}_PASSWORD
+ name: lagoon-env
+ - name: BACKUP_DB_NAME
+ valueFrom:
+ configMapKeyRef:
+ key: {{ include "postgres-dbaas.fullnameUppercase" . }}_NAME
+ name: lagoon-env
+ image: imagecache.amazeeio.cloud/uselagoon/php-8.0-cli
+ imagePullPolicy: Always
+ name: {{ include "postgres-dbaas.fullname" . }}-prebackuppod
+{{ end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/values.yaml
new file mode 100644
index 0000000000..730bcf19b9
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-dbaas/values.yaml
@@ -0,0 +1,5 @@
+# Default values for postgres-dbaas.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+environment: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/.helmignore b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/.helmignore
new file mode 100644
index 0000000000..50af031725
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/Chart.yaml
new file mode 100644
index 0000000000..3d92547a07
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: postgres-single
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application.
+appVersion: 1.16.0
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/_helpers.tpl
new file mode 100644
index 0000000000..06529fe3b5
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/_helpers.tpl
@@ -0,0 +1,91 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "postgres-single.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "postgres-single.fullname" -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "postgres-single.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create full hostname for autogenerated hosts
+*/}}
+{{- define "postgres-single.autogeneratedHost" -}}
+{{- printf "%s.%s" .Release.Name .Values.routesAutogenerateSuffix | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "postgres-single.labels" -}}
+helm.sh/chart: {{ include "postgres-single.chart" . }}
+{{ include "postgres-single.selectorLabels" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "postgres-single.lagoonLabels" . }}
+{{- end -}}
+
+{{/*
+Add annotations
+*/}}
+{{- define "postgres-single.annotations" -}}
+{{ if .Values.annotations }}
+{{- toYaml .Values.annotations }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "postgres-single.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "postgres-single.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create a PriorityClassName.
+(this is based on the Lagoon Environment Type).
+*/}}
+{{- define "postgres-single.lagoonPriority" -}}
+{{- printf "lagoon-priority-%s" .Values.environmentType }}
+{{- end -}}
+
+{{/*
+Lagoon Labels
+*/}}
+{{- define "postgres-single.lagoonLabels" -}}
+lagoon.sh/service: {{ .Release.Name }}
+lagoon.sh/service-type: {{ .Chart.Name }}
+lagoon.sh/project: {{ .Values.project }}
+lagoon.sh/environment: {{ .Values.environment }}
+lagoon.sh/environmentType: {{ .Values.environmentType }}
+lagoon.sh/buildType: {{ .Values.buildType }}
+{{- end -}}
+
+{{/*
+Annotations
+*/}}
+{{- define "postgres-single" -}}
+lagoon.sh/version: {{ .Values.lagoonVersion | quote }}
+{{- if .Values.branch }}
+lagoon.sh/branch: {{ .Values.branch | quote }}
+{{- end }}
+{{- if .Values.prNumber }}
+lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
+lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
+lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
+{{- end }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/cronjob.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/cronjob.yaml
new file mode 100644
index 0000000000..2baf28c650
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/cronjob.yaml
@@ -0,0 +1,78 @@
+{{- range $cronjobName, $cronjobConfig := .Values.nativeCronjobs }}
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ labels:
+ {{- include "postgres-single.labels" $ | nindent 4 }}
+ annotations:
+ {{- include "postgres-single.annotations" $ | nindent 4 }}
+spec:
+ schedule: {{ $cronjobConfig.schedule | quote }}
+ concurrencyPolicy: Forbid
+ successfulJobsHistoryLimit: 0
+ failedJobsHistoryLimit: 1
+ jobTemplate:
+ metadata:
+ labels:
+ {{- include "postgres-single.labels" $ | nindent 8 }}
+ annotations:
+ {{- include "postgres-single.annotations" $ | nindent 8 }}
+ spec:
+ backoffLimit: 0
+ template:
+ metadata:
+ labels:
+ {{- include "postgres-single.labels" $ | nindent 12 }}
+ annotations:
+ {{- include "postgres-single.annotations" $ | nindent 12 }}
+ spec:
+ {{- with $.Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ priorityClassName: {{ include "postgres-single.lagoonPriority" $ }}
+ enableServiceLinks: false
+ securityContext:
+ {{- toYaml $.Values.podSecurityContext | nindent 12 }}
+ volumes:
+ - name: {{ include "postgres-single.fullname" $ }}
+ persistentVolumeClaim:
+ claimName: {{ include "postgres-single.fullname" $ }}
+ containers:
+ - image: {{ $.Values.image | quote }}
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ securityContext:
+ {{- toYaml $.Values.securityContext | nindent 16 }}
+ imagePullPolicy: {{ $.Values.imagePullPolicy }}
+ command:
+ - /lagoon/cronjob.sh
+ - {{ $cronjobConfig.command }}
+ env:
+ - name: LAGOON_GIT_SHA
+ value: {{ $.Values.gitSha | quote }}
+ - name: SERVICE_NAME
+ value: {{ $.Release.Name | quote }}
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ resources:
+ {{- toYaml $.Values.resources | nindent 16 }}
+ volumeMounts:
+ - name: {{ include "postgres-single.fullname" $ }}
+ mountPath: {{ $.Values.persistentStorage.path | quote }}
+ restartPolicy: Never
+ {{- with $.Values.nodeSelector }}
+ nodeSelector:
+            {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.affinity }}
+ affinity:
+            {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.tolerations }}
+ tolerations:
+            {{- toYaml . | nindent 12 }}
+ {{- end }}
+{{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/deployment.yaml
new file mode 100644
index 0000000000..d767b23e71
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/deployment.yaml
@@ -0,0 +1,77 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "postgres-single.fullname" . }}
+ labels:
+ {{- include "postgres-single.labels" . | nindent 4 }}
+ annotations:
+ {{- include "postgres-single.annotations" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ strategy:
+ type: Recreate
+ selector:
+ matchLabels:
+ {{- include "postgres-single.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "postgres-single.labels" . | nindent 8 }}
+ annotations:
+ {{- include "postgres-single.annotations" . | nindent 8 }}
+ k8up.syn.tools/backupcommand: /bin/sh -c '/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path | quote }} .'
+ k8up.syn.tools/file-extension: .{{ include "postgres-single.fullname" . }}.tar
+ lagoon.sh/configMapSha: {{ .Values.configMapSha | quote }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: {{ include "postgres-single.fullname" . }}
+ persistentVolumeClaim:
+ claimName: {{ include "postgres-single.fullname" . }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image }}"
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ env:
+ - name: CRONJOBS
+ value: {{ .Values.inPodCronjobs | quote }}
+ - name: LAGOON_GIT_SHA
+ value: {{ .Values.gitSha | quote }}
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ ports:
+ - containerPort: 5432
+ protocol: TCP
+ readinessProbe:
+ tcpSocket:
+ port: 5432
+ initialDelaySeconds: 1
+ timeoutSeconds: 1
+ livenessProbe:
+ tcpSocket:
+ port: 5432
+ initialDelaySeconds: 120
+ periodSeconds: 5
+ volumeMounts:
+ - name: {{ include "postgres-single.fullname" . }}
+ mountPath: {{ .Values.persistentStorage.path | quote }}
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/pvc.yaml
new file mode 100644
index 0000000000..020ad75883
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/pvc.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: "{{ include "postgres-single.fullname" . }}"
+ labels:
+ {{- include "postgres-single.labels" . | nindent 4 }}
+  annotations:
+    k8up.syn.tools/backup: "false"
+    {{- include "postgres-single.annotations" . | nindent 4 }}
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ .Values.persistentStorage.size | quote }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/service.yaml
new file mode 100644
index 0000000000..55d5e22bf4
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "postgres-single.fullname" . }}
+ labels:
+ {{- include "postgres-single.labels" . | nindent 4 }}
+ annotations:
+ {{- include "postgres-single.annotations" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: 5432
+ protocol: TCP
+ name: 5432-tcp
+ selector:
+ {{- include "postgres-single.selectorLabels" . | nindent 4 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/postgres-single/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/values.yaml
new file mode 100644
index 0000000000..b24ad7e506
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/postgres-single/values.yaml
@@ -0,0 +1,75 @@
+# Default values for postgres-single.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image: ""
+environmentType: production
+persistentStorage:
+ size: 5Gi
+ path: "/var/lib/postgresql/data"
+
+imagePullPolicy: Always
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 5432
+
+ingress:
+ enabled: false
+ annotations: {}
+ # kubernetes.io/ingress.class: postgres-single
+ # kubernetes.io/tls-acme: "true"
+ hosts:
+ - host: chart-example.local
+ paths: []
+ tls: []
+ # - secretName: chart-example-tls
+ # hosts:
+ # - chart-example.local
+
+annotations: {}
+
+resources:
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+inPodCronjobs: ""
+
+tls_acme: false
+routesAutogenerateInsecure: Allow
+
+configMapSha: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/.helmignore b/images/kubectl-build-deploy-dind/helmcharts/python/.helmignore
new file mode 100644
index 0000000000..50af031725
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/Chart.yaml b/images/kubectl-build-deploy-dind/helmcharts/python/Chart.yaml
new file mode 100644
index 0000000000..f444d94772
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v2
+name: python
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/python/templates/_helpers.tpl
new file mode 100644
index 0000000000..c2bfa3ce5f
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/templates/_helpers.tpl
@@ -0,0 +1,94 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "python.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "python.fullname" -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "python.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create full hostname for autogenerated hosts
+*/}}
+{{- define "python.autogeneratedHost" -}}
+{{ if not .prefix }}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateSuffix | trimSuffix "-" -}}
+{{ else }}
+{{- printf "%s.%s.%s" .prefix .root.Release.Name .root.Values.routesAutogenerateSuffix | trimSuffix "-" -}}
+{{ end }}
+{{- end -}}
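+{{/*
+e.g. (hypothetical values) a release named "python" with routesAutogenerateSuffix
+"main.projectname.example.com" renders "python.main.projectname.example.com"; with the
+prefix "www" it renders "www.python.main.projectname.example.com".
+*/}}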
+
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "python.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
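+{{/*
+The ingress template falls back to this short host when the full autogenerated host is
+longer than 63 characters, the maximum length of a certificate CommonName.
+*/}}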
+
+{{/*
+Common labels
+*/}}
+{{- define "python.labels" -}}
+helm.sh/chart: {{ include "python.chart" . }}
+{{ include "python.selectorLabels" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{ include "python.lagoonLabels" . }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "python.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "python.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create a PriorityClassName.
+(this is based on the Lagoon Environment Type).
+*/}}
+{{- define "python.lagoonPriority" -}}
+{{- printf "lagoon-priority-%s" .Values.environmentType }}
+{{- end -}}
+
+{{/*
+Lagoon Labels
+*/}}
+{{- define "python.lagoonLabels" -}}
+lagoon.sh/service: {{ .Release.Name }}
+lagoon.sh/service-type: {{ .Chart.Name }}
+lagoon.sh/project: {{ .Values.project }}
+lagoon.sh/environment: {{ .Values.environment }}
+lagoon.sh/environmentType: {{ .Values.environmentType }}
+lagoon.sh/buildType: {{ .Values.buildType }}
+{{- end -}}
+
+{{/*
+Annotations
+*/}}
+{{- define "python.annotations" -}}
+lagoon.sh/version: {{ .Values.lagoonVersion | quote }}
+{{- if .Values.branch }}
+lagoon.sh/branch: {{ .Values.branch | quote }}
+{{- end }}
+{{- if .Values.prNumber }}
+lagoon.sh/prNumber: {{ .Values.prNumber | quote }}
+lagoon.sh/prHeadBranch: {{ .Values.prHeadBranch | quote }}
+lagoon.sh/prBaseBranch: {{ .Values.prBaseBranch | quote }}
+{{- end }}
+{{- end -}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/templates/cronjob.yaml b/images/kubectl-build-deploy-dind/helmcharts/python/templates/cronjob.yaml
new file mode 100644
index 0000000000..019eac8d8c
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/templates/cronjob.yaml
@@ -0,0 +1,71 @@
+{{- range $cronjobName, $cronjobConfig := .Values.nativeCronjobs }}
+---
+apiVersion: batch/v1beta1
+kind: CronJob
+metadata:
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ labels:
+ {{- include "python.labels" $ | nindent 4 }}
+ annotations:
+ {{- include "python.annotations" . | nindent 4 }}
+spec:
+ schedule: {{ $cronjobConfig.schedule | quote }}
+ concurrencyPolicy: Forbid
+ successfulJobsHistoryLimit: 0
+ failedJobsHistoryLimit: 1
+ jobTemplate:
+ metadata:
+ labels:
+ {{- include "python.labels" $ | nindent 8 }}
+ annotations:
+ {{- include "python.annotations" $ | nindent 8 }}
+ spec:
+ backoffLimit: 0
+ template:
+ metadata:
+ labels:
+ {{- include "python.labels" $ | nindent 12 }}
+ annotations:
+ {{- include "python.annotations" $ | nindent 12 }}
+ spec:
+ {{- with $.Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ priorityClassName: {{ include "python.lagoonPriority" . }}
+ enableServiceLinks: false
+ securityContext:
+ {{- toYaml $.Values.podSecurityContext | nindent 12 }}
+ containers:
+ - image: {{ $.Values.image | quote }}
+ name: cronjob-{{ $.Release.Name }}-{{ $cronjobName }}
+ securityContext:
+ {{- toYaml $.Values.securityContext | nindent 16 }}
+ imagePullPolicy: {{ $.Values.imagePullPolicy }}
+ command:
+ - /lagoon/cronjob.sh
+ - {{ $cronjobConfig.command }}
+ env:
+ - name: LAGOON_GIT_SHA
+ value: {{ $.Values.gitSha | quote }}
+ - name: SERVICE_NAME
+ value: {{ $.Release.Name | quote }}
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ resources:
+ {{- toYaml $.Values.resources | nindent 16 }}
+ restartPolicy: Never
+ {{- with $.Values.nodeSelector }}
+ nodeSelector:
+            {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.affinity }}
+ affinity:
+            {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- with $.Values.tolerations }}
+ tolerations:
+            {{- toYaml . | nindent 12 }}
+ {{- end }}
+{{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/python/templates/deployment.yaml
new file mode 100644
index 0000000000..5d36f25d03
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/templates/deployment.yaml
@@ -0,0 +1,73 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "python.fullname" . }}
+ labels:
+ {{- include "python.labels" . | nindent 4 }}
+ annotations:
+ {{- include "python.annotations" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "python.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "python.labels" . | nindent 8 }}
+ annotations:
+ {{- include "python.annotations" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ priorityClassName: {{ include "python.lagoonPriority" . }}
+ enableServiceLinks: false
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ containers:
+ - image: {{ .Values.image | quote }}
+ name: {{ .Chart.Name }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 12 }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8800
+ protocol: TCP
+ readinessProbe:
+ tcpSocket:
+ port: 8800
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ livenessProbe:
+ tcpSocket:
+ port: 8800
+ initialDelaySeconds: 60
+ periodSeconds: 5
+ env:
+            ## LAGOON_GIT_SHA is injected directly and not loaded via the `lagoon-env` ConfigMap.
+            ## This will cause the pod to redeploy on every deployment, even if the files have not changed.
+ - name: LAGOON_GIT_SHA
+ value: {{ .Values.gitSha | quote }}
+ - name: CRONJOBS
+ value: |
+ {{- toYaml .Values.inPodCronjobs | nindent 16 }}
+ envFrom:
+ - configMapRef:
+ name: lagoon-env
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/python/templates/ingress.yaml
new file mode 100644
index 0000000000..67bde8c4be
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/templates/ingress.yaml
@@ -0,0 +1,60 @@
+{{- if .Values.routesAutogenerateEnabled -}}
+{{- $fullName := include "python.fullname" . -}}
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: {{ $fullName }}
+ labels:
+ lagoon.sh/autogenerated: "true"
+ {{- include "python.labels" . | nindent 4 }}
+ annotations:
+ {{- if eq .Values.routesAutogenerateInsecure "Allow"}}
+ nginx.ingress.kubernetes.io/ssl-redirect: "false"
+ ingress.kubernetes.io/ssl-redirect: "false"
+ {{- else if eq .Values.routesAutogenerateInsecure "Redirect"}}
+ nginx.ingress.kubernetes.io/ssl-redirect: "true"
+ ingress.kubernetes.io/ssl-redirect: "true"
+ {{- else if eq .Values.routesAutogenerateInsecure "None"}}
+ nginx.ingress.kubernetes.io/ssl-redirect: "true"
+ ingress.kubernetes.io/ssl-redirect: "true"
+ {{- end }}
+ kubernetes.io/tls-acme: "true"
+ {{- include "python.annotations" . | nindent 4 }}
+spec:
+ tls:
+ - hosts:
+ {{- $host := include "python.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "python.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
+ - {{ $host | quote }}
+ {{- if $.Values.routesAutogeneratePrefixes }}
+ {{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
+ {{- $host := include "python.autogeneratedHost" (dict "root" $ "prefix" $prefix) }}
+ - {{ $host | quote }}
+ {{- end }}
+ {{- end }}
+ secretName: {{ $fullName }}-tls
+ rules:
+ {{- $host := include "python.autogeneratedHost" (dict "root" $) }}
+ - host: {{ $host | quote }}
+ http:
+ paths:
+ - backend:
+ serviceName: {{ $fullName }}
+ servicePort: {{ $.Values.service.port }}
+ {{- if $.Values.routesAutogeneratePrefixes }}
+ {{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
+ {{- $host := include "python.autogeneratedHost" (dict "root" $ "prefix" $prefix) }}
+ - host: {{ $host | quote }}
+ http:
+ paths:
+ - backend:
+ serviceName: {{ $fullName }}
+ servicePort: {{ $.Values.service.port }}
+ {{- end }}
+ {{- end }}
+{{- else }}
+# empty
+{{- end }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/templates/service.yaml b/images/kubectl-build-deploy-dind/helmcharts/python/templates/service.yaml
new file mode 100644
index 0000000000..86073d5df4
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/templates/service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "python.fullname" . }}
+ labels:
+ {{- include "python.labels" . | nindent 4 }}
+ annotations:
+ {{- include "python.annotations" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "python.selectorLabels" . | nindent 4 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/python/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/python/values.yaml
new file mode 100644
index 0000000000..d327798748
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/helmcharts/python/values.yaml
@@ -0,0 +1,48 @@
+# Default values for python.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image: ""
+
+environmentType: production
+
+imagePullPolicy: Always
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+ # capabilities:
+ # drop:
+ # - ALL
+ # readOnlyRootFilesystem: true
+ # runAsNonRoot: true
+ # runAsUser: 1000
+
+service:
+ type: ClusterIP
+ port: 8800
+
+resources:
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ requests:
+ cpu: 10m
+ memory: 10Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+inPodCronjobs: ""
+
+configMapSha: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml
index c1fa488359..1c2d75e5de 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/deployment.yaml
@@ -20,6 +20,8 @@ spec:
{{- include "redis-persistent.datadogLabels" . | nindent 8 }}
annotations:
{{- include "redis-persistent.annotations" . | nindent 8 }}
+        k8up.syn.tools/backupcommand: /bin/sh -c "/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path }} ."
+        k8up.syn.tools/file-extension: .{{ include "redis-persistent.fullname" . }}.tar
lagoon.sh/configMapSha: {{ .Values.configMapSha | quote }}
spec:
{{- with .Values.imagePullSecrets }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml
index a2e665a74f..b36bb405ab 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/redis-persistent/templates/pvc.yaml
@@ -5,7 +5,7 @@ metadata:
labels:
{{- include "redis-persistent.labels" . | nindent 4 }}
annotations:
- appuio.ch/backup: "false"
+ k8up.syn.tools/backup: "false"
{{- include "redis-persistent.annotations" . | nindent 4 }}
spec:
accessModes:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/redis/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/redis/templates/deployment.yaml
index 68fb7fc5d1..25df1f78c1 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/redis/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/redis/templates/deployment.yaml
@@ -9,7 +9,7 @@ metadata:
spec:
replicas: {{ .Values.replicaCount }}
strategy:
- type: Recreate
+ type: RollingUpdate
selector:
matchLabels:
{{- include "redis.selectorLabels" . | nindent 6 }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml
index 3940b72a9d..0d319c0bb7 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/deployment.yaml
@@ -20,6 +20,8 @@ spec:
{{- include "solr.datadogLabels" . | nindent 8 }}
annotations:
{{- include "solr.annotations" . | nindent 8 }}
+ k8up.syn.tools/backupcommand: /bin/sh -c '/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path | quote }} .'
+ k8up.syn.tools/file-extension: .{{ include "solr.fullname" . }}.tar
lagoon.sh/configMapSha: {{ .Values.configMapSha | quote }}
spec:
{{- with .Values.imagePullSecrets }}
@@ -33,7 +35,7 @@ spec:
priorityClassName: {{ include "solr.lagoonPriority" . }}
enableServiceLinks: false
securityContext:
- fsGroup: 0
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- image: {{ .Values.image | quote }}
name: {{ .Chart.Name }}
@@ -52,8 +54,6 @@ spec:
initialDelaySeconds: 90
timeoutSeconds: 3
failureThreshold: 5
- securityContext:
- runAsGroup: 0
envFrom:
- configMapRef:
name: lagoon-env
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/prebackuppod.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/prebackuppod.yaml
deleted file mode 100644
index 6d437693cc..0000000000
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/prebackuppod.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-{{ if .Capabilities.APIVersions.Has "backup.appuio.ch/v1alpha1/PreBackupPod" }}
-apiVersion: backup.appuio.ch/v1alpha1
-kind: PreBackupPod
-metadata:
- name: {{ include "solr.fullname" . }}-prebackuppod
- labels:
- {{- include "solr.labels" . | nindent 4 }}
- annotations:
- {{- include "solr.annotations" . | nindent 4 }}
-spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path }} ."
- fileExtension: .{{ include "solr.fullname" . }}.tar
- pod:
- metadata:
- labels:
- prebackuppod: {{ include "solr.fullname" . }}
- {{- include "solr.labels" . | nindent 8 }}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: lagoon.sh/service
- operator: In
- values:
- - {{ include "solr.fullname" . }}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: {{ include "solr.fullname" . }}-prebackuppod
- volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
- mountPath: {{ .Values.persistentStorage.path | quote }}
- volumes:
- - name: {{ .Values.persistentStorage.name }}
- persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
-{{ end }}
-
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml
index 5c9a313b1c..78e6f13d23 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/solr/templates/pvc.yaml
@@ -5,7 +5,7 @@ metadata:
labels:
{{- include "solr.labels" . | nindent 4 }}
annotations:
- appuio.ch/backup: "false"
+ k8up.syn.tools/backup: "false"
{{- include "solr.annotations" . | nindent 4 }}
spec:
accessModes:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml b/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml
index 825122a659..b0b393e1bc 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/solr/values.yaml
@@ -2,7 +2,7 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
-replicaCount:
+replicaCount: 1
image: ""
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl
index 22fc0dfa31..01f550d0c3 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/_helpers.tpl
@@ -32,6 +32,14 @@ Create full hostname for autogenerated hosts
{{ end }}
{{- end -}}
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "varnish-persistent.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
+
{{/*
Generate name of Persistent Storage
Uses the Release Name (Lagoon Service Name) unless it's overwritten via .Values.persistentStorage.name
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml
index 4dd5f59eec..a781afeff3 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/deployment.yaml
@@ -20,6 +20,8 @@ spec:
{{- include "varnish-persistent.datadogLabels" . | nindent 8 }}
annotations:
{{- include "varnish-persistent.annotations" . | nindent 8 }}
+        k8up.syn.tools/backupcommand: /bin/sh -c "/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path }} ."
+        k8up.syn.tools/file-extension: .{{ include "varnish-persistent.fullname" . }}.tar
lagoon.sh/configMapSha: {{ .Values.configMapSha | quote }}
spec:
{{- with .Values.imagePullSecrets }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/ingress.yaml
index 62a0334083..6f70ebe95a 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/ingress.yaml
@@ -25,6 +25,9 @@ spec:
tls:
- hosts:
{{- $host := include "varnish-persistent.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "varnish-persistent.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
- {{ $host | quote }}
{{- if $.Values.routesAutogeneratePrefixes }}
{{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/prebackuppod.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/prebackuppod.yaml
deleted file mode 100644
index 91d4cbdabb..0000000000
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/prebackuppod.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-{{ if .Capabilities.APIVersions.Has "backup.appuio.ch/v1alpha1/PreBackupPod" }}
-apiVersion: backup.appuio.ch/v1alpha1
-kind: PreBackupPod
-metadata:
- name: {{ include "varnish-persistent.fullname" . }}-prebackuppod
- labels:
- {{- include "varnish-persistent.labels" . | nindent 4 }}
- annotations:
- {{- include "varnish-persistent.annotations" . | nindent 4 }}
-spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C {{ .Values.persistentStorage.path }} ."
- fileExtension: .{{ include "varnish-persistent.fullname" . }}.tar
- pod:
- metadata:
- labels:
- prebackuppod: {{ include "varnish-persistent.fullname" . }}
- {{- include "varnish-persistent.labels" . | nindent 8 }}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: lagoon.sh/service
- operator: In
- values:
- - {{ include "varnish-persistent.fullname" . }}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: {{ include "varnish-persistent.fullname" . }}-prebackuppod
- volumeMounts:
- - name: {{ .Values.persistentStorage.name }}
- mountPath: {{ .Values.persistentStorage.path | quote }}
- volumes:
- - name: {{ .Values.persistentStorage.name }}
- persistentVolumeClaim:
- claimName: {{ .Values.persistentStorage.name }}
-{{ end }}
-
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml
index 9e956adfa5..0c715597bc 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish-persistent/templates/pvc.yaml
@@ -5,7 +5,7 @@ metadata:
labels:
{{- include "varnish-persistent.labels" . | nindent 4 }}
annotations:
- appuio.ch/backup: "false"
+ k8up.syn.tools/backup: "false"
{{- include "varnish-persistent.annotations" . | nindent 4 }}
spec:
accessModes:
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/_helpers.tpl b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/_helpers.tpl
index 1a9990e361..78456819da 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/_helpers.tpl
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/_helpers.tpl
@@ -32,6 +32,14 @@ Create full hostname for autogenerated hosts
{{ end }}
{{- end -}}
+{{/*
+Create short hostname for autogenerated hosts.
+This is used to work around problems with long CN fields in certificates.
+*/}}
+{{- define "varnish.autogeneratedShortHost" -}}
+{{- printf "%s.%s" .root.Release.Name .root.Values.routesAutogenerateShortSuffix }}
+{{- end }}
+
{{/*
Common labels
*/}}
diff --git a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/ingress.yaml b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/ingress.yaml
index 43f310f9da..2f158d8831 100644
--- a/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/ingress.yaml
+++ b/images/kubectl-build-deploy-dind/helmcharts/varnish/templates/ingress.yaml
@@ -25,6 +25,9 @@ spec:
tls:
- hosts:
{{- $host := include "varnish.autogeneratedHost" (dict "root" $) }}
+ {{- if and .Values.routesAutogenerateShortSuffix (gt ($host | len) 63) }}
+ - {{ include "varnish.autogeneratedShortHost" (dict "root" $) | quote }}
+ {{- end }}
- {{ $host | quote }}
{{- if $.Values.routesAutogeneratePrefixes }}
{{- range $k, $prefix := $.Values.routesAutogeneratePrefixes }}
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-additional-yaml.sh b/images/kubectl-build-deploy-dind/scripts/exec-additional-yaml.sh
deleted file mode 100755
index 170efa78ed..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-additional-yaml.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash -x
-
-oc process --local -o yaml --insecure-skip-tls-verify \
- -n ${NAMESPACE} \
- -f "${ADDITIONAL_YAML_PATH}" \
- -p SAFE_BRANCH="${SAFE_BRANCH}" \
- -p SAFE_PROJECT="${SAFE_PROJECT}" \
- -p BRANCH="${BRANCH}" \
- -p PROJECT="${PROJECT}" \
- -p LAGOON_GIT_SHA="${LAGOON_GIT_SHA}" \
- -p NAMESPACE=${NAMESPACE} \
- | oc ${ADDITIONAL_YAML_COMMAND} --insecure-skip-tls-verify -n ${NAMESPACE} -f - || ${ADDITIONAL_YAML_IGNORE_ERROR}
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-fastly-annotations.sh b/images/kubectl-build-deploy-dind/scripts/exec-fastly-annotations.sh
new file mode 100755
index 0000000000..39343d2c50
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/scripts/exec-fastly-annotations.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# this script is used to work out the fastly annotation overrides that could be defined in the lagoon api
+
+# if a service id was provided in the `.lagoon.yml`, it will already be present in `ROUTE_FASTLY_SERVICE_ID`; if not
+if [ -z "$ROUTE_FASTLY_SERVICE_ID" ]; then
+ # then insert the one provided by lagoon in `LAGOON_FASTLY_NOCACHE_SERVICE_ID` if it is available
+ if [ ! -z "$LAGOON_FASTLY_NOCACHE_SERVICE_ID" ]; then
+ ROUTE_FASTLY_SERVICE_ID=$LAGOON_FASTLY_NOCACHE_SERVICE_ID
+ # if the nocache service id was injected by the lagoon builddeploy controller
+ # then set the watch status to true so it is set in the ingress annotations
+ # if the lagoon builddeploy controller has the fastly service injection disabled
+ # then the `LAGOON_FASTLY_NOCACHE_SERVICE_ID` will be empty
+ ROUTE_FASTLY_SERVICE_WATCH=true
+ fi
+fi
+
+# check lagoon api variables for `LAGOON_FASTLY_SERVICE_ID`
+# this is supported as `SERVICE_ID:WATCH_STATUS:SECRET_NAME(optional)` eg: "fa23rsdgsdgas:false", "fa23rsdgsdgas:true" or "fa23rsdgsdgas:true:examplecom"
+# this will apply to ALL ingresses if one is not specifically defined in the `LAGOON_FASTLY_SERVICE_IDS` environment variable override
+# see section `FASTLY SERVICE ID PER INGRESS OVERRIDE` in `build-deploy-docker-compose.sh` for info on `LAGOON_FASTLY_SERVICE_IDS`
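+# e.g. (hypothetical values) "fa23rsdgsdgas:true:examplecom" splits into
+#   [0] "fa23rsdgsdgas" - the fastly service id
+#   [1] "true"          - the watch status
+#   [2] "examplecom"    - the optional api secret name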
+if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ LAGOON_FASTLY_SERVICE_ID_DATA=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_FASTLY_SERVICE_ID") | "\(.value)"'))
+ echo $LAGOON_FASTLY_SERVICE_ID_DATA
+ if [ ! -z "$LAGOON_FASTLY_SERVICE_ID_DATA" ]; then
+ IFS=':' read -ra LAGOON_FASTLY_SERVICE_ID_SPLIT <<< "$LAGOON_FASTLY_SERVICE_ID_DATA"
+ if [ -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[0]}" ] || [ -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[1]}" ]; then
+ echo -e "An override was defined in the lagoon API with LAGOON_FASTLY_SERVICE_ID but one of the components was missing, the format should be FASTLY_SERVICE_ID:WATCH_STATUS"
+ exit 1
+ fi
+ LAGOON_FASTLY_SERVICE_ID=${LAGOON_FASTLY_SERVICE_ID_SPLIT[0]}
+ LAGOON_FASTLY_SERVICE_WATCH=${LAGOON_FASTLY_SERVICE_ID_SPLIT[1]}
+ fi
+fi
+if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
+ TEMP_LAGOON_FASTLY_SERVICE_ID_DATA=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_FASTLY_SERVICE_ID") | "\(.value)"'))
+  if [ ! -z "$TEMP_LAGOON_FASTLY_SERVICE_ID_DATA" ]; then
+ IFS=':' read -ra LAGOON_FASTLY_SERVICE_ID_SPLIT <<< "$TEMP_LAGOON_FASTLY_SERVICE_ID_DATA"
+ if [ -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[0]}" ] || [ -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[1]}" ]; then
+ echo -e "An override was defined in the lagoon API with LAGOON_FASTLY_SERVICE_ID but one of the components was missing, the format should be FASTLY_SERVICE_ID:WATCH_STATUS"
+ exit 1
+ fi
+ LAGOON_FASTLY_SERVICE_ID=${LAGOON_FASTLY_SERVICE_ID_SPLIT[0]}
+ LAGOON_FASTLY_SERVICE_WATCH=${LAGOON_FASTLY_SERVICE_ID_SPLIT[1]}
+ # if the optional secret name is defined in the colon separated values configure that here
+    if [ ! -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[2]}" ]; then
+ LAGOON_FASTLY_SERVICE_API_SECRET=${LAGOON_FASTLY_SERVICE_ID_SPLIT[2]}
+ fi
+ fi
+fi
+
+# check the `LAGOON_FASTLY_SERVICE_IDS` to see if we have a domain specific override
+# this is useful if all domains are using the nocache service, but you have a specific domain that should use a different service
+# and you haven't defined it in the lagoon.yml file
+# see section `FASTLY SERVICE ID PER INGRESS OVERRIDE` in `build-deploy-docker-compose.sh` for info on `LAGOON_FASTLY_SERVICE_IDS`
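+# e.g. (hypothetical values):
+#   LAGOON_FASTLY_SERVICE_IDS="www.example.com:fa23rsdgsdgas:true,api.example.com:fa23rsdgsdgas:false:examplecom"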
+if [ ! -z "$LAGOON_FASTLY_SERVICE_IDS" ]; then
+ IFS=',' read -ra LAGOON_FASTLY_SERVICE_IDS_SPLIT <<< "$LAGOON_FASTLY_SERVICE_IDS"
+ for LAGOON_FASTLY_SERVICE_ID_DATA in "${LAGOON_FASTLY_SERVICE_IDS_SPLIT[@]}"
+ do
+ IFS=':' read -ra LAGOON_FASTLY_SERVICE_ID_SPLIT <<< "$LAGOON_FASTLY_SERVICE_ID_DATA"
+ if [ -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[0]}" ] || [ -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[1]}" ] || [ -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[2]}" ]; then
+ echo -e "An override was defined in the lagoon API with LAGOON_FASTLY_SERVICE_IDS but was not structured correctly, the format should be DOMAIN_NAME:FASTLY_SERVICE_ID:WATCH_STATUS and comma separated for multiples"
+ exit 1
+ fi
+ if [ "${LAGOON_FASTLY_SERVICE_ID_SPLIT[0]}" == "$ROUTE_DOMAIN" ]; then
+ LAGOON_FASTLY_SERVICE_ID=${LAGOON_FASTLY_SERVICE_ID_SPLIT[1]}
+ LAGOON_FASTLY_SERVICE_WATCH=${LAGOON_FASTLY_SERVICE_ID_SPLIT[2]}
+ # if the optional secret name is defined in the colon separated values configure that here
+      if [ ! -z "${LAGOON_FASTLY_SERVICE_ID_SPLIT[3]}" ]; then
+ LAGOON_FASTLY_SERVICE_API_SECRET=${LAGOON_FASTLY_SERVICE_ID_SPLIT[3]}
+ fi
+ fi
+ done
+fi
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-fastly-api-secrets.sh b/images/kubectl-build-deploy-dind/scripts/exec-fastly-api-secrets.sh
new file mode 100755
index 0000000000..40d8e3fb4d
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/scripts/exec-fastly-api-secrets.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# this script is used to create/update the fastly api secrets
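+# it expects the caller to provide FASTLY_API_SECRET_NAME, FASTLY_API_TOKEN,
+# FASTLY_API_PLATFORMTLS_CONFIGURATION, YAML_FOLDER and HELM_ARGUMENTS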
+
+helm template ${FASTLY_API_SECRET_NAME} \
+ /kubectl-build-deploy/helmcharts/fastly-api-secret \
+ --set fastly.apiToken="${FASTLY_API_TOKEN}" \
+ --set fastly.platformTLSConfiguration="${FASTLY_API_PLATFORMTLS_CONFIGURATION}" \
+ -f /kubectl-build-deploy/values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/00-${FASTLY_API_SECRET_NAME}.yaml
+  ## this api secret needs to exist before the ingress is created, so try to prioritise it by putting it numerically ahead of any ingresses
+
+# add the name to the array because it will be used during the ingress steps to ensure that the secret will exist before annotating any
+# ingresses that may want to use it
+FASTLY_API_SECRETS+=(${FASTLY_API_SECRET_NAME})
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubectl-mongodb-dbaas.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-mongodb-dbaas.sh
new file mode 100644
index 0000000000..85b7a21335
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-mongodb-dbaas.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# The operator can sometimes take a bit, wait until the details are available
+# We added a timeout of 15 minutes (180 retries at 5 second intervals) before exit
+OPERATOR_COUNTER=1
+OPERATOR_TIMEOUT=180
+# use the secret name from the consumer to prevent credential clash
+until kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database
+do
+if [ $OPERATOR_COUNTER -lt $OPERATOR_TIMEOUT ]; then
+ let OPERATOR_COUNTER=OPERATOR_COUNTER+1
+ echo "Service for ${SERVICE_NAME} not available yet, waiting for 5 secs"
+ sleep 5
+else
+ echo "Timeout of $OPERATOR_TIMEOUT for ${SERVICE_NAME} creation reached"
+ exit 1
+fi
+done
+set +x
+# Grab the details from the consumer spec
+DB_HOST=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.services.primary)
+DB_USER=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.username)
+DB_PASSWORD=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.password)
+DB_NAME=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database)
+DB_PORT=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.port)
+DB_AUTHSOURCE=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.auth.source)
+DB_AUTHMECHANISM=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.auth.mechanism)
+DB_AUTHTLS=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.auth.tls)
+
+# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker
+kubectl patch --insecure-skip-tls-verify \
+ -n ${NAMESPACE} \
+ configmap lagoon-env \
+ -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\", \"${SERVICE_NAME_UPPERCASE}_AUTHSOURCE\":\"${DB_AUTHSOURCE}\", \"${SERVICE_NAME_UPPERCASE}_AUTHMECHANISM\":\"${DB_AUTHMECHANISM}\", \"${SERVICE_NAME_UPPERCASE}_AUTHTLS\":\"${DB_AUTHTLS}\" }}"
+
+set -x
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubectl-postgres-dbaas.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-postgres-dbaas.sh
new file mode 100644
index 0000000000..319f6f30e1
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/scripts/exec-kubectl-postgres-dbaas.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# The operator can sometimes take a bit, wait until the details are available
+# We added a timeout of 15 minutes (180 retries at 5 second intervals) before exit
+OPERATOR_COUNTER=1
+OPERATOR_TIMEOUT=180
+# use the secret name from the consumer to prevent credential clash
+until kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database
+do
+if [ $OPERATOR_COUNTER -lt $OPERATOR_TIMEOUT ]; then
+  let OPERATOR_COUNTER=OPERATOR_COUNTER+1
+ echo "Service for ${SERVICE_NAME} not available yet, waiting for 5 secs"
+ sleep 5
+else
+ echo "Timeout of $OPERATOR_TIMEOUT for ${SERVICE_NAME} creation reached"
+ exit 1
+fi
+done
+set +x
+# Grab the details from the consumer spec
+DB_HOST=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.services.primary)
+DB_USER=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.username)
+DB_PASSWORD=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.password)
+DB_NAME=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database)
+DB_PORT=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.port)
+
+# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker
+kubectl patch --insecure-skip-tls-verify \
+ -n ${NAMESPACE} \
+ configmap lagoon-env \
+ -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}"
+
+# only add the DB_READREPLICA_HOSTS variable if it exists in the consumer spec
+# since the operator can support multiple replica hosts being defined, we comma-separate them here
+if DB_READREPLICA_HOSTS=$(kubectl --insecure-skip-tls-verify -n ${NAMESPACE} get postgresqlconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.services.replicas); then
+ DB_READREPLICA_HOSTS=$(echo $DB_READREPLICA_HOSTS | cut -c 3- | rev | cut -c 1- | rev | sed 's/^\|$//g' | paste -sd, -)
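+  # the pipeline above is intended to strip the leading "- " from each line of the shyaml
+  # list output and join the lines, e.g. (hypothetical) "- replica-1\n- replica-2"
+  # becomes "replica-1,replica-2"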
+ kubectl patch --insecure-skip-tls-verify \
+ -n ${NAMESPACE} \
+ configmap lagoon-env \
+ -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_READREPLICA_HOSTS\":\"${DB_READREPLICA_HOSTS}\"}}"
+fi
+
+set -x
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-copy-to-registry.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-copy-to-registry.sh
new file mode 100644
index 0000000000..86d29b567f
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-copy-to-registry.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+skopeo copy --dest-tls-verify=false docker://${IMAGECACHE_REGISTRY}/${PULL_IMAGE} docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
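+# e.g. (hypothetical values) this copies docker://imagecache.example.com/amazeeio/nginx
+# to docker://registry.example.com/myproject/main/nginx:latest without needing a local docker daemon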
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-create-pvc.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-create-pvc.sh
deleted file mode 100644
index 08cb06a8c1..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-create-pvc.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-# Only generate PVC if it does not exist yet
-if ! oc --insecure-skip-tls-verify -n ${NAMESPACE} get pvc "$PVC_NAME" &> /dev/null; then
- . /oc-build-deploy/scripts/exec-openshift-resources.sh
-fi
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-create-servicebroker.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-create-servicebroker.sh
deleted file mode 100755
index 9fad838182..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-create-servicebroker.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Check if the ServiceInstance exists and create if not
-if svcat -n ${NAMESPACE} get instances "${SERVICE_NAME}" &> /dev/null; then
- echo "ServiceInstance ${SERVICE_NAME} already existing, not attempting to update"
-else
- # Provision the Instance
- svcat -n ${NAMESPACE} provision "${SERVICE_NAME}" --class "${SERVICEBROKER_CLASS}" --plan "${SERVICEBROKER_PLAN}"
-fi
-
-# Check if the resulting Secret from the ServiceBinding exists and create if not.
-if oc --insecure-skip-tls-verify -n ${NAMESPACE} get secret "${SERVICE_NAME}-servicebroker-credentials" &> /dev/null; then
- echo "Secret '${SERVICE_NAME}-servicebroker-credentials' already existing, not attempting to update"
-else
- # Sometimes the secret is not existing anymore even though the binding still exists.
- # Not exactly sure yet how and why that happens, but we handle it with unbinding first and then bind again.
- if svcat -n ${NAMESPACE} get bindings "${SERVICE_NAME}-servicebroker-credentials" &> /dev/null; then
- echo "WARNING: Binding '${SERVICE_NAME}-servicebroker-credentials' existing, but the secret not, unbinding and bind again."
- svcat -n ${NAMESPACE} unbind ${SERVICE_NAME} --name "${SERVICE_NAME}-servicebroker-credentials" --wait
- # wait 5 seconds as sometimes the unbinding is not fully through yet
- sleep 5
- fi
- # Create the binding, the secret will be named after the name of the binding.
- svcat -n ${NAMESPACE} bind ${SERVICE_NAME} --name "${SERVICE_NAME}-servicebroker-credentials"
-fi
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-promote.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-promote.sh
new file mode 100644
index 0000000000..307fee7cb5
--- /dev/null
+++ b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-promote.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+skopeo copy --src-tls-verify=false --dest-tls-verify=false docker://${REGISTRY}/${PROJECT}/${PROMOTION_SOURCE_ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest} docker://${REGISTRY}/${PROJECT}/${ENVIRONMENT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
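+# e.g. (hypothetical values) promoting from PROMOTION_SOURCE_ENVIRONMENT=staging to ENVIRONMENT=main copies
+# docker://registry.example.com/myproject/staging/nginx:latest to docker://registry.example.com/myproject/main/nginx:latest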
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-resources.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-resources.sh
deleted file mode 100755
index 0806c4b9a0..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-resources.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-cat /kubectl-build-deploy/values.yaml
-
-helm template ${SERVICE_NAME} /kubectl-build-deploy/helmcharts/${SERVICE_TYPE} -s ${HELM_TEMPLATE} -f /kubectl-build-deploy/values.yaml "${HELM_ARGUMENTS[@]}" > $YAML_FOLDER/${SERVICE_NAME}.yaml
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-tag-dockerhub.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-tag-dockerhub.sh
deleted file mode 100644
index c905c0b82b..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-tag-dockerhub.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-oc --insecure-skip-tls-verify -n ${NAMESPACE} tag --reference-policy=local --source=docker ${PULL_IMAGE} ${NAMESPACE}/${IMAGE_NAME}:latest
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-tag.sh b/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-tag.sh
deleted file mode 100644
index 3df9c7f9b8..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-kubernetes-tag.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-oc --insecure-skip-tls-verify -n ${NAMESPACE} tag ${PROMOTION_SOURCE_NAMESPACE}/${IMAGE_NAME}:latest ${NAMESPACE}/${IMAGE_NAME}:latest
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-monitor-daemonset.sh b/images/kubectl-build-deploy-dind/scripts/exec-monitor-daemonset.sh
deleted file mode 100755
index 9daf28e7b1..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-monitor-daemonset.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-# while the rollout of a new daemonset is running we gather the logs of the new generated pods and save them in a known location
-# in case this rollout fails, we show the logs of the new containers to the user as they might contain information about why
-# the rollout has failed
-stream_logs_daemonset() {
- set +x
- # load the generation of the new pods
- GENERATION=$(oc -n ${NAMESPACE} get --insecure-skip-tls-verify daemonset ${DAEMONSET} -o=go-template --template='{{.metadata.generation}}')
- mkdir -p /tmp/oc-build-deploy/logs/container/${DAEMONSET}
-
- # this runs in a loop forever (until killed)
- while [ 1 ]
- do
- # Gather all pods and their containers for the current rollout and stream their logs into files
- oc -n ${NAMESPACE} get --insecure-skip-tls-verify pods -l "pod-template-generation=${GENERATION},service=${DAEMONSET}" -o json | jq -r '.items[] | .metadata.name + " " + .spec.containers[].name' |
- {
- while read -r POD CONTAINER ; do
- oc -n ${NAMESPACE} logs --insecure-skip-tls-verify --timestamps -f $POD -c $CONTAINER 2> /dev/null > /tmp/oc-build-deploy/logs/container/${DAEMONSET}/$POD-$CONTAINER.log &
- done
-
- # this will wait for all log streaming we started to finish
- wait
- }
-
- # If we are here, this means the pods have all stopped (probably because they failed), we just restart
- done
-}
-
-# start background logs streaming
-stream_logs_daemonset &
-STREAM_LOGS_PID=$!
-
-DESIRED_NUMBER=$(oc --insecure-skip-tls-verify -n ${NAMESPACE} get daemonset "${DAEMONSET}" -o=go-template --template='{{.status.desiredNumberScheduled}}')
-MAX_WAIT_SECONDS=600
-END=$((SECONDS+$MAX_WAIT_SECONDS))
-
-while true; do
- if [[ $SECONDS -gt $END ]]; then
- # stop all running stream logs
- pkill -P $STREAM_LOGS_PID || true
-
- # shows all logs we collected for the new containers
- if [ -z "$(ls -A /tmp/oc-build-deploy/logs/container/${DAEMONSET})" ]; then
- echo "Daemonset '${DAEMONSET}' was not fully scaled within $MAX_WAIT_SECONDS seconds, tried to gather some startup logs of the containers, but unfortunately there were none created, sorry."
- else
- echo "Daemonset '${DAEMONSET}' was not fully scaled within $MAX_WAIT_SECONDS seconds, tried to gather some startup logs of the containers, hope this helps debugging:"
- find /tmp/oc-build-deploy/logs/container/${DAEMONSET}/ -type f -print0 2>/dev/null | xargs -0 -I % sh -c 'echo ======== % =========; cat %; echo'
- fi
-
- exit 1
- fi
-
- NUMBER_READY=$(oc --insecure-skip-tls-verify -n ${NAMESPACE} get daemonset "${DAEMONSET}" -o=go-template --template='{{.status.numberReady}}')
- if [[ $NUMBER_READY == $DESIRED_NUMBER ]]; then
- echo "Daemonset '${DAEMONSET}' ready: $NUMBER_READY of $DESIRED_NUMBER ready"
- break
- else
- echo "Daemonset '${DAEMONSET}' not ready yet: $NUMBER_READY of $DESIRED_NUMBER ready, waiting..."
- fi
-
- sleep 10
-done
-
-# stop all running stream logs
-pkill -P $STREAM_LOGS_PID || true
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-monitor-statefulset.sh b/images/kubectl-build-deploy-dind/scripts/exec-monitor-statefulset.sh
deleted file mode 100755
index 3613cfef51..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-monitor-statefulset.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-# while the rollout of a new statefulset is running we gather the logs of the new generated pods and save them in a known location
-# in case this rollout fails, we show the logs of the new containers to the user as they might contain information about why
-# the rollout has failed
-stream_logs_statefulset() {
- set +x
- # load the version of the new pods
- UPDATE_REVISION=$(oc -n ${NAMESPACE} get --insecure-skip-tls-verify statefulset ${STATEFULSET} -o=go-template --template='{{.status.updateRevision}}')
- mkdir -p /tmp/oc-build-deploy/logs/container/${STATEFULSET}
-
- # this runs in a loop forever (until killed)
- while [ 1 ]
- do
- # Gather all pods and their containers for the current statefulset revision and stream their logs into files
- oc -n ${NAMESPACE} get --insecure-skip-tls-verify pods -l controller-revision-hash=${UPDATE_REVISION} -o json | jq -r '.items[] | .metadata.name + " " + .spec.containers[].name' |
- {
- while read -r POD CONTAINER ; do
- oc -n ${NAMESPACE} logs --insecure-skip-tls-verify --timestamps -f $POD -c $CONTAINER 2> /dev/null > /tmp/oc-build-deploy/logs/container/${STATEFULSET}/$POD-$CONTAINER.log &
- done
-
- # this will wait for all log streaming we started to finish
- wait
- }
-
- # If we are here, this means the pods have all stopped (probably because they failed), we just restart
- done
-}
-
-# start background logs streaming
-stream_logs_statefulset &
-STREAM_LOGS_PID=$!
-
-REPLICAS=$(oc --insecure-skip-tls-verify -n ${NAMESPACE} get statefulset "${STATEFULSET}" -o=go-template --template='{{.spec.replicas}}')
-MAX_WAIT_SECONDS=600
-END=$((SECONDS+$MAX_WAIT_SECONDS))
-
-while true; do
- if [[ $SECONDS -gt $END ]]; then
- # stop all running stream logs
- pkill -P $STREAM_LOGS_PID || true
-
- # shows all logs we collected for the new containers
- if [ -z "$(ls -A /tmp/oc-build-deploy/logs/container/${STATEFULSET})" ]; then
- echo "Statefulset '${STATEFULSET}' was not fully scaled within $MAX_WAIT_SECONDS seconds, tried to gather some startup logs of the containers, but unfortunately there were none created, sorry."
- else
- echo "Statefulset '${STATEFULSET}' was not fully scaled within $MAX_WAIT_SECONDS seconds, tried to gather some startup logs of the containers, hope this helps debugging:"
- find /tmp/oc-build-deploy/logs/container/${STATEFULSET}/ -type f -print0 2>/dev/null | xargs -0 -I % sh -c 'echo ======== % =========; cat %; echo'
- fi
-
- exit 1
- fi
-
- READY_REPLICAS=$(oc --insecure-skip-tls-verify -n ${NAMESPACE} get statefulset "${STATEFULSET}" -o=go-template --template='{{.status.readyReplicas}}')
- if [[ $READY_REPLICAS == $REPLICAS ]]; then
- echo "Statefulset '${STATEFULSET}' ready: $READY_REPLICAS of $REPLICAS ready"
- break
- else
- echo "Statefulset '${STATEFULSET}' not ready yet: $READY_REPLICAS of $REPLICAS ready, waiting..."
- fi
-
- sleep 10
-done
-
-# stop all running stream logs
-pkill -P $STREAM_LOGS_PID || true
diff --git a/images/kubectl-build-deploy-dind/scripts/exec-push-parallel-tug.sh b/images/kubectl-build-deploy-dind/scripts/exec-push-parallel-tug.sh
deleted file mode 100755
index 0583775833..0000000000
--- a/images/kubectl-build-deploy-dind/scripts/exec-push-parallel-tug.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-docker tag ${TEMPORARY_IMAGE_NAME} ${REGISTRY}/${TUG_REGISTRY_REPOSITORY}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
-
-echo "docker push ${REGISTRY}/${TUG_REGISTRY_REPOSITORY}/${IMAGE_NAME}:${IMAGE_TAG:-latest}" >> /oc-build-deploy/lagoon/push
-
diff --git a/images/kubectl-build-deploy-dind/tug.sh b/images/kubectl-build-deploy-dind/tug.sh
deleted file mode 100755
index 766a015c23..0000000000
--- a/images/kubectl-build-deploy-dind/tug.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-set -x
-set -eo pipefail
-
-THIS_IS_TUG=true
-
-# Import environment variables, keeping any already-set env variables intact
-TMPFILE=$(mktemp -t dotenv.XXXXXXXX)
-export -p > $TMPFILE
-
-# set -a is short for `set -o allexport` which will export all variables in a file
-set -a
-. /oc-build-deploy/tug/env
-set +a
-
-# now export all previously existing environment variables so they take precedence over any defined in the dotenv files
-. $TMPFILE || true
-# remove the tmpfile
-rm $TMPFILE
-
-
-
-REGISTRY=docker-registry.default.svc:5000
-NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
-REGISTRY_REPOSITORY=$NAMESPACE
-
-if [ "$CI" == "true" ]; then
- CI_OVERRIDE_IMAGE_REPO=${REGISTRY}/lagoon
-else
- CI_OVERRIDE_IMAGE_REPO=""
-fi
-
-if [ ! -f .lagoon.yml ]; then
- echo "no .lagoon.yml file found"; exit 1;
-fi
-
-DEPLOYER_TOKEN=$(cat /var/run/secrets/lagoon/deployer/token)
-
-oc login --insecure-skip-tls-verify --token="${DEPLOYER_TOKEN}" https://kubernetes.default.svc
-
-. /oc-build-deploy/build-deploy-docker-compose.sh
diff --git a/images/kubectl-build-deploy-dind/tug/Dockerfile b/images/kubectl-build-deploy-dind/tug/Dockerfile
deleted file mode 100644
index 00b47d05f4..0000000000
--- a/images/kubectl-build-deploy-dind/tug/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-amazeeio}/oc
-
-ENV LAGOON=oc-build-deploy-tug
-
-RUN mkdir -p /oc-build-deploy
-
-# Copying already checked out git repo from oc-build-deploy-dind into tug
-COPY . /oc-build-deploy
-
-WORKDIR /oc-build-deploy/git
-
-
-CMD ["/oc-build-deploy/tug.sh"]
\ No newline at end of file
diff --git a/images/kubectl-build-deploy-dind/tug/tug-build-push.sh b/images/kubectl-build-deploy-dind/tug/tug-build-push.sh
deleted file mode 100644
index 797411a43c..0000000000
--- a/images/kubectl-build-deploy-dind/tug/tug-build-push.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-TUG_REGISTRY=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.tug.registry false)
-TUG_REGISTRY_USERNAME=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.tug.username false)
-TUG_REGISTRY_PASSWORD=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.tug.password false)
-TUG_REGISTRY_REPOSITORY=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.tug.repository false)
-TUG_IMAGE_PREFIX=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.tug.image-prefix '')
-
-
-# Login into TUG registry
-docker login -u="${TUG_REGISTRY_USERNAME}" -p="${TUG_REGISTRY_PASSWORD}" ${TUG_REGISTRY}
-# Overwrite the registry with the tug registry, so Images are pushed there
-REGISTRY=$TUG_REGISTRY
-REGISTRY_REPOSITORY=$TUG_REGISTRY_REPOSITORY
-
-# Make sure the images in IMAGES_PULL are available and can be tagged for pushing them to the external repository afterwards
-# In order to get the Service Name and the Image, we first get the Keys `${!IMAGES_PULL[@]}` of the Array and then resolve each Key to its value via ${IMAGES_PULL[${IMAGE_NAME}]}
-
-for PULL_IMAGE_NAME in "${!IMAGES_PULL[@]}"
-do
- PULL_IMAGE="${IMAGES_PULL[${PULL_IMAGE_NAME}]}"
- TEMPORARY_IMAGE_NAME="${NAMESPACE}-${PULL_IMAGE_NAME}"
- docker pull ${PULL_IMAGE}
- docker tag ${PULL_IMAGE} ${TEMPORARY_IMAGE_NAME}
-done
-
-for IMAGE_NAME in "${IMAGES[@]}"
-do
- # Before the push the temporary name is resolved to the future tag with the registry in the image name
- TEMPORARY_IMAGE_NAME="${NAMESPACE}-${IMAGE_NAME}"
- ORIGINAL_IMAGE_NAME="${IMAGE_NAME}"
- IMAGE_NAME="${TUG_IMAGE_PREFIX}${IMAGE_NAME}"
- IMAGE_TAG="${SAFE_BRANCH}"
- . /oc-build-deploy/scripts/exec-push-parallel-tug.sh
- echo "${ORIGINAL_IMAGE_NAME}" >> /oc-build-deploy/tug/images
-done
-
-# Save the current environment variables so the tug deployment can use them
-echo "TYPE=\"${TYPE}\"" >> /oc-build-deploy/tug/env
-echo "SAFE_BRANCH=\"${SAFE_BRANCH}\"" >> /oc-build-deploy/tug/env
-echo "BRANCH=\"${BRANCH}\"" >> /oc-build-deploy/tug/env
-echo "SAFE_PROJECT=\"${SAFE_PROJECT}\"" >> /oc-build-deploy/tug/env
-echo "PROJECT=\"${PROJECT}\"" >> /oc-build-deploy/tug/env
-echo "ROUTER_URL=\"${ROUTER_URL}\"" >> /oc-build-deploy/tug/env
-echo "ENVIRONMENT_TYPE=\"${ENVIRONMENT_TYPE}\"" >> /oc-build-deploy/tug/env
-echo "CI=\"${CI}\"" >> /oc-build-deploy/tug/env
-echo "LAGOON_GIT_SHA=\"${LAGOON_GIT_SHA}\"" >> /oc-build-deploy/tug/env
-echo "TUG_REGISTRY=\"${TUG_REGISTRY}\"" >> /oc-build-deploy/tug/env
-echo "TUG_REGISTRY_USERNAME=\"${TUG_REGISTRY_USERNAME}\"" >> /oc-build-deploy/tug/env
-echo "TUG_REGISTRY_PASSWORD=\"${TUG_REGISTRY_PASSWORD}\"" >> /oc-build-deploy/tug/env
-echo "TUG_REGISTRY_REPOSITORY=\"${TUG_REGISTRY_REPOSITORY}\"" >> /oc-build-deploy/tug/env
-echo "TUG_IMAGE_PREFIX=\"${TUG_IMAGE_PREFIX}\"" >> /oc-build-deploy/tug/env
-
-# build the tug docker image
-IMAGE_NAME="${TUG_IMAGE_PREFIX}lagoon-tug"
-BUILD_CONTEXT="/oc-build-deploy/"
-DOCKERFILE="tug/Dockerfile"
-BUILD_ARGS=()
-BUILD_ARGS+=(--build-arg IMAGE_REPO="${CI_OVERRIDE_IMAGE_REPO}")
-TEMPORARY_IMAGE_NAME="${NAMESPACE}-${IMAGE_NAME}"
-. /oc-build-deploy/scripts/exec-build.sh
-IMAGE_TAG="${SAFE_BRANCH}"
-. /oc-build-deploy/scripts/exec-push-parallel-tug.sh
-
-# If we have Images to Push to the Registry, let's do so
-if [ -f /oc-build-deploy/lagoon/push ]; then
- parallel --retries 4 < /oc-build-deploy/lagoon/push
-fi
diff --git a/images/kubectl/Dockerfile b/images/kubectl/Dockerfile
index 99bc279f48..1cfbb0fdd1 100644
--- a/images/kubectl/Dockerfile
+++ b/images/kubectl/Dockerfile
@@ -1,12 +1,12 @@
-ARG ALPINE_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM golang:1.13-alpine${ALPINE_VERSION} as golang
+ARG UPSTREAM_REPO
+ARG UPSTREAM_TAG
+FROM ${UPSTREAM_REPO:-uselagoon}/commons:${UPSTREAM_TAG:-latest} as commons
+FROM golang:1.13-alpine3.12 as golang
RUN apk add --no-cache git
RUN go get github.com/a8m/envsubst/cmd/envsubst
-FROM docker:19.03.10
+FROM docker:19.03.14
LABEL maintainer="amazee.io"
ENV LAGOON=oc
@@ -34,9 +34,9 @@ ENV TMPDIR=/tmp \
BASH_ENV=/home/.bashrc
# Defining Versions
-ENV KUBECTL_VERSION=v1.16.2 \
- HELM_VERSION=v3.0.0-rc.2 \
- HELM_SHA256=b6fff8e01aa6cd9a4541bd48172bb53b9a0ae38d7e7783a8e0fcc1db63802aaa
+ENV KUBECTL_VERSION=v1.20.4 \
+ HELM_VERSION=v3.5.2 \
+ HELM_SHA256=01b317c506f8b6ad60b11b1dc3f093276bb703281cb1ae01132752253ec706a2
RUN apk add -U --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing aufs-util \
&& apk add --update openssl curl jq parallel \
diff --git a/images/logstash/Dockerfile6 b/images/logstash/Dockerfile6
deleted file mode 100644
index 7963ee0d63..0000000000
--- a/images/logstash/Dockerfile6
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM docker.elastic.co/logstash/logstash:6.8.2
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=logstash
-
-USER root
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /home /home
-
-RUN curl -sL https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-# Reproduce behavior of Alpine: Run Bash as sh
-RUN rm -f /bin/sh && ln -s /bin/bash /bin/sh
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN fix-permissions /usr/share/logstash/data \
- && fix-permissions /usr/share/logstash/config
-
-ENV LS_JAVA_OPTS "-Xms400m -Xmx400m"
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash", "/usr/local/bin/docker-entrypoint"]
\ No newline at end of file
diff --git a/images/logstash/Dockerfile7 b/images/logstash/Dockerfile7
deleted file mode 100644
index b2c6c6d4ec..0000000000
--- a/images/logstash/Dockerfile7
+++ /dev/null
@@ -1,39 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM docker.elastic.co/logstash/logstash:7.3.0
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=logstash
-
-USER root
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /home /home
-
-RUN curl -sL https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-# Reproduce behavior of Alpine: Run Bash as sh
-RUN rm -f /bin/sh && ln -s /bin/bash /bin/sh
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN fix-permissions /usr/share/logstash/data \
- && fix-permissions /usr/share/logstash/config
-
-ENV LS_JAVA_OPTS "-Xms400m -Xmx400m"
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash", "/usr/local/bin/docker-entrypoint"]
\ No newline at end of file
diff --git a/images/mariadb-drupal/Dockerfile b/images/mariadb-drupal/Dockerfile
deleted file mode 100644
index cdce5d1b35..0000000000
--- a/images/mariadb-drupal/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/mariadb
-
-ENV MARIADB_DATABASE=drupal \
- MARIADB_USER=drupal \
- MARIADB_PASSWORD=drupal
diff --git a/images/mariadb/Dockerfile b/images/mariadb/Dockerfile
deleted file mode 100644
index bb400b8b70..0000000000
--- a/images/mariadb/Dockerfile
+++ /dev/null
@@ -1,78 +0,0 @@
-ARG ALPINE_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM alpine:${ALPINE_VERSION}
-
-LABEL maintainer="amazee.io"
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-ENV BACKUPS_DIR="/var/lib/mysql/backup"
-
-ENV MARIADB_DATABASE=lagoon \
- MARIADB_USER=lagoon \
- MARIADB_PASSWORD=lagoon \
- MARIADB_ROOT_PASSWORD=Lag00n
-
-RUN \
- apk add --no-cache --virtual .common-run-deps \
- bash \
- curl \
- mariadb \
- mariadb-client \
- mariadb-common \
- mariadb-server-utils \
- net-tools \
- pwgen \
- tzdata \
- wget \
- gettext; \
- rm -rf /tmp/* /var/tmp/* /var/cache/apk/* /var/cache/distfiles/*; \
- rm -rf /var/lib/mysql/* /etc/mysql/ /etc/my.cnf*; \
- curl -sSL http://mysqltuner.pl/ -o mysqltuner.pl
-
-COPY entrypoints/ /lagoon/entrypoints/
-COPY mysql-backup.sh /lagoon/
-COPY my.cnf /etc/mysql/my.cnf
-
-RUN for i in /var/run/mysqld /var/lib/mysql /etc/mysql/conf.d /docker-entrypoint-initdb.d/ "${BACKUPS_DIR}" /home; \
- do mkdir -p $i; chown mysql $i; /bin/fix-permissions $i; \
- done
-
-COPY root/usr/share/container-scripts/mysql/readiness-probe.sh /usr/share/container-scripts/mysql/readiness-probe.sh
-RUN /bin/fix-permissions /usr/share/container-scripts/mysql/ \
- && /bin/fix-permissions /etc/mysql
-
-RUN touch /var/log/mariadb-slow.log && /bin/fix-permissions /var/log/mariadb-slow.log \
- && touch /var/log/mariadb-queries.log && /bin/fix-permissions /var/log/mariadb-queries.log
-
-# We cannot start mysql as root, we add the user mysql to the group root and
-# change the user of the Docker Image to this user.
-RUN addgroup mysql root
-USER mysql
-ENV USER_NAME mysql
-
-WORKDIR /var/lib/mysql
-VOLUME /var/lib/mysql
-EXPOSE 3306
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash"]
-CMD ["mysqld"]
diff --git a/images/mariadb/entrypoints/100-mariadb-logging.bash b/images/mariadb/entrypoints/100-mariadb-logging.bash
deleted file mode 100755
index 976d9218a0..0000000000
--- a/images/mariadb/entrypoints/100-mariadb-logging.bash
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-if [ -n "$MARIADB_LOG_SLOW" ]; then
- echo "MARIADB_LOG_SLOW set, logging to /etc/mysql/conf.d/log-slow.cnf"
- cat << EOF > /etc/mysql/conf.d/log-slow.cnf
-[mysqld]
-slow_query_log = 1
-slow_query_log_file = /var/log/mariadb-slow.log
-EOF
-fi
-
-
-if [ -n "$MARIADB_LOG_QUERIES" ]; then
- echo "MARIADB_LOG_QUERIES set, logging to /etc/mysql/conf.d/log-queries.cnf"
- cat << EOF > /etc/mysql/conf.d/log-queries.cnf
-
-[mysqld]
-general-log
-log-output=file
-general-log-file=/var/log/mariadb-queries.log
-EOF
-fi
diff --git a/images/mariadb/entrypoints/150-mariadb-performance.bash b/images/mariadb/entrypoints/150-mariadb-performance.bash
deleted file mode 100755
index c8b73b2d45..0000000000
--- a/images/mariadb/entrypoints/150-mariadb-performance.bash
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-if [ "$LAGOON_ENVIRONMENT_TYPE" == "production" ]; then
- # only set if not already defined
- if [ -z ${MARIADB_INNODB_BUFFER_POOL_SIZE+x} ]; then
- export MARIADB_INNODB_BUFFER_POOL_SIZE=1024M
- fi
- if [ -z ${MARIADB_INNODB_LOG_FILE_SIZE+x} ]; then
- export MARIADB_INNODB_LOG_FILE_SIZE=256M
- fi
-fi
\ No newline at end of file
diff --git a/images/mariadb/entrypoints/200-mariadb-envplate.bash b/images/mariadb/entrypoints/200-mariadb-envplate.bash
deleted file mode 100755
index b5a659cc07..0000000000
--- a/images/mariadb/entrypoints/200-mariadb-envplate.bash
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-/bin/ep /etc/mysql/*
\ No newline at end of file
diff --git a/images/mariadb/entrypoints/9999-mariadb-init.bash b/images/mariadb/entrypoints/9999-mariadb-init.bash
deleted file mode 100755
index 72dc40ad22..0000000000
--- a/images/mariadb/entrypoints/9999-mariadb-init.bash
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-# Locations
-CONTAINER_SCRIPTS_DIR="/usr/share/container-scripts/mysql"
-
-if [ "$(ls -A /etc/mysql/conf.d/)" ]; then
- ep /etc/mysql/conf.d/*
-fi
-
-if [ "${1:0:1}" = '-' ]; then
- set -- mysqld "$@"
-fi
-
-wantHelp=
-for arg; do
- case "$arg" in
- -'?'|--help|--print-defaults|-V|--version)
- wantHelp=1
- break
- ;;
- esac
-done
-
-# check if MARIADB_COPY_DATA_DIR_SOURCE is set; if yes, we copy the contents of the given folder into the data dir folder
-# this allows prefilling the datadir with a provided datadir (either added in a Dockerfile build, or mounted into the running container).
-# This is different from just setting $MARIADB_DATA_DIR to the source folder: only /var/lib/mysql is a persistent folder, so setting
-# $MARIADB_DATA_DIR to another folder would make mariadb not store the datadir across container restarts, while with this copy system
-# the data is prefilled and persistent across container restarts.
-if [ -n "$MARIADB_COPY_DATA_DIR_SOURCE" ]; then
- if [ -d ${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql ]; then
- echo "MARIADB_COPY_DATA_DIR_SOURCE is set, but MySQL directory already present in '${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql' skipping copying"
- else
- echo "MARIADB_COPY_DATA_DIR_SOURCE is set, copying datadir contents from '$MARIADB_COPY_DATA_DIR_SOURCE' to '${MARIADB_DATA_DIR:-/var/lib/mysql}'"
- CUR_DIR=${PWD}
- cd ${MARIADB_COPY_DATA_DIR_SOURCE}/; tar cf - . | (cd ${MARIADB_DATA_DIR:-/var/lib/mysql}; tar xvf -)
- cd $CUR_DIR
- fi
-fi
-
-ln -sf ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf /home/.my.cnf
-
-if [ "$1" = 'mysqld' -a -z "$wantHelp" ]; then
- if [ ! -d "/run/mysqld" ]; then
- mkdir -p /run/mysqld
- chown -R mysql:mysql /run/mysqld
- fi
-
- if [ -d ${MARIADB_DATA_DIR:-/var/lib/mysql}/mysql ]; then
- echo "MySQL directory already present, skipping creation"
-
- echo "starting mysql for mysql upgrade."
- /usr/bin/mysqld --skip-networking --wsrep_on=OFF &
- pid="$!"
- echo "pid is $pid"
-
- for i in {30..0}; do
- if echo 'SELECT 1' | mysql -u root; then
- break
- fi
- echo 'MySQL init process in progress...'
- sleep 1
- done
-
- mysql_upgrade --force
-
- if ! kill -s TERM "$pid" || ! wait "$pid"; then
- echo >&2 'MySQL init process failed.'
- exit 1
- fi
- else
- echo "MySQL data directory not found, creating initial DBs"
-
- mysql_install_db --skip-name-resolve --skip-test-db --auth-root-authentication-method=normal --datadir=${MARIADB_DATA_DIR:-/var/lib/mysql} --basedir=/usr
-
- echo "starting mysql for initdb.d import."
- /usr/bin/mysqld --skip-networking --wsrep_on=OFF &
- pid="$!"
- echo "pid is $pid"
-
- for i in {30..0}; do
- if echo 'SELECT 1' | mysql -u root; then
- break
- fi
- echo 'MySQL init process in progress...'
- sleep 1
- done
-
- if [ "$MARIADB_ROOT_PASSWORD" = "" ]; then
- MARIADB_ROOT_PASSWORD=`pwgen 16 1`
- echo "[i] MySQL root Password: $MARIADB_ROOT_PASSWORD"
- fi
-
- MARIADB_DATABASE=${MARIADB_DATABASE:-""}
- MARIADB_USER=${MARIADB_USER:-""}
- MARIADB_PASSWORD=${MARIADB_PASSWORD:-""}
-
- tfile=`mktemp`
- if [ ! -f "$tfile" ]; then
- return 1
- fi
-
- cat << EOF > $tfile
-DROP DATABASE IF EXISTS test;
-USE mysql;
-ALTER USER root@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD("$MARIADB_ROOT_PASSWORD");
-FLUSH PRIVILEGES;
-
-EOF
-
- if [ "$MARIADB_DATABASE" != "" ]; then
- echo "[i] Creating database: $MARIADB_DATABASE"
- echo "CREATE DATABASE IF NOT EXISTS \`$MARIADB_DATABASE\` ;" >> $tfile
- if [ "$MARIADB_USER" != "" ]; then
- echo "[i] Creating user: $MARIADB_USER with password $MARIADB_PASSWORD"
- echo "GRANT ALL ON \`$MARIADB_DATABASE\`.* to '$MARIADB_USER'@'%' IDENTIFIED BY '$MARIADB_PASSWORD';" >> $tfile
- fi
- fi
-
-
- cat $tfile
- cat $tfile | mysql -v -u root
- rm -v -f $tfile
-
- echo "[client]" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
- echo "user=root" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
- echo "password=${MARIADB_ROOT_PASSWORD}" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
- echo "[mysql]" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
- echo "database=${MARIADB_DATABASE}" >> ${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf
-
- for f in `ls /docker-entrypoint-initdb.d/*`; do
- case "$f" in
- *.sh) echo "$0: running $f"; . "$f" ;;
- *.sql) echo "$0: running $f"; cat $f| envsubst | tee | mysql -u root -p${MARIADB_ROOT_PASSWORD}; echo ;;
- *) echo "$0: ignoring $f" ;;
- esac
- echo
- done
-
- if ! kill -s TERM "$pid" || ! wait "$pid"; then
- echo >&2 'MySQL init process failed.'
- exit 1
- fi
-
- fi
-
- echo "done, now starting daemon"
-
-fi
diff --git a/images/mariadb/my.cnf b/images/mariadb/my.cnf
deleted file mode 100644
index 2a727bb2d0..0000000000
--- a/images/mariadb/my.cnf
+++ /dev/null
@@ -1,42 +0,0 @@
-# The following options will be passed to all MariaDB clients
-[client]
-port = 3306
-socket = /run/mysqld/mysqld.sock
-
-# Here follows entries for some specific programs
-
-# The MariaDB server
-[mysqld]
-port = 3306
-socket = /run/mysqld/mysqld.sock
-datadir = ${MARIADB_DATA_DIR:-/var/lib/mysql}
-character_set_server = ${MARIADB_CHARSET:-utf8mb4}
-collation_server = ${MARIADB_COLLATION:-utf8mb4_bin}
-expire_logs_days = 10
-ignore_db_dirs=backup
-innodb_buffer_pool_size = ${MARIADB_INNODB_BUFFER_POOL_SIZE:-256M}
-innodb_buffer_pool_instances = ${MARIADB_INNODB_BUFFER_POOL_INSTANCES:-1}
-innodb_log_buffer_size = 32M
-innodb_log_file_size = ${MARIADB_INNODB_LOG_FILE_SIZE:-64M}
-join_buffer_size = 2M
-key_buffer_size = 16M
-max_allowed_packet = ${MARIADB_MAX_ALLOWED_PACKET:-64M}
-max_binlog_size = 100M
-max_connections = 400
-max_heap_table_size = 512M
-myisam-recover-options = BACKUP
-query_cache_size = 0
-query_cache_type = 0
-skip-external-locking
-skip_name_resolve = 1
-table_open_cache = 200000
-thread_cache_size = 8
-thread_stack = 256K
-tmp_table_size = 512M
-tmpdir = /tmp
-transaction-isolation = READ-COMMITTED
-skip-name-resolve
-optimizer_search_depth = 0
-innodb_flush_log_at_trx_commit = 0
-
-!includedir /etc/mysql/conf.d
diff --git a/images/mariadb/mysql-backup.sh b/images/mariadb/mysql-backup.sh
deleted file mode 100755
index bea76622ef..0000000000
--- a/images/mariadb/mysql-backup.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/sh
-#==============================================================================
-#TITLE: mysql_backup.sh
-#DESCRIPTION: script for automating the daily mysql backups on a development computer
-#AUTHOR: tleish
-#DATE: 2013-12-20
-#VERSION: 0.4
-#USAGE: ./mysql_backup.sh
-#CRON:
- # example cron for daily db backup @ 9:15 am
- # min hr mday month wday command
- # 15 9 * * * /Users/[your user name]/scripts/mysql_backup.sh
-
-#RESTORE FROM BACKUP
- #$ gunzip < [backupfile.sql.gz] | mysql -u [uname] -p[pass] [dbname]
-
-#==============================================================================
-# CUSTOM SETTINGS
-#==============================================================================
-
-set -eu -o pipefail
-
-# directory to put the backup files
-BACKUP_DIR=${MARIADB_DATA_DIR:-/var/lib/mysql}/backup
-
-# MYSQL Parameters
-MARIADB_USER=${MARIADB_USER:-lagoon}
-MARIADB_PASSWORD=${MARIADB_PASSWORD:-lagoon}
-
-MARIADB_HOST=$1
-
-# Don't backup databases with these names
-# Example: starts with mysql (^mysql) or ends with _schema (_schema$)
-IGNORE_DB="(^mysql|_schema$)"
-
-# Number of days to keep backups
-KEEP_BACKUPS_FOR=4 #days
-
-#==============================================================================
-# METHODS
-#==============================================================================
-
-# YYYY-MM-DD_HHMMSS
-TIMESTAMP=$(date +%F_%H%M%S)
-
-function prepare()
-{
- mkdir -p $BACKUP_DIR
-}
-
-function delete_old_backups()
-{
- echo "Deleting $BACKUP_DIR/*.sql.gz older than $KEEP_BACKUPS_FOR days"
- find $BACKUP_DIR -type f -name "*.sql.gz" -mtime +$KEEP_BACKUPS_FOR -exec rm {} \;
-}
-
-function mysql_login() {
- cmd="-u $MARIADB_USER -h $MARIADB_HOST"
- if [ -n "$MARIADB_PASSWORD" ]; then
- cmd="$cmd -p$MARIADB_PASSWORD"
- fi
- echo $cmd
-}
-
-function database_list() {
- local show_databases_sql="SHOW DATABASES WHERE \`Database\` NOT REGEXP '$IGNORE_DB'"
- echo $(mysql $(mysql_login) -e "$show_databases_sql"|awk -F " " '{if (NR!=1) print $1}')
-}
-
-function echo_status(){
- printf '\r';
- printf ' %0.s' {0..100}
- printf '\r';
- printf "$1"'\r'
-}
-
-function backup_database(){
- backup_file="$BACKUP_DIR/$TIMESTAMP.$database.sql.gz"
- output="${output}${database} => $backup_file\n"
- echo_status "...backing up $count of $total databases: $database"
- $(mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction $(mysql_login) $database | gzip -9 > $backup_file)
-}
-
-function backup_databases(){
- local databases=$(database_list)
- local total=$(echo $databases | wc -w | xargs)
- local output=""
- local count=1
- for database in $databases; do
- backup_database
- local count=$((count+1))
- done
- echo -ne $output
-}
-
-function hr(){
- printf '=%.0s' {1..100}
- printf "\n"
-}
-
-#==============================================================================
-# RUN SCRIPT
-#==============================================================================
-prepare
-delete_old_backups
-hr
-backup_databases
-hr
-printf "All backed up!\n\n"
diff --git a/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh b/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh
deleted file mode 100755
index 368be4374b..0000000000
--- a/images/mariadb/root/usr/share/container-scripts/mysql/readiness-probe.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-#
-# openshift-mariadb: mysqld readinessProbe
-#
-
-mysql --defaults-file=${MARIADB_DATA_DIR:-/var/lib/mysql}/.my.cnf -e"SHOW DATABASES;"
-
-if [ $? -ne 0 ]; then
- exit 1
-else
- exit 0
-fi
diff --git a/images/mongo/Dockerfile b/images/mongo/Dockerfile
deleted file mode 100644
index 0c433eb0dc..0000000000
--- a/images/mongo/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM alpine:3.8
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=mongo
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN apk --no-cache add mongodb
-
-RUN mkdir -p /data/db /data/configdb && \
- fix-permissions /data/db && \
- fix-permissions /data/configdb
-
-VOLUME /data/db
-EXPOSE 27017 28017
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD [ "mongod", "--bind_ip", "0.0.0.0" ]
diff --git a/images/nginx-drupal/Dockerfile b/images/nginx-drupal/Dockerfile
deleted file mode 100644
index 20f043040c..0000000000
--- a/images/nginx-drupal/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/nginx
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=nginx
-
-RUN mkdir -p /etc/nginx/conf.d/drupal
-
-COPY drupal /etc/nginx/conf.d/drupal/
-COPY drupal.conf /etc/nginx/conf.d/app.conf
-
-RUN fix-permissions /etc/nginx
diff --git a/images/nginx-drupal/drupal.conf b/images/nginx-drupal/drupal.conf
deleted file mode 100644
index de97c2b71c..0000000000
--- a/images/nginx-drupal/drupal.conf
+++ /dev/null
@@ -1,145 +0,0 @@
-### Nginx configuration for Drupal 7 and 8.
-server {
- include /etc/nginx/conf.d/drupal/server_prepend*.conf;
-
- listen ${NGINX_LISTEN:-8080} default_server;
-
- include /etc/nginx/helpers/*.conf;
-
- root /app/${WEBROOT:-};
- index index.php;
-
- ## rewriting /index.php to / because after https://www.drupal.org/node/2599326
- ## autocomplete URLs are forced to go to index.php
- rewrite ^/index.php / last;
-
- ## The 'default' location.
- location / {
- include /etc/nginx/conf.d/drupal/location_prepend*.conf;
-
- ## Do not allow access to .txt and .md unless inside sites/*/files/
- location ~* ^(?!.+sites\/.+\/files\/).+\.(txt|md)$ {
- deny all;
- access_log off;
- log_not_found off;
- }
-
- ## Replicate the Apache directive of Drupal standard
- ## .htaccess. Disable access to any code files. Return a 404 to curtail
- ## information disclosure.
- location ~* \.(engine|inc|install|make|module|profile|po|sh|.*sql|theme|twig|tpl(\.php)?|xtmpl|yml)(~|\.sw[op]|\.bak|\.orig|\.save)?$|^\/(\.(?!well-known).*|Entries.*|Repository|Root|Tag|Template|composer\.(json|lock))$|^\/#.*#$|\.php(~|\.sw[op]|\.bak|\.orig|\.save)$ {
- deny all;
- access_log off;
- log_not_found off;
- }
-
- ## Expiring per default for four weeks and one second, Drupal will overwrite that if necessary
- expires ${NGINX_DEFAULT_EXPIRES:-2628001s};
-
- ## Disallow access to any dot files, but send the request to Drupal
- location ~* /\. {
- try_files /dev/null @drupal;
- }
-
- ### Directives for installing drupal.
- location ~* ^(/install.php|/core/install.php) {
- try_files /dev/null @php;
- }
-
- ## Direct Access to .php files is not allowed and is sent to Drupal instead
- location ~* ^.+\.php$ {
- try_files /dev/null @drupal;
- }
-
- ## Try to find a file with given URL, if not pass to Drupal
- try_files $uri @drupal;
-
- include /etc/nginx/conf.d/drupal/location_append*.conf;
- }
-
- ## Main Drupal Location
- location @drupal {
- include /etc/nginx/conf.d/drupal/location_drupal_prepend*.conf;
-
- include /etc/nginx/fastcgi.conf;
- fastcgi_param SCRIPT_NAME /index.php;
- fastcgi_param SCRIPT_FILENAME $realpath_root/index.php;
- fastcgi_pass ${NGINX_FASTCGI_PASS:-php}:9000;
-
- include /etc/nginx/conf.d/drupal/location_drupal_append*.conf;
- }
-
- ## PHP Location.
- ## Warning: This allows executing any PHP file, use with care!
- location @php {
- include /etc/nginx/conf.d/drupal/location_php_prepend*.conf;
-
- include /etc/nginx/fastcgi.conf;
- fastcgi_pass ${NGINX_FASTCGI_PASS:-php}:9000;
-
- include /etc/nginx/conf.d/drupal/location_php_append*.conf;
- }
-
- ## Trying to access private files directly returns a 404.
- location /sites/default/files/private/ {
- internal;
- }
-
- ## Disallow access to patches directory.
- location ^~ /patches/ {
- deny all;
- access_log off;
- log_not_found off;
- }
-
- ## Disallow access to backup directory.
- location ^~ /backup/ {
- deny all;
- access_log off;
- log_not_found off;
- }
-
- ## Disallow access to vagrant directory.
- location ^~ /vagrant/ {
- deny all;
- access_log off;
- log_not_found off;
- }
-
- ## Disallow access to vendor directory.
- location ^~ /core/vendor/ {
- deny all;
- access_log off;
- log_not_found off;
- }
-
- ## Disallow access to vendor directory.
- location ^~ /vendor/ {
- deny all;
- access_log off;
- log_not_found off;
- }
-
- ## Support for the robotstxt module
- ## http://drupal.org/project/robotstxt.
- location = /robots.txt {
- access_log off;
- try_files $uri @drupal;
- }
-
- ## Add support for the humanstxt module
- ## http://drupal.org/project/humanstxt.
- location = /humans.txt {
- access_log off;
- try_files $uri @drupal;
- }
-
- ## Return an in-memory 1x1 transparent GIF.
- location @empty {
- expires 30d;
- empty_gif;
- }
-
- include /etc/nginx/conf.d/drupal/favicon.conf;
- include /etc/nginx/conf.d/drupal/server_append*.conf;
-}
diff --git a/images/nginx-drupal/drupal/favicon.conf b/images/nginx-drupal/drupal/favicon.conf
deleted file mode 100644
index bbe0d8427b..0000000000
--- a/images/nginx-drupal/drupal/favicon.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-## Support for favicon. Return a 1x1 transparent GIF if it doesn't
-## exist.
-location = /favicon.ico {
- expires 30d;
- try_files /favicon.ico @empty;
-}
diff --git a/images/nginx/Dockerfile b/images/nginx/Dockerfile
deleted file mode 100644
index 5f9cc78ccc..0000000000
--- a/images/nginx/Dockerfile
+++ /dev/null
@@ -1,56 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-# Alpine 3.11 per https://github.com/openresty/docker-openresty/blob/master/alpine/Dockerfile#L5
-FROM openresty/openresty:1.17.8.2-alpine
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=nginx
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN apk add --no-cache openssl
-
-RUN rm -Rf /etc/nginx && ln -s /usr/local/openresty/nginx/conf /etc/nginx
-
-COPY nginx.conf /etc/nginx/nginx.conf
-COPY fastcgi.conf /etc/nginx/fastcgi.conf
-COPY fastcgi.conf /etc/nginx/fastcgi_params
-COPY helpers/ /etc/nginx/helpers/
-COPY static-files.conf /etc/nginx/conf.d/app.conf
-COPY redirects-map.conf /etc/nginx/redirects-map.conf
-COPY healthcheck/healthz.locations healthcheck/healthz.locations.php.disable /etc/nginx/conf.d/
-
-RUN mkdir -p /app \
- && rm -f /etc/nginx/conf.d/default.conf \
- && fix-permissions /usr/local/openresty/nginx \
- && fix-permissions /var/run/
-
-COPY docker-entrypoint /lagoon/entrypoints/70-nginx-entrypoint
-
-WORKDIR /app
-
-EXPOSE 8080
-
-# tells the local development environment on which port we are running
-ENV LAGOON_LOCALDEV_HTTP_PORT=8080
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["nginx", "-g", "daemon off;"]
diff --git a/images/nginx/docker-entrypoint b/images/nginx/docker-entrypoint
deleted file mode 100755
index 2f8458ec4a..0000000000
--- a/images/nginx/docker-entrypoint
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-# If `BASIC_AUTH` is not already set to 'off'.
-if [[ ! "${BASIC_AUTH}" == "off" ]]; then
- # And if a username and password is set.
- if [ ! -z ${BASIC_AUTH_USERNAME+x} ] && [ ! -z ${BASIC_AUTH_PASSWORD+x} ]; then
- # Generate a basic authentication config file for Nginx.
- printf "${BASIC_AUTH_USERNAME}:$(openssl passwd -crypt ${BASIC_AUTH_PASSWORD})\n" >> /etc/nginx/.htpasswd
- # Set `BASIC_AUTH` to restricted which will tell nginx to do basic authentication.
- export BASIC_AUTH="restricted"
- fi
-fi
-
-ep /etc/nginx/*
-# Find all folders within /etc/nginx/conf.d/
-find /etc/nginx/conf.d/ -type d | while read DIR; do
- # envplate if found folder is not empty
- if find $DIR -mindepth 1 | read; then
- ep $DIR/*;
- fi
-done
-ep /etc/nginx/helpers/*
-
-# If PHP is enabled, we override the Luascript /healthz check
-echo "Setting up Healthz routing"
-if [ ! -z "$NGINX_FASTCGI_PASS" ]; then
- echo "Healthz routing - using PHP"
- cp /etc/nginx/conf.d/healthz.locations.php.disable /etc/nginx/conf.d/healthz.locations
-fi
-
-if [ "$FAST_HEALTH_CHECK" == "true" ]; then
- echo "FAST HEALTH CHECK ENABLED"
- cp /etc/nginx/helpers/90_healthz_fast_check.conf.disabled /etc/nginx/helpers/90_health_fast_check.conf
-fi
\ No newline at end of file
diff --git a/images/nginx/fastcgi.conf b/images/nginx/fastcgi.conf
deleted file mode 100644
index 9cbc41e668..0000000000
--- a/images/nginx/fastcgi.conf
+++ /dev/null
@@ -1,63 +0,0 @@
-
-set $fastcgi_port "80";
-if ($http_x_forwarded_proto = 'https') {
- set $fastcgi_https "on";
- set $fastcgi_port "443";
-}
-
-set_by_lua_block $remote_addr_clean {
- if string.find(ngx.var.remote_addr, "^::ffff:") then
- return string.match(ngx.var.remote_addr, "^::ffff:(.*)")
- else
- return ngx.var.remote_addr
- end
-}
-
-fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
-fastcgi_param QUERY_STRING $query_string;
-fastcgi_param REQUEST_METHOD $request_method;
-fastcgi_param CONTENT_TYPE $content_type;
-fastcgi_param CONTENT_LENGTH $content_length;
-
-fastcgi_param SCRIPT_NAME $fastcgi_script_name;
-fastcgi_param REQUEST_URI $request_uri;
-fastcgi_param DOCUMENT_URI $document_uri;
-fastcgi_param DOCUMENT_ROOT $document_root;
-fastcgi_param SERVER_PROTOCOL $server_protocol;
-fastcgi_param REQUEST_SCHEME $scheme;
-fastcgi_param HTTPS $https if_not_empty;
-fastcgi_param HTTPS $fastcgi_https if_not_empty;
-
-fastcgi_param GATEWAY_INTERFACE CGI/1.1;
-fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
-
-fastcgi_param REMOTE_ADDR $remote_addr_clean;
-fastcgi_param REMOTE_PORT $remote_port;
-fastcgi_param SERVER_ADDR $server_addr;
-
-# Setting to Port 80 and 443 based on if we have an upstream https or not
-fastcgi_param SERVER_PORT $fastcgi_port;
-
-# Setting to $host as $server_name is empty all the time
-fastcgi_param SERVER_NAME $host;
-
-# PHP only, required if PHP was built with --enable-force-cgi-redirect
-fastcgi_param REDIRECT_STATUS 200;
-
-# Mitigate https://httpoxy.org/ vulnerabilities
-fastcgi_param HTTP_PROXY "";
-
-# Mitigate CVE-2018-14773: https://symfony.com/blog/cve-2018-14773-remove-support-for-legacy-and-risky-http-headers
-fastcgi_param HTTP_X-ORIGINAL-URL "";
-fastcgi_param HTTP_X_ORIGINAL_URL "";
-fastcgi_param HTTP_X-REWRITE-URL "";
-fastcgi_param HTTP_X_REWRITE_URL "";
-
-fastcgi_keep_conn on;
-fastcgi_index index.php;
-fastcgi_hide_header 'X-Generator';
-
-fastcgi_buffers ${FASTCGI_BUFFERS:-256 32k};
-fastcgi_buffer_size ${FASTCGI_BUFFER_SIZE:-32k};
-fastcgi_read_timeout ${FASTCGI_READ_TIMEOUT:-3600s};
-fastcgi_temp_path /tmp/fastcgi_temp;
diff --git a/images/nginx/healthcheck/README.md b/images/nginx/healthcheck/README.md
deleted file mode 100644
index 43751e2e11..0000000000
--- a/images/nginx/healthcheck/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Healthcheck
-
-In this directory you'll find two files
-
-- healthz.locations.php.disable
-- healthz.locations
-
-Both are designed to expose a `/.lagoonhealthz` location from the nginx service. The difference is that the `.php.disable` file is used to point to the [healthz-php](https://github.com/amazeeio/healthz-php) application _if_ there is a PHP service attached to this application.
-
-The logic for which of the two files is enabled is contained in this image's `docker-entrypoint` file - there we check for the existence of the env var `NGINX_FASTCGI_PASS`, which indicates (or should indicate) the presence of a PHP-FPM service.
\ No newline at end of file
diff --git a/images/nginx/healthcheck/healthz.locations b/images/nginx/healthcheck/healthz.locations
deleted file mode 100644
index 95cf2ed753..0000000000
--- a/images/nginx/healthcheck/healthz.locations
+++ /dev/null
@@ -1,8 +0,0 @@
-location /.lagoonhealthz {
- content_by_lua_block {
- ngx.status = ngx.HTTP_OK;
- ngx.header.content_type = 'application/json';
- ngx.say('{"check_nginx":"pass"}');
- ngx.exit(ngx.OK);
- }
-}
diff --git a/images/nginx/healthcheck/healthz.locations.php.disable b/images/nginx/healthcheck/healthz.locations.php.disable
deleted file mode 100644
index dd6be8e7ea..0000000000
--- a/images/nginx/healthcheck/healthz.locations.php.disable
+++ /dev/null
@@ -1,10 +0,0 @@
-location /.lagoonhealthz {
- rewrite ^/.lagoonhealthz(/.*)?$ /.lagoonhealthz/index.php;
-
- location ~* \.php(/|$) {
- include /etc/nginx/fastcgi.conf;
- fastcgi_param SCRIPT_NAME /index.php;
- fastcgi_param SCRIPT_FILENAME /healthz-php/index.php;
- fastcgi_pass ${NGINX_FASTCGI_PASS:-php}:9000;
- }
-}
diff --git a/images/nginx/helpers/000_variables.conf b/images/nginx/helpers/000_variables.conf
deleted file mode 100644
index a54bf1cdd4..0000000000
--- a/images/nginx/helpers/000_variables.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-# sets the nginx internal variable $lagoon_environment_type to the env variable LAGOON_ENVIRONMENT_TYPE
-set_by_lua_block $lagoon_environment_type { return os.getenv("LAGOON_ENVIRONMENT_TYPE") }
\ No newline at end of file
diff --git a/images/nginx/helpers/010_redirects.conf b/images/nginx/helpers/010_redirects.conf
deleted file mode 100644
index 0431d1bba1..0000000000
--- a/images/nginx/helpers/010_redirects.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-# $redirectdomain is set via the redirects-map.conf within nginx.conf
-if ($redirectdomain) {
- return 301 $redirectdomain;
-}
\ No newline at end of file
diff --git a/images/nginx/helpers/020_basic-auth.conf b/images/nginx/helpers/020_basic-auth.conf
deleted file mode 100644
index 2929b801fa..0000000000
--- a/images/nginx/helpers/020_basic-auth.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# BASIC_AUTH is set during docker-entrypoint if BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD are set
-auth_basic "${BASIC_AUTH:-off}";
-auth_basic_user_file "/etc/nginx/.htpasswd";
\ No newline at end of file
diff --git a/images/nginx/helpers/030_rewrite_by_lua_block.conf b/images/nginx/helpers/030_rewrite_by_lua_block.conf
deleted file mode 100644
index b4de6f9c22..0000000000
--- a/images/nginx/helpers/030_rewrite_by_lua_block.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-rewrite_by_lua_block {
- -- IPv6 X-Forwarded-For
- local xff = {}
- if ngx.var.http_x_forwarded_for then
- for ip in string.gmatch(ngx.var.http_x_forwarded_for, '([^,]+)') do
- if string.find(ip, "^::ffff:") then
- table.insert(xff,string.match(ip, "^::ffff:(.*)"))
- else
- table.insert(xff,ip)
- end
- ngx.req.set_header("X-Forwarded-For", table.concat(xff, ","))
- end
- end
-}
diff --git a/images/nginx/helpers/100_x-robots-header-development.conf b/images/nginx/helpers/100_x-robots-header-development.conf
deleted file mode 100644
index 2d084156aa..0000000000
--- a/images/nginx/helpers/100_x-robots-header-development.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-# Set X-Robots-Tag to 'noindex, nofollow' for development environments and Lagoon autogenerated routes
-header_filter_by_lua_block {
- -- escape characters in the hostname
- host = string.gsub(ngx.var.host, "%p", "%%%1")
-
- -- check to see if we are a development environment
- if (os.getenv("LAGOON_ENVIRONMENT_TYPE") and string.match(os.getenv("LAGOON_ENVIRONMENT_TYPE"), 'development')) then
- ngx.header["X-Robots-Tag"] = 'noindex, nofollow';
- end
-
- -- check hostname against autogenerated routes
- if (os.getenv("LAGOON_AUTOGENERATED_ROUTES") and string.match(os.getenv("LAGOON_AUTOGENERATED_ROUTES"), host)) then
- ngx.header["X-Robots-Tag"] = 'noindex, nofollow';
- end
-}
diff --git a/images/nginx/helpers/90_healthz.conf b/images/nginx/helpers/90_healthz.conf
deleted file mode 100644
index 33356eda06..0000000000
--- a/images/nginx/helpers/90_healthz.conf
+++ /dev/null
@@ -1 +0,0 @@
-include /etc/nginx/conf.d/healthz.locations;
diff --git a/images/nginx/helpers/90_healthz_fast_check.conf.disabled b/images/nginx/helpers/90_healthz_fast_check.conf.disabled
deleted file mode 100644
index 78cb43761e..0000000000
--- a/images/nginx/helpers/90_healthz_fast_check.conf.disabled
+++ /dev/null
@@ -1,13 +0,0 @@
-set $fhcc none;
-
-if ( $http_user_agent ~* "StatusCake|Pingdom|Site25x7|Uptime|nagios" ) {
- set $fhcc "A";
-}
-
-if ( $request_method = 'GET' ) {
- set $fhcc "$fhcc G";
-}
-
-if ( $fhcc = 'A G' ) {
- rewrite ~* /.lagoonhealthz last;
-}
\ No newline at end of file
diff --git a/images/nginx/nginx.conf b/images/nginx/nginx.conf
deleted file mode 100755
index 5fd14bfe25..0000000000
--- a/images/nginx/nginx.conf
+++ /dev/null
@@ -1,125 +0,0 @@
-
-error_log /dev/stdout ${NGINX_ERROR_LOG_LEVEL:-warn};
-
-# Establish some environment variables for later use
-env LAGOON_AUTOGENERATED_ROUTES;
-env LAGOON_ENVIRONMENT_TYPE;
-
-events {
- worker_connections 1024;
- multi_accept on;
- use epoll;
-}
-
-
-http {
- include /etc/nginx/mime.types;
- default_type application/octet-stream;
-
- log_format main '$remote_addr - $remote_user [$time_local] "$request" '
- '$status $body_bytes_sent "$http_referer" '
- '"$http_user_agent" "$http_x_forwarded_for"';
-
- access_log /dev/stdout;
-
- sendfile on;
- tcp_nopush on;
- tcp_nodelay on;
-
- server_tokens off;
-
- add_header X-LAGOON $hostname always;
-
- uninitialized_variable_warn off;
-
- map $host$uri $redirectdomain {
- include /etc/nginx/redirects-map.conf;
- }
-
- gzip on;
- gzip_disable "MSIE [1-6]\.(?!.*SV1)";
-
- gzip_buffers 16 8k;
- gzip_comp_level 1;
- gzip_http_version 1.0;
- gzip_min_length 10;
- gzip_types
- application/atom+xml
- application/javascript
- application/json
- application/ld+json
- application/manifest+json
- application/rss+xml
- application/vnd.geo+json
- application/vnd.ms-fontobject
- application/x-font-ttf
- application/x-javascript
- application/x-web-app-manifest+json
- application/xhtml+xml
- application/xml
- application/xml+rss
- font/opentype
- image/bmp
- image/svg+xml
- image/x-icon
- text/cache-manifest
- text/css
- text/javascript
- text/plain
- text/vcard
- text/vnd.rim.location.xloc
- text/vtt
- text/xml
- text/x-component
- text/x-cross-domain-policy;
-
- client_max_body_size 2048m;
- client_body_timeout 10s;
- client_header_timeout 10s;
- client_body_buffer_size 128k;
- client_body_temp_path /tmp/client_temp;
- proxy_redirect off;
- proxy_max_temp_file_size 4096m;
- proxy_connect_timeout 90;
- proxy_send_timeout 90;
- proxy_read_timeout 90;
- proxy_buffers 32 16k;
- proxy_buffer_size 16k;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_headers_hash_bucket_size 64;
- proxy_temp_path /tmp/proxy_temp;
-
- uwsgi_temp_path /tmp/uwsgi_temp;
- scgi_temp_path /tmp/scgi_temp;
-
- set_real_ip_from 10.0.0.0/8;
- set_real_ip_from 172.16.0.0/12;
- set_real_ip_from 192.168.0.0/16;
- real_ip_header X-Forwarded-For;
- real_ip_recursive on;
-
- port_in_redirect off;
-
- root /app;
-
- server {
-
- listen 50000;
-
- location /nginx_status {
- stub_status on;
- access_log off;
- allow 127.0.0.1;
- allow 10.0.0.0/8;
- allow 172.16.0.0/12;
- allow 192.168.0.0/16;
- deny all;
- }
-
- }
-
- include /etc/nginx/conf.d/*.conf;
-
-}
diff --git a/images/nginx/redirects-map.conf b/images/nginx/redirects-map.conf
deleted file mode 100644
index 6bb3595aeb..0000000000
--- a/images/nginx/redirects-map.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-## Nginx redirect map
-## This file is expected to have two entries per line:
-## 1. source, which will be matched against '$host$uri' from nginx (so the hostname and uri, where the uri is always at least /)
-## 2. destination of the redirect
-## The file is read from top to bottom, so more specific sources need to be above more general matches
-## A couple of examples:
-
-## Simple www to non www redirect, with preserving the URL string and arguments
-# ~^www\.example\.com\/ http://example.com$request_uri;
-
-## Simple non-www to www redirect, with preserving the URL string and arguments
-#~^example\.com\/ http://www.example.com$request_uri;
-
-## Redirect every request to example.com to example.net with preserving the URL string and arguments, eg: example.com/bla -> example.net/bla, example.com/bla?test -> example.net/bla?test
-##
-# ~^example\.com\/ http://example.net$request_uri;
-
-## Redirect request only to example.com/test (no regex matching) to example.net without preserving the URL string, eg: example.com/test -> example.net
-## Requests to example.com/test/bla or example.com/bla are not matched
-##
-# example\.com\/test http://example.net;
-
-## Redirect request only to example.com/test to example.net with preserving the rest of the URL string and arguments, eg: example.com/test/bla -> example.net/bla, example.com/test/bla?test -> example.net/bla?test
-## Requests to example.com/bla are not matched
-##
-# ~^example\.com\/test\/(.*) http://example.net/$1$is_args$args;
diff --git a/images/nginx/static-files.conf b/images/nginx/static-files.conf
deleted file mode 100644
index ac5fc8802c..0000000000
--- a/images/nginx/static-files.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-server {
-
- listen 8080 default_server;
-
- include /etc/nginx/helpers/*.conf;
-
- location / {
- index index.html index.htm;
- try_files $uri $uri/ =404;
- }
-
-}
\ No newline at end of file
diff --git a/images/node/Dockerfile b/images/node/Dockerfile
deleted file mode 100644
index 2a749d9c24..0000000000
--- a/images/node/Dockerfile
+++ /dev/null
@@ -1,49 +0,0 @@
-ARG NODE_VERSION
-ARG ALPINE_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM node:${NODE_VERSION}-alpine${ALPINE_VERSION}
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=node
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home \
- && fix-permissions /home \
- && mkdir -p /app \
- && fix-permissions /app
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN apk update \
- && apk upgrade \
- && rm -rf /var/cache/apk/*
-
-# Make sure Bower and NPM are allowed to be running as root
-RUN echo '{ "allow_root": true }' > /home/.bowerrc \
- && echo 'unsafe-perm=true' > /home/.npmrc
-
-WORKDIR /app
-
-EXPOSE 3000
-
-# tells the local development environment on which port we are running
-ENV LAGOON_LOCALDEV_HTTP_PORT=3000
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["yarn", "run", "start"]
diff --git a/images/node/builder/Dockerfile b/images/node/builder/Dockerfile
deleted file mode 100644
index c55b594b6f..0000000000
--- a/images/node/builder/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-ARG NODE_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/node:${NODE_VERSION}
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=node
-
-RUN apk update \
- && apk upgrade \
- && apk add --no-cache \
- libstdc++ \
- && apk add --no-cache \
- binutils-gold \
- curl \
- g++ \
- gcc \
- gnupg \
- libgcc \
- linux-headers \
- make \
- git \
- file \
- openssl \
- python \
- bash \
- ca-certificates \
- wget \
- libpng-dev \
- && curl -sSLo /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub \
- && curl -sSLO https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.28-r0/glibc-2.28-r0.apk \
- && apk add glibc-2.28-r0.apk \
- && rm -rf /var/cache/apk/*
-
-CMD ["/bin/docker-sleep"]
diff --git a/images/oc-build-deploy-dind/Dockerfile b/images/oc-build-deploy-dind/Dockerfile
index bdbae58ad1..a4734707d2 100644
--- a/images/oc-build-deploy-dind/Dockerfile
+++ b/images/oc-build-deploy-dind/Dockerfile
@@ -21,4 +21,6 @@ COPY scripts /oc-build-deploy/scripts
COPY openshift-templates /oc-build-deploy/openshift-templates
+ENV IMAGECACHE_REGISTRY=imagecache.amazeeio.cloud
+
CMD ["/oc-build-deploy/build-deploy.sh"]
diff --git a/images/oc-build-deploy-dind/README.md b/images/oc-build-deploy-dind/README.md
new file mode 100644
index 0000000000..79b3492bae
--- /dev/null
+++ b/images/oc-build-deploy-dind/README.md
@@ -0,0 +1,52 @@
+# Lagoon Build & Deploy
+
+This is the Image which contains the actual code responsible for building and deploying the Code from Git repositories.
+
+## Main purpose
+- Builds the Docker Images of a given Git repository and deploys the resulting Resources to OpenShift (see "How this works" below).
+
+## Upstream Image
+- Based on the Lagoon `oc` image, which has the OpenShift Client Tools installed that are used heavily in this image.
+
+## How this works
+
+Everything in here is based on Bash scripts, which in a nutshell do this:
+1. Check out a given Git Repository at a given Git Reference (Branch, Branch & SHA, or Tag)
+2. Create a new project in OpenShift for the given project and branch
+3. Check yaml files (either .lagoon.yml or docker-compose.yml) to learn:
+    1. Which Docker Images should be built, and with which context
+    2. Which Services, and therefore which OpenShift Resources, should be created
+4. Build the Docker Images
+5. Create the OpenShift Resources
+6. Push the Docker Images to the OpenShift Registry
+7. Monitor the deployment of the Resources inside OpenShift
+
+## Environment Variables
+
+As this is a Docker Image that is built once and then executed anew for every single deployment, we use Environment Variables to define what should happen.
+
+| Environment Variable | Description |
+|--------|---|
+| `GIT_REPO` | Full URL of the Git Repo to clone/check out; should be an SSH-compatible Git Repo |
+| `GIT_REF` | Git reference to check out; can be: 1. a Git Branch prefixed with `origin/`, 2. a Git Tag, 3. a Git SHA |
+| `OPENSHIFT_FOLDER` | Folder in which the scripts search for files and generally work; can be used to keep everything in a subfolder |
+| `OPENSHIFT_CONSOLE` | Full URL of the OpenShift Console in which the OpenShift Resources should be created |
+| `OPENSHIFT_TOKEN` | API Token of an OpenShift ServiceAccount that will be used to connect to the Console |
+| `APPUIO_TOKEN` | Special case for appuio.ch (needed when `OPENSHIFT_CONSOLE` is `https://console.appuio.ch`); the API Token that should be used to create projects |
+| `OPENSHIFT_PROJECT` | Name of the OpenShift Project that should be used; it will be created if it does not exist |
+| `OPENSHIFT_PROJECT_USER` | OpenShift Username that should be given access to the project (useful if the User behind `OPENSHIFT_TOKEN` is a different one than the User accessing the OpenShift UI) |
+| `PROJECT` | Name of the Project that this Deployment is part of |
+| `BRANCH` | Branch Name that this Deployment is part of (even though `GIT_REF` can also be a Git hash, we still need to know which Branch we actually deploy) |
+
+## Mountable Volumes
+
+For a container created from this image to work better, there are some Volumes from the Host that can be mounted into it:
+
+| Volume Path | Description |
+|--------|---|
+| `/git` | Path where the Git Repo will be checked out. Can be used as a cache for faster subsequent checkouts |
+| `/var/run/docker.sock` | Path to the Docker Engine Socket; as we build Docker images within the Container, it's good to use the Docker Engine of the Host to benefit from Layer Caching etc. |
+
+These are typically passed to `docker run` as volume flags (`-v $WORKSPACE:/git` and `-v /var/run/docker.sock:/var/run/docker.sock`), as shown in the sketch below.
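+
+A minimal sketch of a manual invocation, assuming hypothetical values for the Environment Variables above (the image name and all values are illustrative; in practice Lagoon itself starts these builds):
+
+    docker run --rm \
+      -e GIT_REPO=ssh://git@example.com/my-project.git \
+      -e GIT_REF=origin/main \
+      -e OPENSHIFT_CONSOLE=https://openshift.example.com \
+      -e OPENSHIFT_TOKEN=$OPENSHIFT_TOKEN \
+      -e OPENSHIFT_PROJECT=my-project-main \
+      -e PROJECT=my-project \
+      -e BRANCH=main \
+      -v $WORKSPACE:/git \
+      -v /var/run/docker.sock:/var/run/docker.sock \
+      amazeeio/oc-build-deploy-dind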
\ No newline at end of file
diff --git a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh
index a08a52fcc8..fa9982ba9e 100755
--- a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh
+++ b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh
@@ -75,7 +75,24 @@ if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
LAGOON_SERVICE_TYPES=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_SERVICE_TYPES") | "\(.value)"'))
fi
if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
- LAGOON_SERVICE_TYPES=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_SERVICE_TYPES") | "\(.value)"'))
+ TEMP_LAGOON_SERVICE_TYPES=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_SERVICE_TYPES") | "\(.value)"'))
+ if [ ! -z "$TEMP_LAGOON_SERVICE_TYPES" ]; then
+ LAGOON_SERVICE_TYPES=$TEMP_LAGOON_SERVICE_TYPES
+ fi
+fi
+# Allow the dbaas environment type to be overridden by the lagoon API
+# This accepts colon-separated values like `SERVICE_NAME:DBAAS_ENVIRONMENT_TYPE`, with multiple overrides
+# separated by commas
+# Example 1: mariadb:production <- tells any docker-compose service named mariadb to use the production dbaas environment type
+# Example 2: mariadb:production,mariadb-test:development
+if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ LAGOON_DBAAS_ENVIRONMENT_TYPES=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_DBAAS_ENVIRONMENT_TYPES") | "\(.value)"'))
+fi
+if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
+ TEMP_LAGOON_DBAAS_ENVIRONMENT_TYPES=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_DBAAS_ENVIRONMENT_TYPES") | "\(.value)"'))
+ if [ ! -z "$TEMP_LAGOON_DBAAS_ENVIRONMENT_TYPES" ]; then
+ LAGOON_DBAAS_ENVIRONMENT_TYPES=$TEMP_LAGOON_DBAAS_ENVIRONMENT_TYPES
+ fi
fi
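+# To illustrate with hypothetical values: given
+#   LAGOON_DBAAS_ENVIRONMENT_TYPES="mariadb:production,mariadb-test:development"
+# the per-service override loops added further below resolve the `mariadb` service to
+# the "production" dbaas environment and `mariadb-test` to "development".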
set -x
@@ -121,7 +138,7 @@ do
# check if we can use the dbaas operator
elif oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mariadbconsumer.v1.mariadb.amazee.io &> /dev/null; then
SERVICE_TYPE="mariadb-dbaas"
- # heck if this cluster supports the default one, if not we assume that this cluster is not capable of shared mariadbs and we use a mariadb-single
+ # check if this cluster supports the default one, if not we assume that this cluster is not capable of shared mariadbs and we use a mariadb-single
elif svcat --scope cluster get class $MARIADB_SHARED_DEFAULT_CLASS > /dev/null; then
SERVICE_TYPE="mariadb-shared"
else
@@ -141,6 +158,18 @@ do
DBAAS_ENVIRONMENT=$ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE
fi
+ # If we have a dbaas environment type override in the api, consume it here
+ if [ ! -z "$LAGOON_DBAAS_ENVIRONMENT_TYPES" ]; then
+ IFS=',' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPES"
+ for LAGOON_DBAAS_ENVIRONMENT_TYPE in "${LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT[@]}"
+ do
+ IFS=':' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPE"
+ if [ "${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[0]}" == "$SERVICE_NAME" ]; then
+ DBAAS_ENVIRONMENT=${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[1]}
+ fi
+ done
+ fi
+
MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]="$DBAAS_ENVIRONMENT"
fi
@@ -181,6 +210,10 @@ do
fi
fi
+ if [ "$SERVICE_TYPE" == "mongodb-single" ]; then
+ SERVICE_TYPE="mongo"
+ fi
+
if [ "$SERVICE_TYPE" == "mongodb-shared" ]; then
MONGODB_SHARED_CLASS=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.mongo-shared\\.class "${MONGODB_SHARED_DEFAULT_CLASS}")
MONGODB_SHARED_PLAN=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.mongo-shared\\.plan "${ENVIRONMENT_TYPE}")
@@ -194,6 +227,31 @@ do
fi
fi
+ if [[ "$SERVICE_TYPE" == "mongodb-dbaas" ]]; then
+ # Default environment is the environment type
+ DBAAS_ENVIRONMENT=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.$SERVICE_TYPE\\.environment "${ENVIRONMENT_TYPE}")
+
+ # Allow the dbaas environment to be overridden per environment in .lagoon.yml
+ ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH}.overrides.$SERVICE_NAME.$SERVICE_TYPE\\.environment false)
+ if [ ! "$ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE" == "false" ]; then
+ DBAAS_ENVIRONMENT=$ENVIRONMENT_DBAAS_ENVIRONMENT_OVERRIDE
+ fi
+
+ # If we have a dbaas environment type override in the api, consume it here
+ if [ ! -z "$LAGOON_DBAAS_ENVIRONMENT_TYPES" ]; then
+ IFS=',' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPES"
+ for LAGOON_DBAAS_ENVIRONMENT_TYPE in "${LAGOON_DBAAS_ENVIRONMENT_TYPES_SPLIT[@]}"
+ do
+ IFS=':' read -ra LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT <<< "$LAGOON_DBAAS_ENVIRONMENT_TYPE"
+ if [ "${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[0]}" == "$SERVICE_NAME" ]; then
+ DBAAS_ENVIRONMENT=${LAGOON_DBAAS_ENVIRONMENT_TYPE_SPLIT[1]}
+ fi
+ done
+ fi
+
+ MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]="$DBAAS_ENVIRONMENT"
+ fi
+
if [ "$SERVICE_TYPE" == "none" ]; then
continue
fi
@@ -208,8 +266,14 @@ do
# The ImageName is the same as the Name of the Docker Compose ServiceName
IMAGE_NAME=$COMPOSE_SERVICE
- # Generate List of Images to build
- IMAGES+=("${IMAGE_NAME}")
+ # Do not build images for shared or dbaas services
+ if [[ "$SERVICE_TYPE" != "mariadb-dbaas" ]] &&
+ [[ "$SERVICE_TYPE" != "mariadb-shared" ]] &&
+ [[ "$SERVICE_TYPE" != "mongodb-shared" ]] &&
+ [[ "$SERVICE_TYPE" != "mongodb-dbaas" ]]; then
+ # Generate List of Images to build
+ IMAGES+=("${IMAGE_NAME}")
+ fi
# Map Deployment ServiceType to the ImageName
MAP_DEPLOYMENT_SERVICETYPE_TO_IMAGENAME["${SERVICE_NAME}:${DEPLOYMENT_SERVICETYPE}"]="${IMAGE_NAME}"
@@ -264,12 +328,19 @@ do
PRIVATE_REGISTRY_CREDENTIAL=($(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.scope == "container_registry" and .name == "'$PRIVATE_CONTAINER_REGISTRY_PASSWORD'") | "\(.value)"'))
fi
if [ ! -z "$LAGOON_ENVIRONMENT_VARIABLES" ]; then
- PRIVATE_REGISTRY_CREDENTIAL=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.scope == "container_registry" and .name == "'$PRIVATE_CONTAINER_REGISTRY_PASSWORD'") | "\(.value)"'))
+ TEMP_PRIVATE_REGISTRY_CREDENTIAL=($(echo $LAGOON_ENVIRONMENT_VARIABLES | jq -r '.[] | select(.scope == "container_registry" and .name == "'$PRIVATE_CONTAINER_REGISTRY_PASSWORD'") | "\(.value)"'))
+ if [ ! -z "$TEMP_PRIVATE_REGISTRY_CREDENTIAL" ]; then
+ PRIVATE_REGISTRY_CREDENTIAL=$TEMP_PRIVATE_REGISTRY_CREDENTIAL
+ fi
fi
if [ -z $PRIVATE_REGISTRY_CREDENTIAL ]; then
#if no password defined in the lagoon api, pass the one in `.lagoon.yml` as a password
PRIVATE_REGISTRY_CREDENTIAL=$PRIVATE_CONTAINER_REGISTRY_PASSWORD
fi
+ if [ -z "$PRIVATE_REGISTRY_CREDENTIAL" ]; then
+ echo -e "A private container registry was defined in the .lagoon.yml file, but no password could be found in either the .lagoon.yml or in the Lagoon API\n\nPlease check if the password has been set correctly."
+ exit 1
+ fi
if [ $PRIVATE_CONTAINER_REGISTRY_URL != "false" ]; then
echo "Attempting to log in to $PRIVATE_CONTAINER_REGISTRY_URL with user $PRIVATE_CONTAINER_REGISTRY_USERNAME - $PRIVATE_CONTAINER_REGISTRY_PASSWORD"
docker login --username $PRIVATE_CONTAINER_REGISTRY_USERNAME --password $PRIVATE_REGISTRY_CREDENTIAL $PRIVATE_CONTAINER_REGISTRY_URL
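The project-versus-environment precedence used here (and for `LAGOON_SERVICE_TYPES` above) follows one pattern: take the project-scoped value, then replace it only if a non-empty environment-scoped value exists. A self-contained sketch with hypothetical API payloads:

```bash
#!/bin/bash
# hypothetical JSON payloads, as delivered by the Lagoon API
LAGOON_PROJECT_VARIABLES='[{"name":"REG_PW","scope":"container_registry","value":"project-secret"}]'
LAGOON_ENVIRONMENT_VARIABLES='[]'

CREDENTIAL=$(echo "$LAGOON_PROJECT_VARIABLES" \
  | jq -r '.[] | select(.scope == "container_registry" and .name == "REG_PW") | .value')
TEMP=$(echo "$LAGOON_ENVIRONMENT_VARIABLES" \
  | jq -r '.[] | select(.scope == "container_registry" and .name == "REG_PW") | .value')
# only override the project-level value when an environment-level one exists
if [ ! -z "$TEMP" ]; then
  CREDENTIAL=$TEMP
fi
echo "$CREDENTIAL"   # "project-secret", since no environment override is set
```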
@@ -331,6 +402,8 @@ if [[ ( "$TYPE" == "pullrequest" || "$TYPE" == "branch" ) && ! $THIS_IS_TUG ==
BUILD_ARGS+=(--build-arg LAGOON_GIT_BRANCH="${BRANCH}")
BUILD_ARGS+=(--build-arg LAGOON_GIT_SAFE_BRANCH="${SAFE_BRANCH}")
BUILD_ARGS+=(--build-arg LAGOON_PROJECT="${PROJECT}")
+ BUILD_ARGS+=(--build-arg LAGOON_ENVIRONMENT="${SAFE_BRANCH}")
+ BUILD_ARGS+=(--build-arg LAGOON_ENVIRONMENT_TYPE="${ENVIRONMENT_TYPE}")
BUILD_ARGS+=(--build-arg LAGOON_SAFE_PROJECT="${SAFE_PROJECT}")
BUILD_ARGS+=(--build-arg LAGOON_BUILD_TYPE="${TYPE}")
BUILD_ARGS+=(--build-arg LAGOON_GIT_SOURCE_REPOSITORY="${SOURCE_REPOSITORY}")
@@ -366,6 +439,12 @@ if [[ ( "$TYPE" == "pullrequest" || "$TYPE" == "branch" ) && ! $THIS_IS_TUG ==
PULL_IMAGE=$(echo "${OVERRIDE_IMAGE}" | envsubst)
fi
+ # if the image is just a bare image name (like "alpine") we prefix it with `library/`, as the imagecache does not
+ # understand the implicit `library/` namespace of official images
+ if [[ ! "$PULL_IMAGE" =~ "/" ]]; then
+ PULL_IMAGE="library/$PULL_IMAGE"
+ fi
+
# Add the images we should pull to the IMAGES_PULL array, they will later be tagged from dockerhub
IMAGES_PULL["${IMAGE_NAME}"]="${PULL_IMAGE}"
@@ -453,14 +532,14 @@ else
ROUTES_AUTOGENERATE_INSECURE=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.insecure Allow)
fi
-ROUTES_AUTOGENERATE_ENABLED=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true)
-ROUTES_AUTOGENERATE_ALLOW_PRS=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED)
+ROUTES_AUTOGENERATE_ENABLED=$(set -o pipefail; cat .lagoon.yml | shyaml get-value routes.autogenerate.enabled true | tr '[:upper:]' '[:lower:]')
+ROUTES_AUTOGENERATE_ALLOW_PRS=$(cat .lagoon.yml | shyaml get-value routes.autogenerate.allowPullrequests $ROUTES_AUTOGENERATE_ENABLED | tr '[:upper:]' '[:lower:]')
if [[ "$TYPE" == "pullrequest" && "$ROUTES_AUTOGENERATE_ALLOW_PRS" == "true" ]]; then
ROUTES_AUTOGENERATE_ENABLED=true
fi
## fail silently if the key autogenerateRoutes doesn't exist and default to whatever ROUTES_AUTOGENERATE_ENABLED is set to
-ROUTES_AUTOGENERATE_BRANCH=$(cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED)
-if [[ "$ROUTES_AUTOGENERATE_BRANCH" =~ [Tt]rue ]]; then
+ROUTES_AUTOGENERATE_BRANCH=$(set -o pipefail; cat .lagoon.yml | shyaml -q get-value environments.${BRANCH//./\\.}.autogenerateRoutes $ROUTES_AUTOGENERATE_ENABLED | tr '[:upper:]' '[:lower:]')
+if [[ "$ROUTES_AUTOGENERATE_BRANCH" == "true" ]]; then
ROUTES_AUTOGENERATE_ENABLED=true
fi
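Two details in the reworked lookups are worth noting: `tr '[:upper:]' '[:lower:]'` normalises shyaml's Python-style `True`/`False` booleans, and `set -o pipefail` inside the command substitution ensures a shyaml failure is not masked by `tr` exiting 0. A minimal sketch (file path is illustrative):

```bash
#!/bin/bash
printf 'routes:\n  autogenerate:\n    enabled: True\n' > /tmp/example.yml

ENABLED=$(set -o pipefail; cat /tmp/example.yml \
  | shyaml get-value routes.autogenerate.enabled true \
  | tr '[:upper:]' '[:lower:]')

echo "$ENABLED"   # "true", whatever the capitalisation in the YAML
```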
@@ -513,20 +592,37 @@ do
SERVICEBROKERS+=("${SERVICE_NAME}:${SERVICE_TYPE}")
fi
- # If we have a dbaas consumer, create it
OPENSHIFT_SERVICES_TEMPLATE="/oc-build-deploy/openshift-templates/${SERVICE_TYPE}/consumer.yml"
if [ -f $OPENSHIFT_SERVICES_TEMPLATE ]; then
- OPENSHIFT_TEMPLATE=$OPENSHIFT_SERVICES_TEMPLATE
- OPERATOR_ENVIRONMENT="${MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]}"
- TEMPLATE_ADDITIONAL_PARAMETERS=()
- oc process --local -o yaml --insecure-skip-tls-verify \
- -n ${OPENSHIFT_PROJECT} \
- -f ${OPENSHIFT_TEMPLATE} \
- -p SERVICE_NAME="${SERVICE_NAME}" \
- -p SAFE_BRANCH="${SAFE_BRANCH}" \
- -p SAFE_PROJECT="${SAFE_PROJECT}" \
- -p ENVIRONMENT="${OPERATOR_ENVIRONMENT}" \
- | outputToYaml
+ EXISTING_CONSUMER_DB=""
+ # Check if we have a dbaas consumer already created
+ if [ "$SERVICE_TYPE" == "mariadb-dbaas" ]; then
+ if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mariadbconsumer/${SERVICE_NAME} 2> /dev/null; then
+ EXISTING_CONSUMER_DB=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mariadbconsumer/${SERVICE_NAME} -o json 2> /dev/null | jq -r '.spec.consumer.database')
+ fi
+ elif [ "$SERVICE_TYPE" == "mongodb-dbaas" ]; then
+ if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} 2> /dev/null; then
+ EXISTING_CONSUMER_DB=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o json 2> /dev/null | jq -r '.spec.consumer.database')
+ fi
+ elif [ "$SERVICE_TYPE" == "postgres-dbaas" ]; then
+ if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get postgresqlconsumer/${SERVICE_NAME} 2> /dev/null; then
+ EXISTING_CONSUMER_DB=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get postgresqlconsumer/${SERVICE_NAME} -o json 2> /dev/null | jq -r '.spec.consumer.database')
+ fi
+ fi
+ # If we haven't already got an existing dbaas consumer, create one
+ if [ -z "$EXISTING_CONSUMER_DB" ]; then
+ OPENSHIFT_TEMPLATE=$OPENSHIFT_SERVICES_TEMPLATE
+ OPERATOR_ENVIRONMENT="${MAP_SERVICE_NAME_TO_DBAAS_ENVIRONMENT["${SERVICE_NAME}"]}"
+ TEMPLATE_ADDITIONAL_PARAMETERS=()
+ oc process --local -o yaml --insecure-skip-tls-verify \
+ -n ${OPENSHIFT_PROJECT} \
+ -f ${OPENSHIFT_TEMPLATE} \
+ -p SERVICE_NAME="${SERVICE_NAME}" \
+ -p SAFE_BRANCH="${SAFE_BRANCH}" \
+ -p SAFE_PROJECT="${SAFE_PROJECT}" \
+ -p ENVIRONMENT="${OPERATOR_ENVIRONMENT}" \
+ | outputToYaml
+ fi
SERVICEBROKERS+=("${SERVICE_NAME}:${SERVICE_TYPE}")
fi
@@ -538,11 +634,8 @@ TEMPLATE_PARAMETERS=()
### CUSTOM ROUTES FROM .lagoon.yml
##############################################
-if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
- MONITORING_ENABLED="true"
-else
- MONITORING_ENABLED="false"
-fi
+
+MONITORING_ENABLED="false" # monitoring is disabled by default; it is re-enabled below for the first route of a production environment
MONITORING_INTERVAL=60
ROUTES_SERVICE_COUNTER=0
@@ -561,8 +654,8 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
# Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.active.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
@@ -578,12 +671,20 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+
+ # if we are in production we enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
. /oc-build-deploy/scripts/exec-openshift-create-route.sh
+ MONITORING_ENABLED="false" # disabling a possible enabled monitoring again
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -603,8 +704,8 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
# Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate true | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value production_routes.standby.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
@@ -620,12 +721,20 @@ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+
+ # if we are in production we enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
. /oc-build-deploy/scripts/exec-openshift-create-route.sh
+ MONITORING_ENABLED="false" # disabling a possible enabled monitoring again
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -648,8 +757,8 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
# Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value ${PROJECT}.environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
@@ -665,12 +774,20 @@ if [ -n "$(cat .lagoon.yml | shyaml keys ${PROJECT}.environments.${BRANCH//./\\.
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+
+ # if we are in production we enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
. /oc-build-deploy/scripts/exec-openshift-create-route.sh
+ MONITORING_ENABLED="false" # disabling a possible enabled monitoring again
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -687,8 +804,8 @@ else
ROUTE_DOMAIN=$(cat .lagoon.yml | shyaml keys environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER)
# Route Domains include dots, which need to be escaped via `\.` in order to use them within shyaml
ROUTE_DOMAIN_ESCAPED=$(cat .lagoon.yml | shyaml keys environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER | sed 's/\./\\./g')
- ROUTE_TLS_ACME=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true)
- ROUTE_MIGRATE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false)
+ ROUTE_TLS_ACME=$(set -o pipefail; cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.tls-acme true | tr '[:upper:]' '[:lower:]')
+ ROUTE_MIGRATE=$(set -o pipefail; cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.migrate false | tr '[:upper:]' '[:lower:]')
ROUTE_INSECURE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.insecure Redirect)
ROUTE_HSTS=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.hsts null)
MONITORING_PATH=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.routes.$ROUTES_SERVICE_COUNTER.$ROUTES_SERVICE.$ROUTE_DOMAIN_COUNTER.$ROUTE_DOMAIN_ESCAPED.monitoring-path "/")
@@ -704,12 +821,20 @@ else
# The very first found route is set as MAIN_CUSTOM_ROUTE
if [ -z "${MAIN_CUSTOM_ROUTE+x}" ]; then
MAIN_CUSTOM_ROUTE=$ROUTE_DOMAIN
+
+ # if we are in production we enable monitoring for the main custom route
+ if [ "${ENVIRONMENT_TYPE}" == "production" ]; then
+ MONITORING_ENABLED="true"
+ fi
+
fi
ROUTE_SERVICE=$ROUTES_SERVICE
. /oc-build-deploy/scripts/exec-openshift-create-route.sh
+ MONITORING_ENABLED="false" # disabling a possible enabled monitoring again
+
let ROUTE_DOMAIN_COUNTER=ROUTE_DOMAIN_COUNTER+1
done
@@ -729,16 +854,62 @@ if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get schedules.backup.ap
TEMPLATE_PARAMETERS=()
+ # Check for custom baas bucket name
+ if [ ! -z "$LAGOON_PROJECT_VARIABLES" ]; then
+ BAAS_BUCKET_NAME=$(echo $LAGOON_PROJECT_VARIABLES | jq -r '.[] | select(.name == "LAGOON_BAAS_BUCKET_NAME") | "\(.value)"')
+ fi
+ if [ -z "$BAAS_BUCKET_NAME" ]; then
+ BAAS_BUCKET_NAME=baas-${SAFE_PROJECT}
+ fi
+ TEMPLATE_PARAMETERS+=(-p BAAS_BUCKET_NAME="${BAAS_BUCKET_NAME}")
+
+ # Pull in .lagoon.yml variables
+ PRODUCTION_MONTHLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.monthly "")
+ PRODUCTION_WEEKLY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.weekly "")
+ PRODUCTION_DAILY_BACKUP_RETENTION=$(cat .lagoon.yml | shyaml get-value backup-retention.production.daily "")
+
+ # Pull in environment type (development/production)
+ TEMPLATE_PARAMETERS+=(-p ENVIRONMENT_TYPE="${ENVIRONMENT_TYPE}")
+
+ # Set template parameters for retention values (prefer .lagoon.yml values over supplied defaults after ensuring they are valid integers via "-eq" comparison)
+ if [ ! -z "$PRODUCTION_MONTHLY_BACKUP_RETENTION" ] && [ "$PRODUCTION_MONTHLY_BACKUP_RETENTION" -eq "$PRODUCTION_MONTHLY_BACKUP_RETENTION" ] && [ "$ENVIRONMENT_TYPE" = 'production' ]; then
+ TEMPLATE_PARAMETERS+=(-p MONTHLY_BACKUP_RETENTION="${PRODUCTION_MONTHLY_BACKUP_RETENTION}")
+ else
+ TEMPLATE_PARAMETERS+=(-p MONTHLY_BACKUP_RETENTION="${MONTHLY_BACKUP_DEFAULT_RETENTION}")
+ fi
+ if [ ! -z "$PRODUCTION_WEEKLY_BACKUP_RETENTION" ] && [ "$PRODUCTION_WEEKLY_BACKUP_RETENTION" -eq "$PRODUCTION_WEEKLY_BACKUP_RETENTION" ] && [ "$ENVIRONMENT_TYPE" = 'production' ]; then
+ TEMPLATE_PARAMETERS+=(-p WEEKLY_BACKUP_RETENTION="${PRODUCTION_WEEKLY_BACKUP_RETENTION}")
+ else
+ TEMPLATE_PARAMETERS+=(-p WEEKLY_BACKUP_RETENTION="${WEEKLY_BACKUP_DEFAULT_RETENTION}")
+ fi
+ if [ ! -z "$PRODUCTION_DAILY_BACKUP_RETENTION" ] && [ "$PRODUCTION_DAILY_BACKUP_RETENTION" -eq "$PRODUCTION_DAILY_BACKUP_RETENTION" ] && [ "$ENVIRONMENT_TYPE" = 'production' ]; then
+ TEMPLATE_PARAMETERS+=(-p DAILY_BACKUP_RETENTION="${PRODUCTION_DAILY_BACKUP_RETENTION}")
+ else
+ TEMPLATE_PARAMETERS+=(-p DAILY_BACKUP_RETENTION="${DAILY_BACKUP_DEFAULT_RETENTION}")
+ fi
+
# Run Backups every day at 2200-0200
BACKUP_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(22-2) * * *")
TEMPLATE_PARAMETERS+=(-p BACKUP_SCHEDULE="${BACKUP_SCHEDULE}")
- # Run Checks on Sunday at 0300-0600
- CHECK_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 0")
+ # Checks
+ if [ ! -z "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" ] && [ "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" = 'enabled' ]; then
+ # Let the controller deduplicate checks (will run weekly at a random time throughout the week)
+ CHECK_SCHEDULE="@weekly-random"
+ else
+ # Run Checks on Sunday at 0300-0600
+ CHECK_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 0")
+ fi
TEMPLATE_PARAMETERS+=(-p CHECK_SCHEDULE="${CHECK_SCHEDULE}")
- # Run Prune on Saturday at 0300-0600
- PRUNE_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 6")
+ # Prunes
+ if [ ! -z "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" ] && [ "$K8UP_WEEKLY_RANDOM_FEATURE_FLAG" = 'enabled' ]; then
+ # Let the controller deduplicate prunes (will run weekly at a random time throughout the week)
+ PRUNE_SCHEDULE="@weekly-random"
+ else
+ # Run Prune on Saturday at 0300-0600
+ PRUNE_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 6")
+ fi
TEMPLATE_PARAMETERS+=(-p PRUNE_SCHEDULE="${PRUNE_SCHEDULE}")
OPENSHIFT_TEMPLATE="/oc-build-deploy/openshift-templates/backup-schedule.yml"
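The `[ "$X" -eq "$X" ]` self-comparison above is the usual shell idiom for integer validation: `test` only accepts integers for `-eq`, so any non-numeric value makes the comparison fail (with a message on stderr) and the default retention is used instead. A sketch:

```bash
#!/bin/bash
is_integer() {
  # -eq errors out (status 2) on anything that is not an integer
  [ "$1" -eq "$1" ] 2>/dev/null
}

is_integer "12" && echo "12 is a valid retention value"
is_integer "monthly" || echo "non-numeric values fall back to the defaults"
```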
@@ -911,6 +1082,10 @@ do
set -x
;;
+ mongodb-dbaas)
+ . /oc-build-deploy/scripts/exec-openshift-mongodb-dbaas.sh
+ ;;
+
*)
echo "ServiceBroker Type ${SERVICE_TYPE} not implemented"; exit 1;
@@ -940,11 +1115,11 @@ if [[ $THIS_IS_TUG == "true" ]]; then
elif [ "$TYPE" == "pullrequest" ] || [ "$TYPE" == "branch" ]; then
- # All images that should be pulled are tagged as Images directly in OpenShift Registry
+ # All images that should be pulled are copied to the OpenShift and Harbor registries
for IMAGE_NAME in "${!IMAGES_PULL[@]}"
do
PULL_IMAGE="${IMAGES_PULL[${IMAGE_NAME}]}"
- . /oc-build-deploy/scripts/exec-openshift-tag-dockerhub.sh
+ . /oc-build-deploy/scripts/exec-openshift-copy-to-registry.sh
done
for IMAGE_NAME in "${!IMAGES_BUILD[@]}"
@@ -1223,6 +1398,10 @@ do
echo "nothing to monitor for $SERVICE_TYPE"
+ elif [ $SERVICE_TYPE == "mongodb-dbaas" ]; then
+
+ echo "nothing to monitor for $SERVICE_TYPE"
+
elif [ ! $SERVICE_ROLLOUT_TYPE == "false" ]; then
. /oc-build-deploy/scripts/exec-monitor-deploy.sh
fi
@@ -1279,3 +1458,15 @@ if [ "${LAGOON_POSTROLLOUT_DISABLED}" != "true" ]; then
else
echo "post-rollout tasks are currently disabled LAGOON_POSTROLLOUT_DISABLED is set to true"
fi
+
+##############################################
+### PUSH the latest .lagoon.yml into lagoon-yaml configmap
+##############################################
+
+if oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get configmap lagoon-yaml &> /dev/null; then
+ # replace it
+ oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} create configmap lagoon-yaml --from-file=.lagoon.yml -o yaml --dry-run | oc replace -f -
+else
+ # create it
+ oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} create configmap lagoon-yaml --from-file=.lagoon.yml
+fi
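`create --dry-run -o yaml | oc replace -f -` is the usual idempotent-update idiom for ConfigMaps built from files. To read the stored copy back later, something like the following should work (namespace name is illustrative; the `--from-file` flag stores the content under the data key `.lagoon.yml`):

```bash
oc -n example-project get configmap lagoon-yaml \
  -o go-template='{{ index .data ".lagoon.yml" }}'
```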
diff --git a/images/oc-build-deploy-dind/build-deploy.sh b/images/oc-build-deploy-dind/build-deploy.sh
index 205ce69061..69546b94ad 100755
--- a/images/oc-build-deploy-dind/build-deploy.sh
+++ b/images/oc-build-deploy-dind/build-deploy.sh
@@ -48,8 +48,7 @@ docker login -u=jenkins -p="${DOCKER_REGISTRY_TOKEN}" ${OPENSHIFT_REGISTRY}
INTERNAL_REGISTRY_LOGGED_IN="false"
if [ ! -z ${INTERNAL_REGISTRY_URL} ] && [ ! -z ${INTERNAL_REGISTRY_USERNAME} ] && [ ! -z ${INTERNAL_REGISTRY_PASSWORD} ] ; then
- echo "docker login -u '${INTERNAL_REGISTRY_USERNAME}' -p '${INTERNAL_REGISTRY_PASSWORD}' ${INTERNAL_REGISTRY_URL}" | /bin/bash
- if [ "$?" -eq 0 ] ; then
+ if echo "docker login -u '${INTERNAL_REGISTRY_USERNAME}' -p '${INTERNAL_REGISTRY_PASSWORD}' ${INTERNAL_REGISTRY_URL}" | /bin/bash; then
INTERNAL_REGISTRY_LOGGED_IN="true"
fi
fi
@@ -75,8 +74,7 @@ do
fi
ADDITIONAL_YAML_COMMAND=$(cat .lagoon.yml | shyaml get-value additional-yaml.$ADDITIONAL_YAML.command apply)
- ADDITIONAL_YAML_IGNORE_ERROR=$(cat .lagoon.yml | shyaml get-value additional-yaml.$ADDITIONAL_YAML.ignore_error false)
- ADDITIONAL_YAML_IGNORE_ERROR="${ADDITIONAL_YAML_IGNORE_ERROR,,}" # convert to lowercase, as shyaml returns "True" if the yaml is set to "true"
+ ADDITIONAL_YAML_IGNORE_ERROR=$(set -o pipefail; cat .lagoon.yml | shyaml get-value additional-yaml.$ADDITIONAL_YAML.ignore_error false | tr '[:upper:]' '[:lower:]')
. /oc-build-deploy/scripts/exec-additional-yaml.sh
done
diff --git a/images/oc-build-deploy-dind/openshift-templates/backup-schedule.yml b/images/oc-build-deploy-dind/openshift-templates/backup-schedule.yml
index 330595fe5f..42cabaebf6 100644
--- a/images/oc-build-deploy-dind/openshift-templates/backup-schedule.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/backup-schedule.yml
@@ -32,13 +32,28 @@ parameters:
description: Registry where Images are pushed to
required: true
- name: BACKUP_SCHEDULE
- description: Schedule of the Backup in Cron format
+ description: Schedule of the Backup in Cron format or auto schedule format
required: true
- name: CHECK_SCHEDULE
- description: Schedule of the Backup Check in Cron format
+ description: Schedule of the Backup Check in Cron format or auto schedule format
required: true
- name: PRUNE_SCHEDULE
- description: Schedule of the Backup Prune in Cron format
+ description: Schedule of the Backup Prune in Cron format or auto schedule format
+ required: true
+ - name: BAAS_BUCKET_NAME
+ description: Which bucket the backups for this project should go to
+ required: true
+ - name: MONTHLY_BACKUP_RETENTION
+ description: How many monthly backups should be retained after pruning
+ value: '1'
+ - name: WEEKLY_BACKUP_RETENTION
+ description: How many weekly backups should be retained after pruning
+ value: '4'
+ - name: DAILY_BACKUP_RETENTION
+ description: How many daily backups should be retained after pruning
+ value: '7'
+ - name: ENVIRONMENT_TYPE
+ description: What type of environment this is (production/development)
required: true
objects:
- apiVersion: backup.appuio.ch/v1alpha1
@@ -55,13 +70,14 @@ objects:
key: repo-pw
name: baas-repo-pw
s3:
- bucket: 'baas-${SAFE_PROJECT}'
+ bucket: '${BAAS_BUCKET_NAME}'
backup:
schedule: '${BACKUP_SCHEDULE}'
check:
schedule: '${CHECK_SCHEDULE}'
prune:
retention:
- keepDaily: 7
- keepWeekly: 6
+ keepDaily: ${{DAILY_BACKUP_RETENTION}}
+ keepWeekly: ${{WEEKLY_BACKUP_RETENTION}}
+ keepMonthly: ${{MONTHLY_BACKUP_RETENTION}}
schedule: '${PRUNE_SCHEDULE}'
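Note the `${{PARAM}}` syntax for the retention values: in OpenShift templates `${PARAM}` always substitutes as a quoted string, while `${{PARAM}}` injects the raw value, so `keepDaily` and friends end up as the integers the backup CRD expects. A local test run might look like this (parameter values are examples; any further required parameters of the full template would need to be supplied as well):

```bash
oc process --local -f backup-schedule.yml -o yaml \
  -p BACKUP_SCHEDULE='25 1 * * *' \
  -p CHECK_SCHEDULE='@weekly-random' \
  -p PRUNE_SCHEDULE='@weekly-random' \
  -p BAAS_BUCKET_NAME='baas-example-project' \
  -p ENVIRONMENT_TYPE='production' \
  -p MONTHLY_BACKUP_RETENTION=1 \
  | grep -B1 -A3 'retention:'
```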
diff --git a/images/oc-build-deploy-dind/openshift-templates/configmap.yml b/images/oc-build-deploy-dind/openshift-templates/configmap.yml
index b7c060e664..ba591d01c5 100644
--- a/images/oc-build-deploy-dind/openshift-templates/configmap.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/configmap.yml
@@ -52,6 +52,7 @@ objects:
LAGOON_GIT_BRANCH: ${BRANCH}
LAGOON_SAFE_PROJECT: ${SAFE_PROJECT}
LAGOON_PROJECT: ${PROJECT}
+ LAGOON_ENVIRONMENT: ${SAFE_BRANCH}
LAGOON_ENVIRONMENT_TYPE: ${ENVIRONMENT_TYPE}
LAGOON_ROUTE: ${ROUTE}
LAGOON_ROUTES: ${ROUTES}
diff --git a/images/oc-build-deploy-dind/openshift-templates/elasticsearch-cluster/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/elasticsearch-cluster/prebackuppod.yml
index a62cb11699..6c1ae3770c 100644
--- a/images/oc-build-deploy-dind/openshift-templates/elasticsearch-cluster/prebackuppod.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/elasticsearch-cluster/prebackuppod.yml
@@ -78,7 +78,7 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
envFrom:
- configMapRef:
name: lagoon-env
@@ -126,7 +126,7 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
envFrom:
- configMapRef:
name: lagoon-env
@@ -174,11 +174,11 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
envFrom:
- configMapRef:
name: lagoon-env
- image: alpine
+ image: imagecache.amazeeio.cloud/library/alpine
imagePullPolicy: Always
name: ${SERVICE_NAME}-2-prebackuppod
volumeMounts:
diff --git a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml
index 09ccf0602b..48ce969aea 100644
--- a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml
@@ -73,6 +73,8 @@ objects:
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
annotations:
+ appuio.ch/backupCommand: /bin/sh -c "tar -cf - -C /usr/share/elasticsearch/data ."
+ backup.appuio.ch/fileExtension: .${SERVICE_NAME}.tar
lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE}
diff --git a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/elasticsearch/prebackuppod.yml
deleted file mode 100644
index 04373a49b2..0000000000
--- a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/prebackuppod.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-openshift-template-prebackuppod-elasticsearch
-parameters:
- - name: SERVICE_NAME
- description: Name of this service
- required: true
- - name: SERVICE_NAME_UPPERCASE
- description: Name of this service in uppercase
- required: true
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: SERVICE_ROUTER_URL
- description: URL of the Router for this service
- value: ""
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
- - name: REGISTRY
- description: Registry where Images are pushed to
- required: true
- - name: DEPLOYMENT_STRATEGY
- description: Strategy of Deploymentconfig
- value: "Recreate"
- - name: SERVICE_IMAGE
- description: Pullable image of service
- required: true
- - name: CRONJOBS
- description: Oneliner of Cronjobs
- value: ""
-objects:
-- apiVersion: backup.appuio.ch/v1alpha1
- kind: PreBackupPod
- metadata:
- name: ${SERVICE_NAME}-prebackuppod
- labels:
- service: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- spec:
- backupCommand: /bin/sh -c "tar -cf - -C /usr/share/elasticsearch/data ."
- fileExtension: .${SERVICE_NAME}.tar
- pod:
- metadata:
- labels:
- prebackuppod: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- parent: ${SERVICE_NAME}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: service
- operator: In
- values:
- - ${SERVICE_NAME}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: ${SERVICE_NAME}-prebackuppod
- volumeMounts:
- - mountPath: /usr/share/elasticsearch/data
- name: ${SERVICE_NAME}
- volumes:
- - name: ${SERVICE_NAME}
- persistentVolumeClaim:
- claimName: ${SERVICE_NAME}
diff --git a/images/oc-build-deploy-dind/openshift-templates/mariadb-dbaas/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/mariadb-dbaas/prebackuppod.yml
index a9eaaac3c7..a055536791 100644
--- a/images/oc-build-deploy-dind/openshift-templates/mariadb-dbaas/prebackuppod.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/mariadb-dbaas/prebackuppod.yml
@@ -39,7 +39,6 @@ parameters:
value: "Recreate"
- name: SERVICE_IMAGE
description: Pullable image of service
- required: true
- name: CRONJOBS
description: Oneliner of Cronjobs
value: ""
@@ -85,7 +84,7 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
env:
- name: BACKUP_DB_HOST
valueFrom:
@@ -107,6 +106,6 @@ objects:
configMapKeyRef:
key: ${SERVICE_NAME_UPPERCASE}_DATABASE
name: lagoon-env
- image: amazeeio/alpine-mysql-client
+ image: imagecache.amazeeio.cloud/amazeeio/alpine-mysql-client
imagePullPolicy: Always
name: ${SERVICE_NAME}-prebackuppod
diff --git a/images/oc-build-deploy-dind/openshift-templates/mariadb-shared/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/mariadb-shared/prebackuppod.yml
index a9eaaac3c7..a055536791 100644
--- a/images/oc-build-deploy-dind/openshift-templates/mariadb-shared/prebackuppod.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/mariadb-shared/prebackuppod.yml
@@ -39,7 +39,6 @@ parameters:
value: "Recreate"
- name: SERVICE_IMAGE
description: Pullable image of service
- required: true
- name: CRONJOBS
description: Oneliner of Cronjobs
value: ""
@@ -85,7 +84,7 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
env:
- name: BACKUP_DB_HOST
valueFrom:
@@ -107,6 +106,6 @@ objects:
configMapKeyRef:
key: ${SERVICE_NAME_UPPERCASE}_DATABASE
name: lagoon-env
- image: amazeeio/alpine-mysql-client
+ image: imagecache.amazeeio.cloud/amazeeio/alpine-mysql-client
imagePullPolicy: Always
name: ${SERVICE_NAME}-prebackuppod
diff --git a/images/oc-build-deploy-dind/openshift-templates/mongo/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/mongo/deployment.yml
index eca9728e79..11039e32c0 100644
--- a/images/oc-build-deploy-dind/openshift-templates/mongo/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/mongo/deployment.yml
@@ -71,6 +71,8 @@ objects:
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
annotations:
+ appuio.ch/backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /data/db ."
+ backup.appuio.ch/fileExtension: .${SERVICE_NAME}.tar
lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
volumes:
diff --git a/images/oc-build-deploy-dind/openshift-templates/mongo/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/mongo/prebackuppod.yml
deleted file mode 100644
index 54098f132a..0000000000
--- a/images/oc-build-deploy-dind/openshift-templates/mongo/prebackuppod.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-openshift-template-prebackuppod-mongo
-parameters:
- - name: SERVICE_NAME
- description: Name of this service
- required: true
- - name: SERVICE_NAME_UPPERCASE
- description: Name of this service in uppercase
- required: true
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: SERVICE_ROUTER_URL
- description: URL of the Router for this service
- value: ""
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
- - name: REGISTRY
- description: Registry where Images are pushed to
- required: true
- - name: DEPLOYMENT_STRATEGY
- description: Strategy of Deploymentconfig
- value: "Recreate"
- - name: SERVICE_IMAGE
- description: Pullable image of service
- required: true
- - name: CRONJOBS
- description: Oneliner of Cronjobs
- value: ""
-objects:
-- apiVersion: backup.appuio.ch/v1alpha1
- kind: PreBackupPod
- metadata:
- name: ${SERVICE_NAME}-prebackuppod
- labels:
- service: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /data/db ."
- fileExtension: .${SERVICE_NAME}.tar
- pod:
- metadata:
- labels:
- prebackuppod: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- parent: ${SERVICE_NAME}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: service
- operator: In
- values:
- - ${SERVICE_NAME}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: ${SERVICE_NAME}-prebackuppod
- volumeMounts:
- - mountPath: /data/db
- name: ${SERVICE_NAME}
- volumes:
- - name: ${SERVICE_NAME}
- persistentVolumeClaim:
- claimName: ${SERVICE_NAME}
diff --git a/images/oc-build-deploy-dind/openshift-templates/mongodb-dbaas/consumer.yml b/images/oc-build-deploy-dind/openshift-templates/mongodb-dbaas/consumer.yml
new file mode 100644
index 0000000000..81412ca7b6
--- /dev/null
+++ b/images/oc-build-deploy-dind/openshift-templates/mongodb-dbaas/consumer.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Template
+metadata:
+ creationTimestamp: null
+ name: lagoon-openshift-template-mongodb-dbaas-crd
+parameters:
+ - name: SERVICE_NAME
+ description: Name of this service
+ required: true
+ - name: SAFE_BRANCH
+ description: Which branch this belongs to, special chars replaced with dashes
+ required: true
+ - name: SAFE_PROJECT
+ description: Which project this belongs to, special chars replaced with dashes
+ required: true
+ - name: ENVIRONMENT
+ description: Environment or type of dbaas to choose
+ required: true
+objects:
+- apiVersion: mongodb.amazee.io/v1
+ kind: MongoDBConsumer
+ metadata:
+ name: ${SERVICE_NAME}
+ labels:
+ service: ${SERVICE_NAME}
+ branch: ${SAFE_BRANCH}
+ project: ${SAFE_PROJECT}
+ spec:
+ environment: ${ENVIRONMENT}
\ No newline at end of file
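A sketch of how the build script instantiates this template (values are illustrative; the real build pipes the output through its `outputToYaml` helper rather than applying directly):

```bash
oc process --local -o yaml \
  -f /oc-build-deploy/openshift-templates/mongodb-dbaas/consumer.yml \
  -p SERVICE_NAME='mongodb' \
  -p SAFE_BRANCH='feature-x' \
  -p SAFE_PROJECT='example-project' \
  -p ENVIRONMENT='development' \
  | oc -n example-namespace apply -f -
```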
diff --git a/images/oc-build-deploy-dind/openshift-templates/postgres/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/postgres/deployment.yml
index 32956a5f38..1cf12877f8 100644
--- a/images/oc-build-deploy-dind/openshift-templates/postgres/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/postgres/deployment.yml
@@ -70,6 +70,8 @@ objects:
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
annotations:
+ appuio.ch/backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/lib/postgresql/data ."
+ backup.appuio.ch/fileExtension: .${SERVICE_NAME}.tar
lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
volumes:
diff --git a/images/oc-build-deploy-dind/openshift-templates/postgres/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/postgres/prebackuppod.yml
deleted file mode 100644
index dd24383002..0000000000
--- a/images/oc-build-deploy-dind/openshift-templates/postgres/prebackuppod.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-openshift-template-prebackuppod-postgres
-parameters:
- - name: SERVICE_NAME
- description: Name of this service
- required: true
- - name: SERVICE_NAME_UPPERCASE
- description: Name of this service in uppercase
- required: true
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: SERVICE_ROUTER_URL
- description: URL of the Router for this service
- value: ""
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
- - name: REGISTRY
- description: Registry where Images are pushed to
- required: true
- - name: DEPLOYMENT_STRATEGY
- description: Strategy of Deploymentconfig
- value: "Recreate"
- - name: SERVICE_IMAGE
- description: Pullable image of service
- required: true
- - name: CRONJOBS
- description: Oneliner of Cronjobs
- value: ""
-objects:
-- apiVersion: backup.appuio.ch/v1alpha1
- kind: PreBackupPod
- metadata:
- name: ${SERVICE_NAME}-prebackuppod
- labels:
- service: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/lib/postgresql/data ."
- fileExtension: .${SERVICE_NAME}.tar
- pod:
- metadata:
- labels:
- prebackuppod: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- parent: ${SERVICE_NAME}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: service
- operator: In
- values:
- - ${SERVICE_NAME}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: ${SERVICE_NAME}-prebackuppod
- volumeMounts:
- - mountPath: /var/lib/postgresql/data
- name: ${SERVICE_NAME}
- volumes:
- - name: ${SERVICE_NAME}
- persistentVolumeClaim:
- claimName: ${SERVICE_NAME}
diff --git a/images/oc-build-deploy-dind/openshift-templates/rabbitmq-cluster/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/rabbitmq-cluster/prebackuppod.yml
index 097007ea16..847f750c99 100644
--- a/images/oc-build-deploy-dind/openshift-templates/rabbitmq-cluster/prebackuppod.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/rabbitmq-cluster/prebackuppod.yml
@@ -78,7 +78,7 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
envFrom:
- configMapRef:
name: lagoon-env
@@ -126,7 +126,7 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
envFrom:
- configMapRef:
name: lagoon-env
@@ -174,11 +174,11 @@ objects:
containers:
- args:
- sleep
- - '3600'
+ - infinity
envFrom:
- configMapRef:
name: lagoon-env
- image: alpine
+ image: imagecache.amazeeio.cloud/library/alpine
imagePullPolicy: Always
name: ${SERVICE_NAME}-2-prebackuppod
volumeMounts:
diff --git a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml
index 58e8ea4b4e..3799ea8605 100644
--- a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/rabbitmq/deployment.yml
@@ -69,6 +69,8 @@ objects:
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
annotations:
+ appuio.ch/backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/lib/rabbitmq ."
+ backup.appuio.ch/fileExtension: .${SERVICE_NAME}.tar
lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
priorityClassName: lagoon-priority-${ENVIRONMENT_TYPE}
diff --git a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/rabbitmq/prebackuppod.yml
deleted file mode 100644
index 316befd9d8..0000000000
--- a/images/oc-build-deploy-dind/openshift-templates/rabbitmq/prebackuppod.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-openshift-template-prebackuppod-rabbitmq
-parameters:
- - name: SERVICE_NAME
- description: Name of this service
- required: true
- - name: SERVICE_NAME_UPPERCASE
- description: Name of this service in uppercase
- required: true
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: SERVICE_ROUTER_URL
- description: URL of the Router for this service
- value: ""
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
- - name: REGISTRY
- description: Registry where Images are pushed to
- required: true
- - name: DEPLOYMENT_STRATEGY
- description: Strategy of Deploymentconfig
- value: "Recreate"
- - name: SERVICE_IMAGE
- description: Pullable image of service
- required: true
- - name: CRONJOBS
- description: Oneliner of Cronjobs
- value: ""
-objects:
-- apiVersion: backup.appuio.ch/v1alpha1
- kind: PreBackupPod
- metadata:
- name: ${SERVICE_NAME}-prebackuppod
- labels:
- service: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/lib/rabbitmq ."
- fileExtension: .${SERVICE_NAME}.tar
- pod:
- metadata:
- labels:
- prebackuppod: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- parent: ${SERVICE_NAME}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: service
- operator: In
- values:
- - ${SERVICE_NAME}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: ${SERVICE_NAME}-prebackuppod
- volumeMounts:
- - mountPath: /var/lib/rabbitmq
- name: ${SERVICE_NAME}
- volumes:
- - name: ${SERVICE_NAME}
- persistentVolumeClaim:
- claimName: ${SERVICE_NAME}
diff --git a/images/oc-build-deploy-dind/openshift-templates/redis-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/redis-persistent/deployment.yml
index 35aca8c5d1..86c322ab10 100644
--- a/images/oc-build-deploy-dind/openshift-templates/redis-persistent/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/redis-persistent/deployment.yml
@@ -79,6 +79,8 @@ objects:
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
annotations:
+ appuio.ch/backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /data ."
+ backup.appuio.ch/fileExtension: .${SERVICE_NAME}.tar
lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
volumes:
diff --git a/images/oc-build-deploy-dind/openshift-templates/redis-persistent/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/redis-persistent/prebackuppod.yml
deleted file mode 100644
index f00bcb283a..0000000000
--- a/images/oc-build-deploy-dind/openshift-templates/redis-persistent/prebackuppod.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-openshift-template-prebackuppod-redis-persistent
-parameters:
- - name: SERVICE_NAME
- description: Name of this service
- required: true
- - name: SERVICE_NAME_UPPERCASE
- description: Name of this service in uppercase
- required: true
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: SERVICE_ROUTER_URL
- description: URL of the Router for this service
- value: ""
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
- - name: REGISTRY
- description: Registry where Images are pushed to
- required: true
- - name: DEPLOYMENT_STRATEGY
- description: Strategy of Deploymentconfig
- value: "Recreate"
- - name: SERVICE_IMAGE
- description: Pullable image of service
- required: true
- - name: CRONJOBS
- description: Oneliner of Cronjobs
- value: ""
-objects:
-- apiVersion: backup.appuio.ch/v1alpha1
- kind: PreBackupPod
- metadata:
- name: ${SERVICE_NAME}-prebackuppod
- labels:
- service: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /data ."
- fileExtension: .${SERVICE_NAME}.tar
- pod:
- metadata:
- labels:
- prebackuppod: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- parent: ${SERVICE_NAME}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: service
- operator: In
- values:
- - ${SERVICE_NAME}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: ${SERVICE_NAME}-prebackuppod
- volumeMounts:
- - mountPath: /data
- name: ${SERVICE_NAME}
- volumes:
- - name: ${SERVICE_NAME}
- persistentVolumeClaim:
- claimName: ${SERVICE_NAME}
diff --git a/images/oc-build-deploy-dind/openshift-templates/solr/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/solr/deployment.yml
index 13829e7541..8380632646 100644
--- a/images/oc-build-deploy-dind/openshift-templates/solr/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/solr/deployment.yml
@@ -73,6 +73,8 @@ objects:
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
annotations:
+ appuio.ch/backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/solr ."
+ backup.appuio.ch/fileExtension: .${SERVICE_NAME}.tar
lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
volumes:
diff --git a/images/oc-build-deploy-dind/openshift-templates/solr/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/solr/prebackuppod.yml
deleted file mode 100644
index 83530208f6..0000000000
--- a/images/oc-build-deploy-dind/openshift-templates/solr/prebackuppod.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-openshift-template-prebackuppod-solr
-parameters:
- - name: SERVICE_NAME
- description: Name of this service
- required: true
- - name: SERVICE_NAME_UPPERCASE
- description: Name of this service in uppercase
- required: true
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: SERVICE_ROUTER_URL
- description: URL of the Router for this service
- value: ""
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
- - name: REGISTRY
- description: Registry where Images are pushed to
- required: true
- - name: DEPLOYMENT_STRATEGY
- description: Strategy of Deploymentconfig
- value: "Recreate"
- - name: SERVICE_IMAGE
- description: Pullable image of service
- required: true
- - name: CRONJOBS
- description: Oneliner of Cronjobs
- value: ""
-objects:
-- apiVersion: backup.appuio.ch/v1alpha1
- kind: PreBackupPod
- metadata:
- name: ${SERVICE_NAME}-prebackuppod
- labels:
- service: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/solr ."
- fileExtension: .${SERVICE_NAME}.tar
- pod:
- metadata:
- labels:
- prebackuppod: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- parent: ${SERVICE_NAME}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: service
- operator: In
- values:
- - ${SERVICE_NAME}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: ${SERVICE_NAME}-prebackuppod
- volumeMounts:
- - mountPath: /var/solr
- name: ${SERVICE_NAME}
- volumes:
- - name: ${SERVICE_NAME}
- persistentVolumeClaim:
- claimName: ${SERVICE_NAME}
diff --git a/images/oc-build-deploy-dind/openshift-templates/varnish-persistent/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/varnish-persistent/deployment.yml
index addfee86bc..b025911eec 100644
--- a/images/oc-build-deploy-dind/openshift-templates/varnish-persistent/deployment.yml
+++ b/images/oc-build-deploy-dind/openshift-templates/varnish-persistent/deployment.yml
@@ -70,6 +70,8 @@ objects:
branch: ${SAFE_BRANCH}
project: ${SAFE_PROJECT}
annotations:
+ appuio.ch/backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/cache/varnish ."
+ backup.appuio.ch/fileExtension: .${SERVICE_NAME}.tar
lagoon.sh/configMapSha: ${CONFIG_MAP_SHA}
spec:
tolerations:
diff --git a/images/oc-build-deploy-dind/openshift-templates/varnish-persistent/prebackuppod.yml b/images/oc-build-deploy-dind/openshift-templates/varnish-persistent/prebackuppod.yml
deleted file mode 100644
index f24059bc59..0000000000
--- a/images/oc-build-deploy-dind/openshift-templates/varnish-persistent/prebackuppod.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: lagoon-openshift-template-prebackuppod-varnish-persistent
-parameters:
- - name: SERVICE_NAME
- description: Name of this service
- required: true
- - name: SERVICE_NAME_UPPERCASE
- description: Name of this service in uppercase
- required: true
- - name: SAFE_BRANCH
- description: Which branch this belongs to, special chars replaced with dashes
- required: true
- - name: SAFE_PROJECT
- description: Which project this belongs to, special chars replaced with dashes
- required: true
- - name: BRANCH
- description: Which branch this belongs to, original value
- required: true
- - name: PROJECT
- description: Which project this belongs to, original value
- required: true
- - name: LAGOON_GIT_SHA
- description: git hash sha of the current deployment
- required: true
- - name: SERVICE_ROUTER_URL
- description: URL of the Router for this service
- value: ""
- - name: OPENSHIFT_PROJECT
- description: Name of the Project that this service is in
- required: true
- - name: REGISTRY
- description: Registry where Images are pushed to
- required: true
- - name: DEPLOYMENT_STRATEGY
- description: Strategy of Deploymentconfig
- value: "Recreate"
- - name: SERVICE_IMAGE
- description: Pullable image of service
- required: true
- - name: CRONJOBS
- description: Oneliner of Cronjobs
- value: ""
-objects:
-- apiVersion: backup.appuio.ch/v1alpha1
- kind: PreBackupPod
- metadata:
- name: ${SERVICE_NAME}-prebackuppod
- labels:
- service: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- spec:
- backupCommand: /bin/sh -c "/bin/busybox tar -cf - -C /var/cache/varnish ."
- fileExtension: .${SERVICE_NAME}.tar
- pod:
- metadata:
- labels:
- prebackuppod: ${SERVICE_NAME}
- branch: ${SAFE_BRANCH}
- project: ${SAFE_PROJECT}
- parent: ${SERVICE_NAME}
- spec:
- affinity:
- podAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: service
- operator: In
- values:
- - ${SERVICE_NAME}
- topologyKey: kubernetes.io/hostname
- weight: 100
- containers:
- - args:
- - sleep
- - '3600'
- envFrom:
- - configMapRef:
- name: lagoon-env
- image: alpine
- imagePullPolicy: Always
- name: ${SERVICE_NAME}-prebackuppod
- volumeMounts:
- - mountPath: /var/cache/varnish
- name: ${SERVICE_NAME}
- volumes:
- - name: ${SERVICE_NAME}
- persistentVolumeClaim:
- claimName: ${SERVICE_NAME}
diff --git a/images/oc-build-deploy-dind/scripts/exec-openshift-copy-to-registry.sh b/images/oc-build-deploy-dind/scripts/exec-openshift-copy-to-registry.sh
new file mode 100644
index 0000000000..a04bb22596
--- /dev/null
+++ b/images/oc-build-deploy-dind/scripts/exec-openshift-copy-to-registry.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+skopeo copy --dest-tls-verify=false docker://${IMAGECACHE_REGISTRY}/${PULL_IMAGE} docker://${OPENSHIFT_REGISTRY}/${OPENSHIFT_PROJECT}/${IMAGE_NAME}:${IMAGE_TAG:-latest}
+
+if [ "${INTERNAL_REGISTRY_LOGGED_IN}" = "true" ] ; then
+ skopeo copy --dest-tls-verify=false docker://${IMAGECACHE_REGISTRY}/${PULL_IMAGE} docker://${INTERNAL_REGISTRY_URL}/${PROJECT}/${SAFE_BRANCH}/${IMAGE_NAME}:${IMAGE_TAG:-latest} || true
+fi
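The new copy script takes all of its coordinates from environment variables. A minimal smoke-test invocation might look like the following; every value is an illustrative placeholder rather than anything defined in this change:

```bash
# Hypothetical invocation of exec-openshift-copy-to-registry.sh;
# all registry hosts, project and image names below are placeholders.
export IMAGECACHE_REGISTRY=imagecache.example.com
export PULL_IMAGE=library/nginx:latest
export OPENSHIFT_REGISTRY=172.30.1.1:5000
export OPENSHIFT_PROJECT=my-project
export IMAGE_NAME=nginx
export IMAGE_TAG=latest
export INTERNAL_REGISTRY_LOGGED_IN=false   # skip the optional second push

./exec-openshift-copy-to-registry.sh
```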
diff --git a/images/oc-build-deploy-dind/scripts/exec-openshift-mongodb-dbaas.sh b/images/oc-build-deploy-dind/scripts/exec-openshift-mongodb-dbaas.sh
new file mode 100644
index 0000000000..5a36a4c12e
--- /dev/null
+++ b/images/oc-build-deploy-dind/scripts/exec-openshift-mongodb-dbaas.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# The operator can sometimes take a while; wait until the details are available.
+# We allow up to 15 minutes (180 retries, 5 seconds apart) before exiting.
+OPERATOR_COUNTER=1
+OPERATOR_TIMEOUT=180
+# use the secret name from the consumer to prevent credential clash
+until oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database
+do
+if [ $OPERATOR_COUNTER -lt $OPERATOR_TIMEOUT ]; then
+ let OPERATOR_COUNTER=OPERATOR_COUNTER+1
+ echo "Service for ${SERVICE_NAME} not available yet, waiting 5 seconds"
+ sleep 5
+else
+ echo "Timeout of $OPERATOR_TIMEOUT for ${SERVICE_NAME} creation reached"
+ exit 1
+fi
+done
+set +x
+# Grab the details from the consumer spec
+DB_HOST=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.services.primary)
+DB_USER=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.username)
+DB_PASSWORD=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.password)
+DB_NAME=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.consumer.database)
+DB_PORT=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.port)
+DB_AUTHSOURCE=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.auth.source)
+DB_AUTHMECHANISM=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.auth.mechanism)
+DB_AUTHTLS=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get mongodbconsumer/${SERVICE_NAME} -o yaml | shyaml get-value spec.provider.auth.tls)
+
+# Add credentials to our configmap, prefixed with the uppercase name of this service
+oc patch --insecure-skip-tls-verify \
+ -n ${OPENSHIFT_PROJECT} \
+ configmap lagoon-env \
+ -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\", \"${SERVICE_NAME_UPPERCASE}_AUTHSOURCE\":\"${DB_AUTHSOURCE}\", \"${SERVICE_NAME_UPPERCASE}_AUTHMECHANISM\":\"${DB_AUTHMECHANISM}\", \"${SERVICE_NAME_UPPERCASE}_AUTHTLS\":\"${DB_AUTHTLS}\" }}"
+
+set -x
\ No newline at end of file
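The configmap patch above exposes the consumer's credentials under the service's uppercase prefix. As a hedged sketch (assuming SERVICE_NAME_UPPERCASE resolved to MONGODB), an application container could assemble a standard connection string from those values:

```bash
# Illustrative only: build a mongodb:// URI from the lagoon-env values
# written by the script above; assumes the service was named "mongodb".
MONGODB_URI="mongodb://${MONGODB_USERNAME}:${MONGODB_PASSWORD}@${MONGODB_HOST}:${MONGODB_PORT}/${MONGODB_DATABASE}?authSource=${MONGODB_AUTHSOURCE}&authMechanism=${MONGODB_AUTHMECHANISM}&tls=${MONGODB_AUTHTLS}"
echo "${MONGODB_URI}"
```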
diff --git a/images/oc-build-deploy-dind/scripts/exec-openshift-tag-dockerhub.sh b/images/oc-build-deploy-dind/scripts/exec-openshift-tag-dockerhub.sh
deleted file mode 100644
index 62abcd80ac..0000000000
--- a/images/oc-build-deploy-dind/scripts/exec-openshift-tag-dockerhub.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} tag --reference-policy=local --source=docker ${PULL_IMAGE} ${OPENSHIFT_PROJECT}/${IMAGE_NAME}:latest
diff --git a/images/oc/Dockerfile b/images/oc/Dockerfile
index a7c5e72b5a..169d13b549 100644
--- a/images/oc/Dockerfile
+++ b/images/oc/Dockerfile
@@ -1,12 +1,12 @@
-ARG ALPINE_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM golang:1.13-alpine${ALPINE_VERSION} as golang
+ARG UPSTREAM_REPO
+ARG UPSTREAM_TAG
+FROM ${UPSTREAM_REPO:-uselagoon}/commons:${UPSTREAM_TAG:-latest} as commons
+FROM golang:1.13-alpine3.12 as golang
RUN apk add --no-cache git
RUN go get github.com/a8m/envsubst/cmd/envsubst
-FROM docker:19.03.10
+FROM docker:19.03.14
LABEL maintainer="amazee.io"
ENV LAGOON=oc
@@ -41,7 +41,7 @@ ENV OC_VERSION=v3.11.0 \
# To run the openshift client library `oc` we need glibc, install that first. Copied from https://github.com/jeanblanchard/docker-alpine-glibc/blob/master/Dockerfile
RUN apk add -U --repository http://dl-cdn.alpinelinux.org/alpine/edge/testing aufs-util && \
- apk add --update openssl curl jq parallel && \
+ apk add --update openssl curl jq parallel skopeo && \
curl -Lo /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \
curl -Lo glibc.apk "https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-${GLIBC_VERSION}.apk" && \
curl -Lo glibc-bin.apk "https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC_VERSION}/glibc-bin-${GLIBC_VERSION}.apk" && \
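The oc image now takes UPSTREAM_REPO and UPSTREAM_TAG build arguments instead of IMAGE_REPO and ALPINE_VERSION. A hedged example of supplying them at build time (the repository and tag values are placeholders):

```bash
# Illustrative build of the oc image with the new upstream arguments;
# the repository and tag values are placeholders.
docker build \
  --build-arg UPSTREAM_REPO=uselagoon \
  --build-arg UPSTREAM_TAG=latest \
  -t lagoon/oc images/oc
```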
diff --git a/images/php/cli-drupal/Dockerfile b/images/php/cli-drupal/Dockerfile
deleted file mode 100644
index a228a40adb..0000000000
--- a/images/php/cli-drupal/Dockerfile
+++ /dev/null
@@ -1,28 +0,0 @@
-ARG PHP_VERSION
-ARG PHP_IMAGE_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/php:${PHP_VERSION}-cli
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=cli-drupal
-
-# Defining Versions - https://github.com/hechoendrupal/drupal-console-launcher/releases
-ENV DRUPAL_CONSOLE_LAUNCHER_VERSION=1.9.4 \
- DRUPAL_CONSOLE_LAUNCHER_SHA=b7759279668caf915b8e9f3352e88f18e4f20659 \
- DRUSH_VERSION=8.3.5 \
- DRUSH_LAUNCHER_VERSION=0.6.0 \
- DRUSH_LAUNCHER_FALLBACK=/opt/drush8/vendor/bin/drush
-
-RUN curl -sSLo /usr/local/bin/drupal "https://github.com/hechoendrupal/drupal-console-launcher/releases/download/${DRUPAL_CONSOLE_LAUNCHER_VERSION}/drupal.phar" \
- && echo "${DRUPAL_CONSOLE_LAUNCHER_SHA} /usr/local/bin/drupal" | sha1sum \
- && chmod +x /usr/local/bin/drupal \
- && mkdir -p /opt/drush8 \
- && php /usr/local/bin/composer init -n -d /opt/drush8 --require=drush/drush:${DRUSH_VERSION} \
- && php -d memory_limit=-1 /usr/local/bin/composer update -n -d /opt/drush8 \
- && curl -sSLo /usr/local/bin/drush "https://github.com/drush-ops/drush-launcher/releases/download/${DRUSH_LAUNCHER_VERSION}/drush.phar" \
- && chmod +x /usr/local/bin/drush \
- && mkdir -p /home/.drush
-
-COPY drushrc.php drush.yml /home/.drush/
-
-RUN fix-permissions /home/.drush
diff --git a/images/php/cli-drupal/drush.yml b/images/php/cli-drupal/drush.yml
deleted file mode 100644
index 4bab95e5ef..0000000000
--- a/images/php/cli-drupal/drush.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-# Lagoon global drush.yml file
-# This file tells Drush 9 about the lagoon environment
-
-options:
- root: '/app/${env.WEBROOT}'
- uri: '${env.LAGOON_ROUTE}'
\ No newline at end of file
diff --git a/images/php/cli-drupal/drushrc.php b/images/php/cli-drupal/drushrc.php
deleted file mode 100644
index ee9aad89e4..0000000000
--- a/images/php/cli-drupal/drushrc.php
+++ /dev/null
@@ -1,23 +0,0 @@
- TRUE, 'no-perms' => TRUE, 'no-group' => TRUE, 'no-owner' => TRUE, 'chmod' => 'ugo=rwX');
\ No newline at end of file
diff --git a/images/php/cli/05-ssh-key.sh b/images/php/cli/05-ssh-key.sh
deleted file mode 100755
index cfab3b42f3..0000000000
--- a/images/php/cli/05-ssh-key.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-set -e
-
-# If there is an ssh key injected via lagoon and kubernetes, we use that
-if [ -f /var/run/secrets/lagoon/sshkey/ssh-privatekey ]; then
- cp -f /var/run/secrets/lagoon/sshkey/ssh-privatekey /home/.ssh/key
-# If there is an env variable SSH_PRIVATE_KEY we use that
-elif [ ! -z "$SSH_PRIVATE_KEY" ]; then
- echo -e "$SSH_PRIVATE_KEY" > /home/.ssh/key
-# If there is an env variable LAGOON_SSH_PRIVATE_KEY we use that
-elif [ ! -z "$LAGOON_SSH_PRIVATE_KEY" ]; then
- echo -e "$LAGOON_SSH_PRIVATE_KEY" > /home/.ssh/key
-fi
-
-if [ -f /home/.ssh/key ]; then
- # add a newline to the key. OpenSSH is very picky that keys always end with a newline
- echo >> /home/.ssh/key
- # Fix permissions of SSH key
- chmod 600 /home/.ssh/key
-fi
diff --git a/images/php/cli/10-ssh-agent.sh b/images/php/cli/10-ssh-agent.sh
deleted file mode 100644
index e75f44581e..0000000000
--- a/images/php/cli/10-ssh-agent.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-set -e
-
-# Test if pygmy or cachalot ssh-agents are mounted and symlink them as our known ssh-auth-sock file.
-# This will only be used in local development
-if [ -S /tmp/amazeeio_ssh-agent/socket ]; then
- ln -sf /tmp/amazeeio_ssh-agent/socket $SSH_AUTH_SOCK
-# Use the existing key instead (which was generated from 05-ssh-key.sh)
-elif [ -f /home/.ssh/key ]; then
- rm -f $SSH_AUTH_SOCK
- eval $(ssh-agent -a $SSH_AUTH_SOCK)
- ssh-add /home/.ssh/key
-fi
diff --git a/images/php/cli/55-cli-helpers.sh b/images/php/cli/55-cli-helpers.sh
deleted file mode 100644
index ea7133ce41..0000000000
--- a/images/php/cli/55-cli-helpers.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-dsql () {
- drush sql-sync $1 @self
-}
-
-dfiles () {
- drush rsync $1:%files @self:%files
-}
diff --git a/images/php/cli/61-php-xdebug-cli-env.sh b/images/php/cli/61-php-xdebug-cli-env.sh
deleted file mode 100755
index 3db4afa3f4..0000000000
--- a/images/php/cli/61-php-xdebug-cli-env.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# Only if XDEBUG_ENABLE is not empty
-if [ ! -z ${XDEBUG_ENABLE} ]; then
- # XDEBUG_CONFIG is used by xdebug to decide if an xdebug session should be started in the CLI or not.
- # The content doesn't really matter; it just needs to be set. The actual connection details are loaded from /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
- export XDEBUG_CONFIG="idekey=lagoon"
-
- # PHP_IDE_CONFIG is used by PhpStorm and should be the URL of the project, we use the `LAGOON_ROUTE` for it (if it exists)
- if [ ${LAGOON_ROUTE+x} ]; then
- SERVERNAME=$(echo $LAGOON_ROUTE | sed 's/https\?:\/\///')
- else
- SERVERNAME="lagoon"
- fi
- export PHP_IDE_CONFIG="serverName=${SERVERNAME}"
-fi
-
diff --git a/images/php/cli/80-shell-timeout.sh b/images/php/cli/80-shell-timeout.sh
deleted file mode 100644
index fdc02f389e..0000000000
--- a/images/php/cli/80-shell-timeout.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# If we are running within kubernetes, set a shell timeout of 10mins.
-# We do that so old shells are closed and we can idle the cli container
-if [ $KUBERNETES_PORT ]; then
- TMOUT=600
-fi
\ No newline at end of file
diff --git a/images/php/cli/90-composer-path.sh b/images/php/cli/90-composer-path.sh
deleted file mode 100644
index f18e7d1bf5..0000000000
--- a/images/php/cli/90-composer-path.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
-add_to_PATH () {
- for d; do
- case ":$PATH:" in
- *":$d:"*) :;;
- *) PATH=$d:$PATH;;
- esac
- done
-}
-
-add_to_PATH /home/.composer/vendor/bin
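The add_to_PATH helper above only prepends a directory when it is not already part of PATH, so sourcing the script repeatedly is safe. A quick illustration (the directory is a placeholder):

```bash
# Illustrative: calling add_to_PATH twice with the same placeholder
# directory leaves PATH with a single copy of it.
add_to_PATH /opt/tools/bin
add_to_PATH /opt/tools/bin
echo "$PATH"   # /opt/tools/bin appears only once
```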
diff --git a/images/php/cli/90-mariadb-envplate.sh b/images/php/cli/90-mariadb-envplate.sh
deleted file mode 100644
index 5f8dbe0a45..0000000000
--- a/images/php/cli/90-mariadb-envplate.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-set -eo pipefail
-
-/bin/ep /etc/my.cnf.d/*
diff --git a/images/php/cli/Dockerfile b/images/php/cli/Dockerfile
deleted file mode 100644
index cc6aa703f9..0000000000
--- a/images/php/cli/Dockerfile
+++ /dev/null
@@ -1,82 +0,0 @@
-ARG PHP_VERSION
-ARG IMAGE_REPO
-ARG PHP_IMAGE_VERSION
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM ${IMAGE_REPO:-lagoon}/php:${PHP_VERSION}-fpm
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=cli
-
-# Defining Versions - Composer
-# @see https://getcomposer.org/download/
-ENV COMPOSER_VERSION=1.10.9 \
- COMPOSER_HASH_SHA256=70d6b9c3e0774b398a372dcb7f89dfe22fc25884e6e09ebf277286dd64cfaf35
-
-COPY --from=commons /bin/entrypoint-readiness /bin/
-
-RUN apk add --no-cache git \
- unzip \
- gzip \
- bash \
- tini \
- openssh-client \
- rsync \
- patch \
- procps \
- coreutils \
- mariadb-client \
- postgresql-client \
- mongodb-tools \
- openssh-sftp-server \
- findutils \
- nodejs-current \
- nodejs-npm \
- yarn \
- && ln -s /usr/lib/ssh/sftp-server /usr/local/bin/sftp-server \
- && rm -rf /var/cache/apk/* \
- && curl -sSLo /usr/local/bin/composer https://github.com/composer/composer/releases/download/${COMPOSER_VERSION}/composer.phar \
- && echo "$COMPOSER_HASH_SHA256 /usr/local/bin/composer" | sha256sum \
- && chmod +x /usr/local/bin/composer \
- && php -d memory_limit=-1 /usr/local/bin/composer global require hirak/prestissimo \
- && mkdir -p /home/.ssh \
- && fix-permissions /home/
-
-# Adding Composer vendor bin path to $PATH.
-ENV PATH="/home/.composer/vendor/bin:${PATH}"
-# We not only use "export $PATH" as this could be overwritten again
-# like it happens in /etc/profile of alpine Images.
-COPY 90-composer-path.sh /lagoon/entrypoints/
-
-# Remove warning about running as root in composer
-ENV COMPOSER_ALLOW_SUPERUSER=1
-
-# Making sure the path is not only added during entrypoint, but also when creating a new shell
-RUN echo "source /lagoon/entrypoints/90-composer-path.sh" >> /home/.bashrc
-
-# Make sure shells are not running forever
-COPY 80-shell-timeout.sh /lagoon/entrypoints/
-RUN echo "source /lagoon/entrypoints/80-shell-timeout.sh" >> /home/.bashrc
-
-# Make sure xdebug is automatically enabled also for cli scripts
-COPY 61-php-xdebug-cli-env.sh /lagoon/entrypoints/
-RUN echo "source /lagoon/entrypoints/61-php-xdebug-cli-env.sh" >> /home/.bashrc
-
-# Copy mariadb-client configuration.
-COPY 90-mariadb-envplate.sh /lagoon/entrypoints/
-COPY mariadb-client.cnf /etc/my.cnf.d/
-RUN fix-permissions /etc/my.cnf.d/
-
-# helper functions
-COPY 55-cli-helpers.sh /lagoon/entrypoints/
-RUN echo "source /lagoon/entrypoints/55-cli-helpers.sh" >> /home/.bashrc
-
-# SSH Key and Agent Setup
-COPY 05-ssh-key.sh /lagoon/entrypoints/
-COPY 10-ssh-agent.sh /lagoon/entrypoints/
-COPY ssh_config /etc/ssh/ssh_config
-COPY id_ed25519_lagoon_cli.key /home/.ssh/lagoon_cli.key
-RUN chmod 400 /home/.ssh/lagoon_cli.key
-ENV SSH_AUTH_SOCK=/tmp/ssh-agent
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["/bin/docker-sleep"]
diff --git a/images/php/cli/README.md b/images/php/cli/README.md
deleted file mode 100644
index 105a752f06..0000000000
--- a/images/php/cli/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Alpine PHP CLI Image
-
-## Use another Node.js Version
-
-By default this image ships with the current Node.js version (v9 at the time of writing). If you need another version, you can remove the current one and install the one of your choice.
-
-Add these commands as parts of your customized Dockerfile within `RUN` commands.
-
-#### Remove current version (needed for installing any other Version)
-
- RUN apk del --no-cache nodejs-current yarn --repository http://dl-cdn.alpinelinux.org/alpine/edge/main/ --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/
-
-#### Install Node.js Version 6
-
- RUN apk add --no-cache nodejs yarn --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/
-
-#### Install Node.js Version 8
-
- RUN apk add --no-cache nodejs yarn --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ --repository http://dl-cdn.alpinelinux.org/alpine/edge/main/
diff --git a/images/php/cli/id_ed25519_lagoon_cli.key b/images/php/cli/id_ed25519_lagoon_cli.key
deleted file mode 100644
index 3e4a43f9e5..0000000000
--- a/images/php/cli/id_ed25519_lagoon_cli.key
+++ /dev/null
@@ -1,7 +0,0 @@
------BEGIN OPENSSH PRIVATE KEY-----
-b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
-QyNTUxOQAAACCFGJJXzBNtfjlGlt7lsXsiM0WDSGVcRSkcF22WS0ErMgAAAJB8dgK3fHYC
-twAAAAtzc2gtZWQyNTUxOQAAACCFGJJXzBNtfjlGlt7lsXsiM0WDSGVcRSkcF22WS0ErMg
-AAAEC0XD5LwsT4v+f/DsslBBOZT3kW17Br+jdXvfoReRfjN4UYklfME21+OUaW3uWxeyIz
-RYNIZVxFKRwXbZZLQSsyAAAACmxhZ29vbi1jbGkBAgM=
------END OPENSSH PRIVATE KEY-----
diff --git a/images/php/cli/mariadb-client.cnf b/images/php/cli/mariadb-client.cnf
deleted file mode 100644
index ddae2ddd2e..0000000000
--- a/images/php/cli/mariadb-client.cnf
+++ /dev/null
@@ -1,2 +0,0 @@
-[client]
-max_allowed_packet = ${MARIADB_MAX_ALLOWED_PACKET:-64M}
diff --git a/images/php/cli/ssh_config b/images/php/cli/ssh_config
deleted file mode 100644
index 0927994629..0000000000
--- a/images/php/cli/ssh_config
+++ /dev/null
@@ -1,7 +0,0 @@
-Host *
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
- IdentityFile /home/.ssh/lagoon_cli.key
- IdentityFile /home/.ssh/key
- ServerAliveInterval 60
- ServerAliveCountMax 1440
diff --git a/images/php/fpm/00-lagoon-php.ini.tpl b/images/php/fpm/00-lagoon-php.ini.tpl
deleted file mode 100644
index ba428eaf3c..0000000000
--- a/images/php/fpm/00-lagoon-php.ini.tpl
+++ /dev/null
@@ -1,17 +0,0 @@
-[PHP]
-max_execution_time = ${PHP_MAX_EXECUTION_TIME:-900}
-max_input_vars = ${PHP_MAX_INPUT_VARS:-2000}
-max_file_uploads = ${PHP_MAX_FILE_UPLOADS:-20}
-memory_limit = ${PHP_MEMORY_LIMIT:-400M}
-display_errors = ${PHP_DISPLAY_ERRORS:-Off}
-display_startup_errors = ${PHP_DISPLAY_STARTUP_ERRORS:-Off}
-auto_prepend_file = ${PHP_AUTO_PREPEND_FILE:-none}
-auto_append_file = ${PHP_AUTO_APPEND_FILE:-none}
-error_reporting = ${PHP_ERROR_REPORTING:-E_ALL & ~E_DEPRECATED & ~E_STRICT}
-
-[APC]
-apc.shm_size = ${PHP_APC_SHM_SIZE:-32m}
-apc.enabled = ${PHP_APC_ENABLED:-1}
-
-[xdebug]
-xdebug.remote_enable = on
diff --git a/images/php/fpm/Dockerfile b/images/php/fpm/Dockerfile
deleted file mode 100644
index f2a3d3b1a7..0000000000
--- a/images/php/fpm/Dockerfile
+++ /dev/null
@@ -1,136 +0,0 @@
-ARG PHP_VERSION
-ARG PHP_IMAGE_VERSION
-ARG ALPINE_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-
-FROM composer:latest as healthcheckbuilder
-
-RUN composer create-project --no-dev amazeeio/healthz-php /healthz-php v0.0.6
-
-FROM php:${PHP_IMAGE_VERSION}-fpm-alpine${ALPINE_VERSION}
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=php
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-# Copy healthcheck files
-
-COPY --from=healthcheckbuilder /healthz-php /healthz-php
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-COPY check_fcgi /usr/sbin/
-COPY entrypoints/70-php-config.sh entrypoints/60-php-xdebug.sh entrypoints/50-ssmtp.sh entrypoints/71-php-newrelic.sh /lagoon/entrypoints/
-
-COPY php.ini /usr/local/etc/php/
-COPY 00-lagoon-php.ini.tpl /usr/local/etc/php/conf.d/
-COPY php-fpm.d/www.conf /usr/local/etc/php-fpm.d/www.conf
-COPY ssmtp.conf /etc/ssmtp/ssmtp.conf
-
-# New Relic PHP Agent.
-# @see https://docs.newrelic.com/docs/release-notes/agent-release-notes/php-release-notes/
-# @see https://docs.newrelic.com/docs/agents/php-agent/getting-started/php-agent-compatibility-requirements
-ENV NEWRELIC_VERSION=9.12.0.268
-
-RUN apk add --no-cache --repository http://dl-cdn.alpinelinux.org/alpine/v3.12/main/ 'curl>7.68' 'libcurl>7.68'
-
-RUN apk add --no-cache fcgi \
- ssmtp \
- libzip libzip-dev \
- # for gd
- libpng-dev \
- libjpeg-turbo-dev \
- # for gettext
- gettext-dev \
- # for mcrypt
- libmcrypt-dev \
- # for soap
- libxml2-dev \
- # for xsl
- libxslt-dev \
- libgcrypt-dev \
- # for webp
- libwebp-dev \
- postgresql-dev \
- # for yaml
- yaml-dev \
- # for imagemagick
- imagemagick \
- imagemagick-libs \
- imagemagick-dev \
- && apk add --no-cache --virtual .phpize-deps $PHPIZE_DEPS \
- && yes '' | pecl install -f apcu \
- && yes '' | pecl install -f xdebug \
- && yes '' | pecl install -f yaml \
- && yes '' | pecl install -f redis-4.3.0 \
- && yes '' | pecl install -f imagick \
- && docker-php-ext-enable apcu redis xdebug imagick \
- && case ${PHP_VERSION} in \
- 7.4*) \
- docker-php-ext-configure gd --with-webp --with-jpeg \
- ;; \
- *) \
- docker-php-ext-configure gd --with-webp-dir=/usr/include/ --with-jpeg-dir=/usr/include/ \
- ;; \
- esac \
- && docker-php-ext-install -j4 bcmath gd gettext pdo_mysql mysqli pdo_pgsql pgsql shmop soap sockets opcache xsl zip \
- && sed -i '1s/^/;Intentionally disabled. Enable via setting env variable XDEBUG_ENABLE to true\n;/' /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini \
- && rm -rf /var/cache/apk/* /tmp/pear/ \
- && apk del .phpize-deps \
- && echo "extension=yaml.so" > /usr/local/etc/php/conf.d/yaml.ini \
- && mkdir -p /tmp/newrelic && cd /tmp/newrelic \
- && curl -sSLO https://download.newrelic.com/php_agent/archive/${NEWRELIC_VERSION}/newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz \
- && gzip -dc newrelic-php5-${NEWRELIC_VERSION}-linux-musl.tar.gz | tar --strip-components=1 -xf - \
- && NR_INSTALL_USE_CP_NOT_LN=1 NR_INSTALL_SILENT=1 ./newrelic-install install \
- && sed -i -e "s/newrelic.appname = .*/newrelic.appname = \"\${LAGOON_PROJECT:-noproject}-\${LAGOON_GIT_SAFE_BRANCH:-nobranch}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
- && sed -i -e "s/;newrelic.enabled = .*/newrelic.enabled = \${NEWRELIC_ENABLED:-false}/" /usr/local/etc/php/conf.d/newrelic.ini \
- && sed -i -e "s/;newrelic.browser_monitoring.auto_instrument = .*/newrelic.browser_monitoring.auto_instrument = \${NEWRELIC_BROWSER_MONITORING_ENABLED:-true}/" /usr/local/etc/php/conf.d/newrelic.ini \
- && sed -i -e "s/newrelic.license = .*/newrelic.license = \"\${NEWRELIC_LICENSE:-}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
- && sed -i -e "s/;newrelic.loglevel = .*/newrelic.loglevel = \"\${NEWRELIC_LOG_LEVEL:-warning}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
- && sed -i -e "s/;newrelic.daemon.loglevel = .*/newrelic.daemon.loglevel = \"\${NEWRELIC_DAEMON_LOG_LEVEL:-warning}\"/" /usr/local/etc/php/conf.d/newrelic.ini \
- && sed -i -e "s/newrelic.logfile = .*/newrelic.logfile = \"\/dev\/stdout\"/" /usr/local/etc/php/conf.d/newrelic.ini \
- && sed -i -e "s/newrelic.daemon.logfile = .*/newrelic.daemon.logfile = \"\/dev\/stdout\"/" /usr/local/etc/php/conf.d/newrelic.ini \
- && mv /usr/local/etc/php/conf.d/newrelic.ini /usr/local/etc/php/conf.d/newrelic.disable \
- && cd / && rm -rf /tmp/newrelic \
- && mkdir -p /app \
- && fix-permissions /usr/local/etc/ \
- && fix-permissions /app \
- && fix-permissions /etc/ssmtp/ssmtp.conf
-
-EXPOSE 9000
-
-ENV AMAZEEIO_DB_HOST=mariadb \
- AMAZEEIO_DB_PORT=3306 \
- AMAZEEIO_DB_USERNAME=drupal \
- AMAZEEIO_DB_PASSWORD=drupal \
- AMAZEEIO_SITENAME=drupal \
- AMAZEEIO_SITE_NAME=drupal \
- AMAZEEIO_SITE_ENVIRONMENT=development \
- AMAZEEIO_HASH_SALT=0000000000000000000000000 \
- AMAZEEIO_TMP_PATH=/tmp \
- AMAZEEIO_LOCATION=docker
-
-ENV LAGOON_ENVIRONMENT_TYPE=development
-
-WORKDIR /app
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["/usr/local/sbin/php-fpm", "-F", "-R"]
diff --git a/images/php/fpm/check_fcgi b/images/php/fpm/check_fcgi
deleted file mode 100755
index 8af1d53ba6..0000000000
--- a/images/php/fpm/check_fcgi
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-# cgi-fcgi has issues with non-standard environment variables, so this script
-# is called with "env -i" to clean the environment
-# This script calls the /ping endpoint of php-fpm; if the return code is 0, php-fpm has started correctly
-env -i SCRIPT_NAME=/${1:-ping} SCRIPT_FILENAME=/${1:-ping} REQUEST_METHOD=GET /usr/bin/cgi-fcgi -bind -connect 127.0.0.1:9000
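check_fcgi probes /ping by default but accepts an alternative FPM endpoint as its first argument; together with the pool configuration below, /status is the other useful target. Illustrative calls:

```bash
# Illustrative: probe the default /ping endpoint, then the status page
# configured as pm.status_path in www.conf.
/usr/sbin/check_fcgi          # requests /ping, exits 0 when FPM answers
/usr/sbin/check_fcgi status   # requests /status instead
```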
diff --git a/images/php/fpm/entrypoints/50-ssmtp.sh b/images/php/fpm/entrypoints/50-ssmtp.sh
deleted file mode 100755
index c5a51f7ea1..0000000000
--- a/images/php/fpm/entrypoints/50-ssmtp.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/sh
-
-
-if [ ${SSMTP_REWRITEDOMAIN+x} ]; then
- echo -e "\nrewriteDomain=${SSMTP_REWRITEDOMAIN}" >> /etc/ssmtp/ssmtp.conf
-fi
-if [ ${SSMTP_AUTHUSER+x} ]; then
- echo -e "\nAuthUser=${SSMTP_AUTHUSER}" >> /etc/ssmtp/ssmtp.conf
-fi
-if [ ${SSMTP_AUTHPASS+x} ]; then
- echo -e "\nAuthPass=${SSMTP_AUTHPASS}" >> /etc/ssmtp/ssmtp.conf
-fi
-if [ ${SSMTP_USETLS+x} ]; then
- echo -e "\nUseTLS=${SSMTP_USETLS}" >> /etc/ssmtp/ssmtp.conf
-fi
-if [ ${SSMTP_USESTARTTLS+x} ]; then
- echo -e "\nUseSTARTTLS=${SSMTP_USESTARTTLS}" >> /etc/ssmtp/ssmtp.conf
-fi
-
-if [ ${SSMTP_MAILHUB+x} ]; then
- echo -e "\nmailhub=${SSMTP_MAILHUB}" >> /etc/ssmtp/ssmtp.conf
-else
- # check if we find a mailhog on 172.17.0.1:1025
- if nc -z -w 1 172.17.0.1 1025 &> /dev/null; then
- echo -e "\nmailhub=172.17.0.1:1025" >> /etc/ssmtp/ssmtp.conf
- return
- fi
- # check if mxout.lagoon.svc can do smtp TLS
- if nc -z -w 1 mxout.lagoon.svc 465 &> /dev/null; then
- echo -e "UseTLS=Yes\nmailhub=mxout.lagoon.svc:465" >> /etc/ssmtp/ssmtp.conf
- return
- fi
- # Fallback: check if mxout.lagoon.svc can do regular 25 smtp
- if nc -z -w 1 mxout.lagoon.svc 25 &> /dev/null; then
- echo -e "\nmailhub=mxout.lagoon.svc:25" >> /etc/ssmtp/ssmtp.conf
- return
- fi
- # check if mxout.default.svc can do smtp TLS
- if nc -z -w 1 mxout.default.svc 465 &> /dev/null; then
- echo -e "UseTLS=Yes\nmailhub=mxout.default.svc:465" >> /etc/ssmtp/ssmtp.conf
- return
- fi
- # Fallback: check if mxout.default.svc can do regular 25 smtp
- if nc -z -w 1 mxout.default.svc 25 &> /dev/null; then
- echo -e "\nmailhub=mxout.default.svc:25" >> /etc/ssmtp/ssmtp.conf
- return
- fi
-fi
diff --git a/images/php/fpm/entrypoints/51-production-detection.sh b/images/php/fpm/entrypoints/51-production-detection.sh
deleted file mode 100644
index ce5b12eee3..0000000000
--- a/images/php/fpm/entrypoints/51-production-detection.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-if [[ -z "${PHP_ERROR_REPORTING}" ]]; then
- if [[ ${LAGOON_ENVIRONMENT_TYPE} == "production" ]]; then
- export PHP_ERROR_REPORTING="E_ALL & ~E_DEPRECATED & ~E_STRICT & ~E_NOTICE"
- fi
-fi
diff --git a/images/php/fpm/entrypoints/60-php-xdebug.sh b/images/php/fpm/entrypoints/60-php-xdebug.sh
deleted file mode 100755
index 1d05a274f1..0000000000
--- a/images/php/fpm/entrypoints/60-php-xdebug.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-# Tries to find the Dockerhost
-get_dockerhost() {
- # https://docs.docker.com/docker-for-mac/networking/#known-limitations-use-cases-and-workarounds
- if busybox timeout 1 busybox nslookup -query=A host.docker.internal &> /dev/null; then
- echo "host.docker.internal"
- return
- fi
-
- # Fallback to default gateway (should work on Linux) see https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach
- echo $(route -n | awk '/UG[ \t]/{print $2}')
- return
-}
-
-# Only if XDEBUG_ENABLE is not empty
-if [ ! -z ${XDEBUG_ENABLE} ]; then
- # remove first line and all comments
- sed -i '1d; s/;//' /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
- # add comment that explains how we have xdebug enabled
- sed -i '1s/^/;xdebug enabled as XDEBUG_ENABLE is not empty, see \/lagoon\/entrypoints\/60-php-xdebug.sh \n/' /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
-
- # Only if DOCKERHOST is not already set, allows to set a DOCKERHOST via environment variables
- if [[ -z ${DOCKERHOST+x} ]]; then
- DOCKERHOST=$(get_dockerhost)
- fi
-
- # Add the found remote_host to xdebug.ini
- echo -e "\n\nxdebug.remote_host=${DOCKERHOST}" >> /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
-
- if [ ${XDEBUG_LOG+x} ]; then
- echo -e "\n\nxdebug.remote_log=/tmp/xdebug.log" >> /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
- fi
-fi
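The entrypoint above is driven purely by environment variables: any non-empty XDEBUG_ENABLE switches xdebug on, and a pre-set DOCKERHOST skips the autodetection. A hedged run example (the image name is a placeholder):

```bash
# Illustrative: enable xdebug and pin the IDE host explicitly;
# the image name is a placeholder.
docker run \
  -e XDEBUG_ENABLE=true \
  -e DOCKERHOST=host.docker.internal \
  lagoon/php-fpm
```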
diff --git a/images/php/fpm/entrypoints/70-php-config.sh b/images/php/fpm/entrypoints/70-php-config.sh
deleted file mode 100755
index 257c9ac8c5..0000000000
--- a/images/php/fpm/entrypoints/70-php-config.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-cp /usr/local/etc/php/conf.d/00-lagoon-php.ini.tpl /usr/local/etc/php/conf.d/00-lagoon-php.ini && ep /usr/local/etc/php/conf.d/00-lagoon-php.ini
-ep /usr/local/etc/php-fpm.conf
-ep /usr/local/etc/php-fpm.d/*
\ No newline at end of file
diff --git a/images/php/fpm/entrypoints/71-php-newrelic.sh b/images/php/fpm/entrypoints/71-php-newrelic.sh
deleted file mode 100755
index e1b1bb0d54..0000000000
--- a/images/php/fpm/entrypoints/71-php-newrelic.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-# enable newrelic only if NEWRELIC_ENABLED is set
-if [ ${NEWRELIC_ENABLED+x} ]; then
- # envplate the newrelic ini file
- ep /usr/local/etc/php/conf.d/newrelic.disable
-
- cp /usr/local/etc/php/conf.d/newrelic.disable /usr/local/etc/php/conf.d/newrelic.ini
-
- # check if newrelic is running before trying to do tasks as it can cause them to fail, can delay container start by a few seconds
- # https://discuss.newrelic.com/t/php-agents-tries-to-connect-before-daemon-is-ready/48160/9
- php -r '$count=0;while(!newrelic_set_appname(ini_get("newrelic.appname")) && $count < 10){ $count++; echo "Waiting for NewRelic Agent to be responsive. ($count)" . PHP_EOL; sleep(1); }'
-fi
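Likewise, this entrypoint only activates the New Relic agent when NEWRELIC_ENABLED is set, with the license read from NEWRELIC_LICENSE (see the sed lines in the fpm Dockerfile above). A hedged run example (license value and image name are placeholders):

```bash
# Illustrative: switch the New Relic agent on at container start;
# the license key value and image name are placeholders.
docker run \
  -e NEWRELIC_ENABLED=true \
  -e NEWRELIC_LICENSE=replace-with-real-license \
  lagoon/php-fpm
```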
diff --git a/images/php/fpm/php-fpm.d/www.conf b/images/php/fpm/php-fpm.d/www.conf
deleted file mode 100644
index 3aa6fcd85c..0000000000
--- a/images/php/fpm/php-fpm.d/www.conf
+++ /dev/null
@@ -1,119 +0,0 @@
-; Start a new pool named 'www'.
-; the variable $pool can we used in any directive and will be replaced by the
-; pool name ('www' here)
-[www]
-
-; The address on which to accept FastCGI requests.
-; Valid syntaxes are:
-; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
-; a specific port;
-; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
-; a specific port;
-; 'port' - to listen on a TCP socket to all addresses
-; (IPv6 and IPv4-mapped) on a specific port;
-; '/path/to/unix/socket' - to listen on a unix socket.
-; Note: This value is mandatory.
-listen = [::]:9000
-
-; Choose how the process manager will control the number of child processes.
-; Possible Values:
-; static - a fixed number (pm.max_children) of child processes;
-; dynamic - the number of child processes are set dynamically based on the
-; following directives. With this process management, there will be
-; always at least 1 children.
-; pm.max_children - the maximum number of children that can
-; be alive at the same time.
-; pm.start_servers - the number of children created on startup.
-; pm.min_spare_servers - the minimum number of children in 'idle'
-; state (waiting to process). If the number
-; of 'idle' processes is less than this
-; number then some children will be created.
-; pm.max_spare_servers - the maximum number of children in 'idle'
-; state (waiting to process). If the number
-; of 'idle' processes is greater than this
-; number then some children will be killed.
-; ondemand - no children are created at startup. Children will be forked when
-; new requests will connect. The following parameter are used:
-; pm.max_children - the maximum number of children that
-; can be alive at the same time.
-; pm.process_idle_timeout - The number of seconds after which
-; an idle process will be killed.
-; Note: This value is mandatory.
-pm = dynamic
-
-; The number of child processes to be created when pm is set to 'static' and the
-; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
-; This value sets the limit on the number of simultaneous requests that will be
-; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
-; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
-; CGI. The below defaults are based on a server without much resources. Don't
-; forget to tweak pm.* to fit your needs.
-; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
-; Note: This value is mandatory.
-pm.max_children = ${PHP_FPM_PM_MAX_CHILDREN:-50}
-
-; The number of child processes created on startup.
-; Note: Used only when pm is set to 'dynamic'
-; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
-pm.start_servers = ${PHP_FPM_PM_START_SERVERS:-2}
-
-; The desired minimum number of idle server processes.
-; Note: Used only when pm is set to 'dynamic'
-; Note: Mandatory when pm is set to 'dynamic'
-pm.min_spare_servers = ${PHP_FPM_PM_MIN_SPARE_SERVERS:-2}
-
-; The desired maximum number of idle server processes.
-; Note: Used only when pm is set to 'dynamic'
-; Note: Mandatory when pm is set to 'dynamic'
-pm.max_spare_servers = ${PHP_FPM_PM_MAX_SPARE_SERVERS:-2}
-
-; The number of seconds after which an idle process will be killed.
-; Note: Used only when pm is set to 'ondemand'
-; Default Value: 10s
-pm.process_idle_timeout = ${PHP_FPM_PM_PROCESS_IDLE_TIMEOUT:-60s}
-
-; The number of requests each child process should execute before respawning.
-; This can be useful to work around memory leaks in 3rd party libraries. For
-; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
-; Default Value: 0
-pm.max_requests = ${PHP_FPM_PM_MAX_REQUESTS:-500}
-
-; The ping URI to call the monitoring page of FPM. If this value is not set, no
-; URI will be recognized as a ping page. This could be used to test from outside
-; that FPM is alive and responding, or to
-; - create a graph of FPM availability (rrd or such);
-; - remove a server from a group if it is not responding (load balancing);
-; - trigger alerts for the operating team (24/7).
-; Note: The value must start with a leading slash (/). The value can be
-; anything, but it may not be a good idea to use the .php extension or it
-; may conflict with a real PHP file.
-; Default Value: not set
-ping.path = /ping
-
-; This directive may be used to customize the response of a ping request. The
-; response is formatted as text/plain with a 200 response code.
-; Default Value: pong
-ping.response = pong
-
-; PHP FPM Status Page
-pm.status_path = /status
-
-; The access log file
-; Default: not set
-access.log = ${PHP_FPM_ACCESS_LOG:-/dev/null}
-
-; Redirect worker stdout and stderr into main error log. If not set, stdout and
-; stderr will be redirected to /dev/null according to FastCGI specs.
-; Note: on highloaded environement, this can cause some delay in the page
-; process time (several ms).
-; Default Value: no
-catch_workers_output = yes
-
-; Clear environment in FPM workers
-; Prevents arbitrary environment variables from reaching FPM worker processes
-; by clearing the environment in workers before env vars specified in this
-; pool configuration are added.
-; Setting to "no" will make all environment variables available to PHP code
-; via getenv(), $_ENV and $_SERVER.
-; Default Value: yes
-clear_env = no
diff --git a/images/php/fpm/php.ini b/images/php/fpm/php.ini
deleted file mode 100644
index 55009fc640..0000000000
--- a/images/php/fpm/php.ini
+++ /dev/null
@@ -1,1587 +0,0 @@
-[PHP]
-
-;;;;;;;;;;;;;;;;;;;;
-; Language Options ;
-;;;;;;;;;;;;;;;;;;;;
-
-; Enable the PHP scripting language engine under Apache.
-; http://php.net/engine
-engine = On
-
-; This directive determines whether or not PHP will recognize code between
-; <? and ?> tags as PHP source which should be processed as such. It is
-; generally recommended that <?php ?> should be used and that this feature
-; should be disabled, as enabling it may result in issues when generating XML
-; documents, however this remains supported for backward compatibility reasons.
-; Note that this directive does not control the <?= shorthand tag, which can be
-; used regardless of this directive.
-; Default Value: On
-; Development Value: Off
-; Production Value: Off
-; http://php.net/short-open-tag
-short_open_tag = On
-
-; The number of significant digits displayed in floating point numbers.
-; http://php.net/precision
-precision = 14
-
-; Output buffering is a mechanism for controlling how much output data
-; (excluding headers and cookies) PHP should keep internally before pushing that
-; data to the client. If your application's output exceeds this setting, PHP
-; will send that data in chunks of roughly the size you specify.
-; Turning on this setting and managing its maximum buffer size can yield some
-; interesting side-effects depending on your application and web server.
-; You may be able to send headers and cookies after you've already sent output
-; through print or echo. You also may see performance benefits if your server is
-; emitting less packets due to buffered output versus PHP streaming the output
-; as it gets it. On production servers, 4096 bytes is a good setting for performance
-; reasons.
-; Note: Output buffering can also be controlled via Output Buffering Control
-; functions.
-; Possible Values:
-; On = Enabled and buffer is unlimited. (Use with caution)
-; Off = Disabled
-; Integer = Enables the buffer and sets its maximum size in bytes.
-; Note: This directive is hardcoded to Off for the CLI SAPI
-; Default Value: Off
-; Development Value: 4096
-; Production Value: 4096
-; http://php.net/output-buffering
-output_buffering = 4096
-
-; You can redirect all of the output of your scripts to a function. For
-; example, if you set output_handler to "mb_output_handler", character
-; encoding will be transparently converted to the specified encoding.
-; Setting any output handler automatically turns on output buffering.
-; Note: People who wrote portable scripts should not depend on this ini
-; directive. Instead, explicitly set the output handler using ob_start().
-; Using this ini directive may cause problems unless you know what script
-; is doing.
-; Note: You cannot use both "mb_output_handler" with "ob_iconv_handler"
-; and you cannot use both "ob_gzhandler" and "zlib.output_compression".
-; Note: output_handler must be empty if this is set 'On' !!!!
-; Instead you must use zlib.output_handler.
-; http://php.net/output-handler
-;output_handler =
-
-; Transparent output compression using the zlib library
-; Valid values for this option are 'off', 'on', or a specific buffer size
-; to be used for compression (default is 4KB)
-; Note: Resulting chunk size may vary due to nature of compression. PHP
-; outputs chunks that are few hundreds bytes each as a result of
-; compression. If you prefer a larger chunk size for better
-; performance, enable output_buffering in addition.
-; Note: You need to use zlib.output_handler instead of the standard
-; output_handler, or otherwise the output will be corrupted.
-; http://php.net/zlib.output-compression
-zlib.output_compression = Off
-
-; http://php.net/zlib.output-compression-level
-;zlib.output_compression_level = -1
-
-; You cannot specify additional output handlers if zlib.output_compression
-; is activated here. This setting does the same as output_handler but in
-; a different order.
-; http://php.net/zlib.output-handler
-;zlib.output_handler =
-
-; Implicit flush tells PHP to tell the output layer to flush itself
-; automatically after every output block. This is equivalent to calling the
-; PHP function flush() after each and every call to print() or echo() and each
-; and every HTML block. Turning this option on has serious performance
-; implications and is generally recommended for debugging purposes only.
-; http://php.net/implicit-flush
-; Note: This directive is hardcoded to On for the CLI SAPI
-implicit_flush = Off
-
-; The unserialize callback function will be called (with the undefined class'
-; name as parameter), if the unserializer finds an undefined class
-; which should be instantiated. A warning appears if the specified function is
-; not defined, or if the function doesn't include/implement the missing class.
-; So only set this entry, if you really want to implement such a
-; callback-function.
-unserialize_callback_func =
-
-; When floats & doubles are serialized store serialize_precision significant
-; digits after the floating point. The default value ensures that when floats
-; are decoded with unserialize, the data will remain the same.
-serialize_precision = 17
-
-; open_basedir, if set, limits all file operations to the defined directory
-; and below. This directive makes most sense if used in a per-directory
-; or per-virtualhost web server configuration file.
-; http://php.net/open-basedir
-;open_basedir =
-
-; This directive allows you to disable certain functions for security reasons.
-; It receives a comma-delimited list of function names.
-; http://php.net/disable-functions
-disable_functions = pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wifcontinued,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,
-
-; This directive allows you to disable certain classes for security reasons.
-; It receives a comma-delimited list of class names.
-; http://php.net/disable-classes
-disable_classes =
-
-; Colors for Syntax Highlighting mode. Anything that's acceptable in
-; <span style="color: ???????"> would work.
-; http://php.net/syntax-highlighting
-;highlight.string = #DD0000
-;highlight.comment = #FF9900
-;highlight.keyword = #007700
-;highlight.default = #0000BB
-;highlight.html = #000000
-
-; If enabled, the request will be allowed to complete even if the user aborts
-; the request. Consider enabling it if executing long requests, which may end up
-; being interrupted by the user or a browser timing out. PHP's default behavior
-; is to disable this feature.
-; http://php.net/ignore-user-abort
-;ignore_user_abort = On
-
-; Determines the size of the realpath cache to be used by PHP. This value should
-; be increased on systems where PHP opens many files to reflect the quantity of
-; the file operations performed.
-; http://php.net/realpath-cache-size
-;realpath_cache_size = 16k
-realpath_cache_size = 256k
-
-; Duration of time, in seconds for which to cache realpath information for a given
-; file or directory. For systems with rarely changing files, consider increasing this
-; value.
-; http://php.net/realpath-cache-ttl
-;realpath_cache_ttl = 120
-realpath_cache_ttl = 3600
-
-; Enables or disables the circular reference collector.
-; http://php.net/zend.enable-gc
-zend.enable_gc = On
-
-; If enabled, scripts may be written in encodings that are incompatible with
-; the scanner. CP936, Big5, CP949 and Shift_JIS are the examples of such
-; encodings. To use this feature, mbstring extension must be enabled.
-; Default: Off
-;zend.multibyte = Off
-
-; Allows to set the default encoding for the scripts. This value will be used
-; unless "declare(encoding=...)" directive appears at the top of the script.
-; Only affects if zend.multibyte is set.
-; Default: ""
-;zend.script_encoding =
-
-;;;;;;;;;;;;;;;;;
-; Miscellaneous ;
-;;;;;;;;;;;;;;;;;
-
-; Decides whether PHP may expose the fact that it is installed on the server
-; (e.g. by adding its signature to the Web server header). It is no security
-; threat in any way, but it makes it possible to determine whether you use PHP
-; on your server or not.
-; http://php.net/expose-php
-expose_php = 0
-
-;;;;;;;;;;;;;;;;;;;
-; Resource Limits ;
-;;;;;;;;;;;;;;;;;;;
-
-; Maximum execution time of each script, in seconds
-; http://php.net/max-execution-time
-; Note: This directive is hardcoded to 0 for the CLI SAPI
-max_execution_time = 900
-
-; Maximum amount of time each script may spend parsing request data. It's a good
-; idea to limit this time on productions servers in order to eliminate unexpectedly
-; long running scripts.
-; Note: This directive is hardcoded to -1 for the CLI SAPI
-; Default Value: -1 (Unlimited)
-; Development Value: 60 (60 seconds)
-; Production Value: 60 (60 seconds)
-; http://php.net/max-input-time
-max_input_time = 900
-
-; Maximum input variable nesting level
-; http://php.net/max-input-nesting-level
-;max_input_nesting_level = 64
-
-; How many GET/POST/COOKIE input variables may be accepted
-max_input_vars = 1000
-
-; Maximum amount of memory a script may consume (128MB)
-; http://php.net/memory-limit
-memory_limit = 400M
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Error handling and logging ;
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-; This directive informs PHP of which errors, warnings and notices you would like
-; it to take action for. The recommended way of setting values for this
-; directive is through the use of the error level constants and bitwise
-; operators. The error level constants are below here for convenience as well as
-; some common settings and their meanings.
-; By default, PHP is set to take action on all errors, notices and warnings EXCEPT
-; those related to E_NOTICE and E_STRICT, which together cover best practices and
-; recommended coding standards in PHP. For performance reasons, this is the
-; recommend error reporting setting. Your production server shouldn't be wasting
-; resources complaining about best practices and coding standards. That's what
-; development servers and development settings are for.
-; Note: The php.ini-development file has this setting as E_ALL. This
-; means it pretty much reports everything which is exactly what you want during
-; development and early testing.
-;
-; Error Level Constants:
-; E_ALL - All errors and warnings (includes E_STRICT as of PHP 5.4.0)
-; E_ERROR - fatal run-time errors
-; E_RECOVERABLE_ERROR - almost fatal run-time errors
-; E_WARNING - run-time warnings (non-fatal errors)
-; E_PARSE - compile-time parse errors
-; E_NOTICE - run-time notices (these are warnings which often result
-; from a bug in your code, but it's possible that it was
-; intentional (e.g., using an uninitialized variable and
-; relying on the fact it is automatically initialized to an
-; empty string)
-; E_STRICT - run-time notices, enable to have PHP suggest changes
-; to your code which will ensure the best interoperability
-; and forward compatibility of your code
-; E_CORE_ERROR - fatal errors that occur during PHP's initial startup
-; E_CORE_WARNING - warnings (non-fatal errors) that occur during PHP's
-; initial startup
-; E_COMPILE_ERROR - fatal compile-time errors
-; E_COMPILE_WARNING - compile-time warnings (non-fatal errors)
-; E_USER_ERROR - user-generated error message
-; E_USER_WARNING - user-generated warning message
-; E_USER_NOTICE - user-generated notice message
-; E_DEPRECATED - warn about code that will not work in future versions
-; of PHP
-; E_USER_DEPRECATED - user-generated deprecation warnings
-;
-; Common Values:
-; E_ALL (Show all errors, warnings and notices including coding standards.)
-; E_ALL & ~E_NOTICE (Show all errors, except for notices)
-; E_ALL & ~E_NOTICE & ~E_STRICT (Show all errors, except for notices and coding standards warnings.)
-; E_COMPILE_ERROR|E_RECOVERABLE_ERROR|E_ERROR|E_CORE_ERROR (Show only errors)
-; Default Value: E_ALL & ~E_NOTICE & ~E_STRICT & ~E_DEPRECATED
-; Development Value: E_ALL & ~E_DEPRECATED & ~E_STRICT
-; Production Value: E_ALL & ~E_DEPRECATED & ~E_STRICT & ~E_NOTICE
-; http://php.net/error-reporting
-error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
-
-; This directive controls whether or not and where PHP will output errors,
-; notices and warnings too. Error output is very useful during development, but
-; it could be very dangerous in production environments. Depending on the code
-; which is triggering the error, sensitive information could potentially leak
-; out of your application such as database usernames and passwords or worse.
-; For production environments, we recommend logging errors rather than
-; sending them to STDOUT.
-; Possible Values:
-; Off = Do not display any errors
-; stderr = Display errors to STDERR (affects only CGI/CLI binaries!)
-; On or stdout = Display errors to STDOUT
-; Default Value: On
-; Development Value: On
-; Production Value: Off
-; http://php.net/display-errors
-display_errors = Off
-
-; The display of errors which occur during PHP's startup sequence are handled
-; separately from display_errors. PHP's default behavior is to suppress those
-; errors from clients. Turning the display of startup errors on can be useful in
-; debugging configuration problems. We strongly recommend you
-; set this to 'off' for production servers.
-; Default Value: Off
-; Development Value: On
-; Production Value: Off
-; http://php.net/display-startup-errors
-display_startup_errors = Off
-
-; Besides displaying errors, PHP can also log errors to locations such as a
-; server-specific log, STDERR, or a location specified by the error_log
-; directive found below. While errors should not be displayed on productions
-; servers they should still be monitored and logging is a great way to do that.
-; Default Value: Off
-; Development Value: On
-; Production Value: On
-; http://php.net/log-errors
-log_errors = On
-
-; Set maximum length of log_errors. In error_log information about the source is
-; added. The default is 1024 and 0 allows to not apply any maximum length at all.
-; http://php.net/log-errors-max-len
-log_errors_max_len = 1024
-
-; Do not log repeated messages. Repeated errors must occur in same file on same
-; line unless ignore_repeated_source is set true.
-; http://php.net/ignore-repeated-errors
-ignore_repeated_errors = Off
-
-; Ignore source of message when ignoring repeated messages. When this setting
-; is On you will not log errors with repeated messages from different files or
-; source lines.
-; http://php.net/ignore-repeated-source
-ignore_repeated_source = Off
-
-; If this parameter is set to Off, then memory leaks will not be shown (on
-; stdout or in the log). This has only effect in a debug compile, and if
-; error reporting includes E_WARNING in the allowed list
-; http://php.net/report-memleaks
-report_memleaks = On
-
-; This setting is on by default.
-;report_zend_debug = 0
-
-; Store the last error/warning message in $php_errormsg (boolean). Setting this value
-; to On can assist in debugging and is appropriate for development servers. It should
-; however be disabled on production servers.
-; Default Value: Off
-; Development Value: On
-; Production Value: Off
-; http://php.net/track-errors
-track_errors = Off
-
-; Turn off normal error reporting and emit XML-RPC error XML
-; http://php.net/xmlrpc-errors
-;xmlrpc_errors = 0
-
-; An XML-RPC faultCode
-;xmlrpc_error_number = 0
-
-; When PHP displays or logs an error, it has the capability of formatting the
-; error message as HTML for easier reading. This directive controls whether
-; the error message is formatted as HTML or not.
-; Note: This directive is hardcoded to Off for the CLI SAPI
-; Default Value: On
-; Development Value: On
-; Production value: On
-; http://php.net/html-errors
-html_errors = On
-
-; If html_errors is set to On *and* docref_root is not empty, then PHP
-; produces clickable error messages that direct to a page describing the error
-; or function causing the error in detail.
-; You can download a copy of the PHP manual from http://php.net/docs
-; and change docref_root to the base URL of your local copy including the
-; leading '/'. You must also specify the file extension being used including
-; the dot. PHP's default behavior is to leave these settings empty, in which
-; case no links to documentation are generated.
-; Note: Never use this feature for production boxes.
-; http://php.net/docref-root
-; Examples
-;docref_root = "/phpmanual/"
-
-; http://php.net/docref-ext
-;docref_ext = .html
-
-; String to output before an error message. PHP's default behavior is to leave
-; this setting blank.
-; http://php.net/error-prepend-string
-; Example:
-;error_prepend_string = ""
-
-; String to output after an error message. PHP's default behavior is to leave
-; this setting blank.
-; http://php.net/error-append-string
-; Example:
-;error_append_string = ""
-
-; Log errors to specified file. PHP's default behavior is to leave this value
-; empty.
-; http://php.net/error-log
-; Example:
-;error_log = php_errors.log
-; Log errors to syslog.
-;error_log = syslog
-
-;windows.show_crt_warning
-; Default value: 0
-; Development value: 0
-; Production value: 0
-
-;;;;;;;;;;;;;;;;;
-; Data Handling ;
-;;;;;;;;;;;;;;;;;
-
-; The separator used in PHP generated URLs to separate arguments.
-; PHP's default setting is "&".
-; http://php.net/arg-separator.output
-; Example:
-;arg_separator.output = "&"
-
-; List of separator(s) used by PHP to parse input URLs into variables.
-; PHP's default setting is "&".
-; NOTE: Every character in this directive is considered as separator!
-; http://php.net/arg-separator.input
-; Example:
-;arg_separator.input = ";&"
-
-; This directive determines which super global arrays are registered when PHP
-; starts up. G,P,C,E & S are abbreviations for the following respective super
-; globals: GET, POST, COOKIE, ENV and SERVER. There is a performance penalty
-; paid for the registration of these arrays and because ENV is not as commonly
-; used as the others, ENV is not recommended on productions servers. You
-; can still get access to the environment variables through getenv() should you
-; need to.
-; Default Value: "EGPCS"
-; Development Value: "GPCS"
-; Production Value: "GPCS";
-; http://php.net/variables-order
-variables_order = "GPCS"
-
-; This directive determines which super global data (G,P & C) should be
-; registered into the super global array REQUEST. If so, it also determines
-; the order in which that data is registered. The values for this directive
-; are specified in the same manner as the variables_order directive,
-; EXCEPT one. Leaving this value empty will cause PHP to use the value set
-; in the variables_order directive. It does not mean it will leave the super
-; globals array REQUEST empty.
-; Default Value: None
-; Development Value: "GP"
-; Production Value: "GP"
-; http://php.net/request-order
-request_order = "GP"
-
-; This directive determines whether PHP registers $argv & $argc each time it
-; runs. $argv contains an array of all the arguments passed to PHP when a script
-; is invoked. $argc contains an integer representing the number of arguments
-; that were passed when the script was invoked. These arrays are extremely
-; useful when running scripts from the command line. When this directive is
-; enabled, registering these variables consumes CPU cycles and memory each time
-; a script is executed. For performance reasons, this feature should be disabled
-; on production servers.
-; Note: This directive is hardcoded to On for the CLI SAPI
-; Default Value: On
-; Development Value: Off
-; Production Value: Off
-; http://php.net/register-argc-argv
-register_argc_argv = Off
-
-; When enabled, the ENV, REQUEST and SERVER variables are created when they're
-; first used (Just In Time) instead of when the script starts. If these
-; variables are not used within a script, having this directive on will result
-; in a performance gain. The PHP directive register_argc_argv must be disabled
-; for this directive to have any affect.
-; http://php.net/auto-globals-jit
-auto_globals_jit = On
-
-; Whether PHP will read the POST data.
-; This option is enabled by default.
-; Most likely, you won't want to disable this option globally. It causes $_POST
-; and $_FILES to always be empty; the only way you will be able to read the
-; POST data will be through the php://input stream wrapper. This can be useful
-; to proxy requests or to process the POST data in a memory efficient fashion.
-; http://php.net/enable-post-data-reading
-;enable_post_data_reading = Off
-
-; Maximum size of POST data that PHP will accept.
-; Its value may be 0 to disable the limit. It is ignored if POST data reading
-; is disabled through enable_post_data_reading.
-; http://php.net/post-max-size
-post_max_size = 2048M
-
-; Automatically add files before PHP document.
-; http://php.net/auto-prepend-file
-auto_prepend_file = none
-
-; Automatically add files after PHP document.
-; http://php.net/auto-append-file
-auto_append_file = none
-
-; By default, PHP will output a character encoding using
-; the Content-type: header. To disable sending of the charset, simply
-; set it to be empty.
-;
-; PHP's built-in default is text/html
-; http://php.net/default-mimetype
-default_mimetype = "text/html"
-
-; PHP's default character set is set to UTF-8.
-; http://php.net/default-charset
-default_charset = "UTF-8"
-
-; PHP internal character encoding is set to empty.
-; If empty, default_charset is used.
-; http://php.net/internal-encoding
-;internal_encoding =
-
-; PHP input character encoding is set to empty.
-; If empty, default_charset is used.
-; http://php.net/input-encoding
-;input_encoding =
-
-; PHP output character encoding is set to empty.
-; If empty, default_charset is used.
-; mbstring or iconv output handler is used.
-; See also output_buffer.
-; http://php.net/output-encoding
-;output_encoding =
-
-;;;;;;;;;;;;;;;;;;;;;;;;;
-; Paths and Directories ;
-;;;;;;;;;;;;;;;;;;;;;;;;;
-
-; UNIX: "/path1:/path2"
-;include_path = ".:/php/includes"
-;
-; Windows: "\path1;\path2"
-;include_path = ".;c:\php\includes"
-;
-; PHP's default setting for include_path is ".;/path/to/php/pear"
-; http://php.net/include-path
-
-; The root of the PHP pages, used only if nonempty.
-; if PHP was not compiled with FORCE_REDIRECT, you SHOULD set doc_root
-; if you are running PHP as a CGI under any web server (other than IIS);
-; see the documentation for security issues. The alternative is to use the
-; cgi.force_redirect configuration below
-; http://php.net/doc-root
-doc_root =
-
-; The directory under which PHP opens the script using /~username used only
-; if nonempty.
-; http://php.net/user-dir
-user_dir =
-
-; Directory in which the loadable extensions (modules) reside.
-; http://php.net/extension-dir
-; extension_dir = "./"
-; On windows:
-; extension_dir = "ext"
-
-; Directory where the temporary files should be placed.
-; Defaults to the system default (see sys_get_temp_dir)
-; sys_temp_dir = "/tmp"
-
-; Whether or not to enable the dl() function. The dl() function does NOT work
-; properly in multithreaded servers, such as IIS or Zeus, and is automatically
-; disabled on them.
-; http://php.net/enable-dl
-enable_dl = Off
-
-; cgi.force_redirect is necessary to provide security running PHP as a CGI under
-; most web servers. Left undefined, PHP turns this on by default. You can
-; turn it off here AT YOUR OWN RISK
-; **You CAN safely turn this off for IIS, in fact, you MUST.**
-; http://php.net/cgi.force-redirect
-;cgi.force_redirect = 1
-
-; if cgi.nph is enabled it will force cgi to always send Status: 200 with
-; every request. PHP's default behavior is to disable this feature.
-;cgi.nph = 1
-
-; if cgi.force_redirect is turned on, and you are not running under Apache or Netscape
-; (iPlanet) web servers, you MAY need to set an environment variable name that PHP
-; will look for to know it is OK to continue execution. Setting this variable MAY
-; cause security issues, KNOW WHAT YOU ARE DOING FIRST.
-; http://php.net/cgi.redirect-status-env
-;cgi.redirect_status_env =
-
-; cgi.fix_pathinfo provides *real* PATH_INFO/PATH_TRANSLATED support for CGI. PHP's
-; previous behaviour was to set PATH_TRANSLATED to SCRIPT_FILENAME, and to not grok
-; what PATH_INFO is. For more information on PATH_INFO, see the cgi specs. Setting
-; this to 1 will cause PHP CGI to fix its paths to conform to the spec. A setting
-; of zero causes PHP to behave as before. Default is 1. You should fix your scripts
-; to use SCRIPT_FILENAME rather than PATH_TRANSLATED.
-; http://php.net/cgi.fix-pathinfo
-cgi.fix_pathinfo=0
-
-; FastCGI under IIS (on WINNT based OS) supports the ability to impersonate
-; security tokens of the calling client. This allows IIS to define the
-; security context that the request runs under. mod_fastcgi under Apache
-; does not currently support this feature (03/17/2002)
-; Set to 1 if running under IIS. Default is zero.
-; http://php.net/fastcgi.impersonate
-;fastcgi.impersonate = 1
-
-; Disable logging through FastCGI connection. PHP's default behavior is to enable
-; this feature.
-;fastcgi.logging = 0
-
-; cgi.rfc2616_headers configuration option tells PHP what type of headers to
-; use when sending HTTP response code. If set to 0, PHP sends Status: header that
-; is supported by Apache. When this option is set to 1, PHP will send
-; RFC2616 compliant header.
-; Default is zero.
-; http://php.net/cgi.rfc2616-headers
-;cgi.rfc2616_headers = 0
-
-;;;;;;;;;;;;;;;;
-; File Uploads ;
-;;;;;;;;;;;;;;;;
-
-; Whether to allow HTTP file uploads.
-; http://php.net/file-uploads
-file_uploads = On
-
-; Temporary directory for HTTP uploaded files (will use system default if not
-; specified).
-; http://php.net/upload-tmp-dir
-;upload_tmp_dir =
-
-; Maximum allowed size for uploaded files.
-; http://php.net/upload-max-filesize
-upload_max_filesize = 2048M
-
-; Maximum number of files that can be uploaded via a single request
-max_file_uploads = 20
-
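Since post_max_size caps the entire request body, it should be at least as large as upload_max_filesize (they are both 2048M here). A one-liner to inspect the effective values, assuming a `php` CLI:

```sh
php -r 'printf("post_max_size=%s  upload_max_filesize=%s  max_file_uploads=%s\n",
    ini_get("post_max_size"), ini_get("upload_max_filesize"), ini_get("max_file_uploads"));'
```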
-;;;;;;;;;;;;;;;;;;
-; Fopen wrappers ;
-;;;;;;;;;;;;;;;;;;
-
-; Whether to allow the treatment of URLs (like http:// or ftp://) as files.
-; http://php.net/allow-url-fopen
-allow_url_fopen = On
-
-; Whether to allow include/require to open URLs (like http:// or ftp://) as files.
-; http://php.net/allow-url-include
-allow_url_include = Off
-
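A quick sketch of what allow_url_fopen permits (needs network access; example.com is just an illustrative URL) — note that allow_url_include stays off, so URLs are readable as streams but never include()-able:

```sh
php -d allow_url_fopen=1 -r 'echo substr(file_get_contents("https://example.com/"), 0, 15), PHP_EOL;'
```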
-; Define the anonymous ftp password (your email address). PHP's default setting
-; for this is empty.
-; http://php.net/from
-;from="john@doe.com"
-
-; Define the User-Agent string. PHP's default setting for this is empty.
-; http://php.net/user-agent
-;user_agent="PHP"
-
-; Default timeout for socket based streams (seconds)
-; http://php.net/default-socket-timeout
-default_socket_timeout = 60
-
-; If your scripts have to deal with files from Macintosh systems,
-; or you are running on a Mac and need to deal with files from
-; unix or win32 systems, setting this flag will cause PHP to
-; automatically detect the EOL character in those files so that
-; fgets() and file() will work regardless of the source of the file.
-; http://php.net/auto-detect-line-endings
-;auto_detect_line_endings = Off
-
-;;;;;;;;;;;;;;;;;;;;;;
-; Dynamic Extensions ;
-;;;;;;;;;;;;;;;;;;;;;;
-
-; If you wish to have an extension loaded automatically, use the following
-; syntax:
-;
-; extension=modulename.extension
-;
-; For example, on Windows:
-;
-; extension=msql.dll
-;
-; ... or under UNIX:
-;
-; extension=msql.so
-;
-; ... or with a path:
-;
-; extension=/path/to/extension/msql.so
-;
-; If you only provide the name of the extension, PHP will look for it in its
-; default extension directory.
-
-;;;;
-; Note: packaged extension modules are now loaded via the .ini files
-; found in the directory /etc/php.d; these are loaded by default.
-;;;;
-
-;;;;;;;;;;;;;;;;;;;
-; Module Settings ;
-;;;;;;;;;;;;;;;;;;;
-
-[CLI Server]
-; Whether the CLI web server uses ANSI color coding in its terminal output.
-cli_server.color = On
-
-[Date]
-; Defines the default timezone used by the date functions
-; http://php.net/date.timezone
-date.timezone = UTC
-
-; http://php.net/date.default-latitude
-;date.default_latitude = 31.7667
-
-; http://php.net/date.default-longitude
-;date.default_longitude = 35.2333
-
-; http://php.net/date.sunrise-zenith
-;date.sunrise_zenith = 90.583333
-
-; http://php.net/date.sunset-zenith
-;date.sunset_zenith = 90.583333
-
-[filter]
-; http://php.net/filter.default
-;filter.default = unsafe_raw
-
-; http://php.net/filter.default-flags
-;filter.default_flags =
-
-[iconv]
-; Use of this INI entry is deprecated, use global input_encoding instead.
-; If empty, default_charset or input_encoding or iconv.input_encoding is used.
-; The precedence is: default_charset < input_encoding < iconv.input_encoding
-;iconv.input_encoding =
-
-; Use of this INI entry is deprecated, use global internal_encoding instead.
-; If empty, default_charset or internal_encoding or iconv.internal_encoding is used.
-; The precedence is: default_charset < internal_encoding < iconv.internal_encoding
-;iconv.internal_encoding =
-
-; Use of this INI entry is deprecated, use global output_encoding instead.
-; If empty, default_charset or output_encoding or iconv.output_encoding is used.
-; The precedence is: default_charset < output_encoding < iconv.output_encoding
-; To use an output encoding conversion, iconv's output handler must be set
-; otherwise output encoding conversion cannot be performed.
-;iconv.output_encoding =
-
-[intl]
-;intl.default_locale =
-; This directive allows you to produce PHP errors when some error
-; happens within intl functions. The value is the level of the error produced.
-; Default is 0, which does not produce any errors.
-;intl.error_level = E_WARNING
-
-[sqlite]
-; http://php.net/sqlite.assoc-case
-;sqlite.assoc_case = 0
-
-[sqlite3]
-;sqlite3.extension_dir =
-
-[Pcre]
-;PCRE library backtracking limit.
-; http://php.net/pcre.backtrack-limit
-;pcre.backtrack_limit=100000
-
-;PCRE library recursion limit.
-;Please note that if you set this value to a high number you may consume all
-;the available process stack and eventually crash PHP (due to reaching the
-;stack size limit imposed by the Operating System).
-; http://php.net/pcre.recursion-limit
-;pcre.recursion_limit=100000
-
-;Enables or disables JIT compilation of patterns. This requires the PCRE
-;library to be compiled with JIT support.
-pcre.jit=0
-
-[Pdo]
-; Whether to pool ODBC connections. Can be one of "strict", "relaxed" or "off"
-; http://php.net/pdo-odbc.connection-pooling
-;pdo_odbc.connection_pooling=strict
-
-;pdo_odbc.db2_instance_name
-
-[Pdo_mysql]
-; If mysqlnd is used: Number of cache slots for the internal result set cache
-; http://php.net/pdo_mysql.cache_size
-pdo_mysql.cache_size = 2000
-
-; Default socket name for local MySQL connects. If empty, uses the built-in
-; MySQL defaults.
-; http://php.net/pdo_mysql.default-socket
-pdo_mysql.default_socket=
-
-[Phar]
-; http://php.net/phar.readonly
-;phar.readonly = On
-
-; http://php.net/phar.require-hash
-;phar.require_hash = On
-
-;phar.cache_list =
-
-[mail function]
-; For Unix only. You may supply arguments as well (default: "sendmail -t -i").
-; http://php.net/sendmail-path
-sendmail_path = /usr/sbin/sendmail -t -i
-
-; Force the addition of the specified parameters to be passed as extra parameters
-; to the sendmail binary. These parameters will always replace the value of
-; the 5th parameter to mail().
-;mail.force_extra_parameters =
-
-; Add X-PHP-Originating-Script: that will include uid of the script followed by the filename
-mail.add_x_header = On
-
-; The path to a log file that will log all mail() calls. Log entries include
-; the full path of the script, line number, To address and headers.
-;mail.log =
-; Log mail to syslog;
-;mail.log = syslog
-
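Because sendmail_path is run through the shell, a capture command can stand in for a real MTA during development. A sketch, assuming a `php` CLI; `/tmp/mail.out` is a hypothetical capture file:

```sh
php -d sendmail_path='tee /tmp/mail.out >/dev/null' \
    -r 'mail("dev@example.com", "subject", "body");'
grep -c '^X-PHP-Originating-Script' /tmp/mail.out   # 1 when mail.add_x_header = On
```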
-[SQL]
-; http://php.net/sql.safe-mode
-sql.safe_mode = Off
-
-[ODBC]
-; http://php.net/odbc.default-db
-;odbc.default_db = Not yet implemented
-
-; http://php.net/odbc.default-user
-;odbc.default_user = Not yet implemented
-
-; http://php.net/odbc.default-pw
-;odbc.default_pw = Not yet implemented
-
-; Controls the ODBC cursor model.
-; Default: SQL_CURSOR_STATIC (default).
-;odbc.default_cursortype
-
-; Allow or prevent persistent links.
-; http://php.net/odbc.allow-persistent
-odbc.allow_persistent = On
-
-; Check that a connection is still valid before reuse.
-; http://php.net/odbc.check-persistent
-odbc.check_persistent = On
-
-; Maximum number of persistent links. -1 means no limit.
-; http://php.net/odbc.max-persistent
-odbc.max_persistent = -1
-
-; Maximum number of links (persistent + non-persistent). -1 means no limit.
-; http://php.net/odbc.max-links
-odbc.max_links = -1
-
-; Handling of LONG fields. Returns number of bytes to variables. 0 means
-; passthru.
-; http://php.net/odbc.defaultlrl
-odbc.defaultlrl = 4096
-
-; Handling of binary data. 0 means passthru, 1 return as is, 2 convert to char.
-; See the documentation on odbc_binmode and odbc_longreadlen for an explanation
-; of odbc.defaultlrl and odbc.defaultbinmode
-; http://php.net/odbc.defaultbinmode
-odbc.defaultbinmode = 1
-
-;birdstep.max_links = -1
-
-[Interbase]
-; Allow or prevent persistent links.
-ibase.allow_persistent = 1
-
-; Maximum number of persistent links. -1 means no limit.
-ibase.max_persistent = -1
-
-; Maximum number of links (persistent + non-persistent). -1 means no limit.
-ibase.max_links = -1
-
-; Default database name for ibase_connect().
-;ibase.default_db =
-
-; Default username for ibase_connect().
-;ibase.default_user =
-
-; Default password for ibase_connect().
-;ibase.default_password =
-
-; Default charset for ibase_connect().
-;ibase.default_charset =
-
-; Default timestamp format.
-ibase.timestampformat = "%Y-%m-%d %H:%M:%S"
-
-; Default date format.
-ibase.dateformat = "%Y-%m-%d"
-
-; Default time format.
-ibase.timeformat = "%H:%M:%S"
-
-[MySQLi]
-
-; Maximum number of persistent links. -1 means no limit.
-; http://php.net/mysqli.max-persistent
-mysqli.max_persistent = -1
-
-; Allow accessing, from PHP's perspective, local files with LOAD DATA statements
-; http://php.net/mysqli.allow_local_infile
-;mysqli.allow_local_infile = On
-
-; Allow or prevent persistent links.
-; http://php.net/mysqli.allow-persistent
-mysqli.allow_persistent = On
-
-; Maximum number of links. -1 means no limit.
-; http://php.net/mysqli.max-links
-mysqli.max_links = -1
-
-; If mysqlnd is used: Number of cache slots for the internal result set cache
-; http://php.net/mysqli.cache_size
-mysqli.cache_size = 2000
-
-; Default port number for mysqli_connect(). If unset, mysqli_connect() will use
-; the $MYSQL_TCP_PORT or the mysql-tcp entry in /etc/services or the
-; compile-time value defined MYSQL_PORT (in that order). Win32 will only look
-; at MYSQL_PORT.
-; http://php.net/mysqli.default-port
-mysqli.default_port = 3306
-
-; Default socket name for local MySQL connects. If empty, uses the built-in
-; MySQL defaults.
-; http://php.net/mysqli.default-socket
-mysqli.default_socket =
-
-; Default host for mysqli_connect() (doesn't apply in safe mode).
-; http://php.net/mysqli.default-host
-mysqli.default_host =
-
-; Default user for mysqli_connect() (doesn't apply in safe mode).
-; http://php.net/mysqli.default-user
-mysqli.default_user =
-
-; Default password for mysqli_connect() (doesn't apply in safe mode).
-; Note that it is generally a *bad* idea to store passwords in this file.
-; *Any* user with PHP access can run 'echo get_cfg_var("mysqli.default_pw");'
-; and reveal this password! And of course, any users with read access to this
-; file will be able to reveal the password as well.
-; http://php.net/mysqli.default-pw
-mysqli.default_pw =
-
-; Allow or prevent reconnect
-mysqli.reconnect = Off
-
-[mysqlnd]
-; Enable / Disable collection of general statistics by mysqlnd which can be
-; used to tune and monitor MySQL operations.
-; http://php.net/mysqlnd.collect_statistics
-mysqlnd.collect_statistics = On
-
-; Enable / Disable collection of memory usage statistics by mysqlnd which can be
-; used to tune and monitor MySQL operations.
-; http://php.net/mysqlnd.collect_memory_statistics
-mysqlnd.collect_memory_statistics = Off
-
-; Size of a pre-allocated buffer used when sending commands to MySQL in bytes.
-; http://php.net/mysqlnd.net_cmd_buffer_size
-;mysqlnd.net_cmd_buffer_size = 2048
-
-; Size of a pre-allocated buffer used for reading data sent by the server in
-; bytes.
-; http://php.net/mysqlnd.net_read_buffer_size
-;mysqlnd.net_read_buffer_size = 32768
-
-[PostgreSQL]
-; Allow or prevent persistent links.
-; http://php.net/pgsql.allow-persistent
-pgsql.allow_persistent = On
-
-; Detect broken persistent links always with pg_pconnect().
-; The auto-reset feature adds a little overhead.
-; http://php.net/pgsql.auto-reset-persistent
-pgsql.auto_reset_persistent = Off
-
-; Maximum number of persistent links. -1 means no limit.
-; http://php.net/pgsql.max-persistent
-pgsql.max_persistent = -1
-
-; Maximum number of links (persistent+non persistent). -1 means no limit.
-; http://php.net/pgsql.max-links
-pgsql.max_links = -1
-
-; Whether to ignore PostgreSQL backend notice messages.
-; Notice message logging adds a little overhead.
-; http://php.net/pgsql.ignore-notice
-pgsql.ignore_notice = 0
-
-; Whether to log PostgreSQL backend notice messages.
-; Unless pgsql.ignore_notice=0, the module cannot log notice messages.
-; http://php.net/pgsql.log-notice
-pgsql.log_notice = 0
-
-[bcmath]
-; Number of decimal digits for all bcmath functions.
-; http://php.net/bcmath.scale
-bcmath.scale = 0
-
-[browscap]
-; http://php.net/browscap
-;browscap = extra/browscap.ini
-
-[Session]
-; Handler used to store/retrieve data.
-; http://php.net/session.save-handler
-session.save_handler = files
-
-; Argument passed to save_handler. In the case of files, this is the path
-; where data files are stored. Note: Windows users have to change this
-; variable in order to use PHP's session functions.
-;
-; The path can be defined as:
-;
-; session.save_path = "N;/path"
-;
-; where N is an integer. Instead of storing all the session files in
-; /path, what this will do is use subdirectories N-levels deep, and
-; store the session data in those directories. This is useful if
-; your OS has problems with many files in one directory, and is
-; a more efficient layout for servers that handle many sessions.
-;
-; NOTE 1: PHP will not create this directory structure automatically.
-; You can use the script in the ext/session dir for that purpose.
-; NOTE 2: See the section on garbage collection below if you choose to
-; use subdirectories for session storage
-;
-; The file storage module creates files using mode 600 by default.
-; You can change that by using
-;
-; session.save_path = "N;MODE;/path"
-;
-; where MODE is the octal representation of the mode. Note that this
-; does not overwrite the process's umask.
-; http://php.net/session.save-path
-
-; RPM note : session directory must be owned by process owner
-; for mod_php, see /etc/httpd/conf.d/php.conf
-; for php-fpm, see /etc/php-fpm.d/*conf
-;session.save_path = "/tmp"
-
-; Whether to use strict session mode.
-; Strict session mode does not accept uninitialized session ID and regenerate
-; session ID if browser sends uninitialized session ID. Strict mode protects
-; applications from session fixation via session adoption vulnerability. It is
-; disabled by default for maximum compatibility, but enabling it is encouraged.
-; https://wiki.php.net/rfc/strict_sessions
-session.use_strict_mode = 0
-
-; Whether to use cookies.
-; http://php.net/session.use-cookies
-session.use_cookies = 1
-
-; http://php.net/session.cookie-secure
-;session.cookie_secure =
-
-; This option forces PHP to fetch and use a cookie for storing and maintaining
-; the session id. We encourage this operation as it's very helpful in combating
-; session hijacking when not specifying and managing your own session id. It is
-; not the be-all and end-all of session hijacking defense, but it's a good start.
-; http://php.net/session.use-only-cookies
-session.use_only_cookies = 1
-
-; Name of the session (used as cookie name).
-; http://php.net/session.name
-session.name = PHPSESSID
-
-; Initialize session on request startup.
-; http://php.net/session.auto-start
-session.auto_start = 0
-
-; Lifetime in seconds of cookie or, if 0, until browser is restarted.
-; http://php.net/session.cookie-lifetime
-session.cookie_lifetime = 2000000
-
-; The path for which the cookie is valid.
-; http://php.net/session.cookie-path
-session.cookie_path = /
-
-; The domain for which the cookie is valid.
-; http://php.net/session.cookie-domain
-session.cookie_domain =
-
-; Whether or not to add the httpOnly flag to the cookie, which makes it inaccessible to browser scripting languages such as JavaScript.
-; http://php.net/session.cookie-httponly
-session.cookie_httponly =
-
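A hardened session-cookie baseline can be tried out without touching the ini file; the flags here are illustrative, not what this file ships (it leaves strict mode and httponly unset):

```sh
php -d session.use_strict_mode=1 \
    -d session.cookie_httponly=1 \
    -d session.cookie_secure=1 \
    -r 'print_r(session_get_cookie_params());'
```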
-; Handler used to serialize data. php is the standard serializer of PHP.
-; http://php.net/session.serialize-handler
-session.serialize_handler = php
-
-; Defines the probability that the 'garbage collection' process is started
-; on every session initialization. The probability is calculated by using
-; gc_probability/gc_divisor. Where session.gc_probability is the numerator
-; and gc_divisor is the denominator in the equation. Setting this value to 1
-; when the session.gc_divisor value is 100 will give you approximately a 1% chance
-; the gc will run on any given request.
-; Default Value: 1
-; Development Value: 1
-; Production Value: 1
-; http://php.net/session.gc-probability
-session.gc_probability = 1
-
-; Defines the probability that the 'garbage collection' process is started on every
-; session initialization. The probability is calculated by using the following equation:
-; gc_probability/gc_divisor. Where session.gc_probability is the numerator and
-; session.gc_divisor is the denominator in the equation. Setting this value to 1
-; when the session.gc_divisor value is 100 will give you approximately a 1% chance
-; the gc will run on any given request. Increasing this value to 1000 will give you
-; a 0.1% chance the gc will run on any given request. For high volume production servers,
-; this is a more efficient approach.
-; Default Value: 100
-; Development Value: 1000
-; Production Value: 1000
-; http://php.net/session.gc-divisor
-session.gc_divisor = 1000
-
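Worked out for the values above: 1/1000 = 0.001, i.e. a 0.1% chance of garbage collection per request. A one-liner to compute the effective odds, assuming a `php` CLI:

```sh
php -r 'printf("gc chance per request: %.2f%%\n",
    100 * ini_get("session.gc_probability") / ini_get("session.gc_divisor"));'
```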
-; After this number of seconds, stored data will be seen as 'garbage' and
-; cleaned up by the garbage collection process.
-; http://php.net/session.gc-maxlifetime
-session.gc_maxlifetime = 200000
-
-; NOTE: If you are using the subdirectory option for storing session files
-; (see session.save_path above), then garbage collection does *not*
-; happen automatically. You will need to do your own garbage
-; collection through a shell script, cron entry, or some other method.
-; For example, the following command is the equivalent of
-; setting session.gc_maxlifetime to 1440 (1440 seconds = 24 minutes):
-; find /path/to/sessions -cmin +24 -type f | xargs rm
-
-; Check HTTP Referer to invalidate externally stored URLs containing ids.
-; HTTP_REFERER has to contain this substring for the session to be
-; considered as valid.
-; http://php.net/session.referer-check
-session.referer_check =
-
-; How many bytes to read from the file.
-; http://php.net/session.entropy-length
-;session.entropy_length = 32
-
-; Specified here to create the session id.
-; http://php.net/session.entropy-file
-; Defaults to /dev/urandom
-; On systems that don't have /dev/urandom but do have /dev/arandom, this will default to /dev/arandom
-; If neither are found at compile time, the default is no entropy file.
-; On windows, setting the entropy_length setting will activate the
-; Windows random source (using the CryptoAPI)
-;session.entropy_file = /dev/urandom
-
-; Set to {nocache,private,public,} to determine HTTP caching aspects
-; or leave this empty to avoid sending anti-caching headers.
-; http://php.net/session.cache-limiter
-session.cache_limiter = nocache
-
-; Document expires after n minutes.
-; http://php.net/session.cache-expire
-session.cache_expire = 180
-
-; trans sid support is disabled by default.
-; Use of trans sid may risk your users' security.
-; Use this option with caution.
-; - A user may send a URL containing an active session ID
-;   to another person via email/IRC/etc.
-; - A URL containing an active session ID may be stored
-;   on a publicly accessible computer.
-; - A user may access your site with the same session ID
-;   every time, using a URL stored in the browser's history or bookmarks.
-; http://php.net/session.use-trans-sid
-session.use_trans_sid = 0
-
-; Select a hash function for use in generating session ids.
-; Possible Values
-; 0 (MD5 128 bits)
-; 1 (SHA-1 160 bits)
-; This option may also be set to the name of any hash function supported by
-; the hash extension. A list of available hashes is returned by the hash_algos()
-; function.
-; http://php.net/session.hash-function
-session.hash_function = 0
-
-; Define how many bits are stored in each character when converting
-; the binary hash data to something readable.
-; Possible values:
-; 4 (4 bits: 0-9, a-f)
-; 5 (5 bits: 0-9, a-v)
-; 6 (6 bits: 0-9, a-z, A-Z, "-", ",")
-; Default Value: 4
-; Development Value: 5
-; Production Value: 5
-; http://php.net/session.hash-bits-per-character
-session.hash_bits_per_character = 5
-
-; The URL rewriter will look for URLs in a defined set of HTML tags.
-; form/fieldset are special; if you include them here, the rewriter will
-; add a hidden field with the info which is otherwise appended
-; to URLs. If you want XHTML conformity, remove the form entry.
-; Note that all valid entries require a "=", even if no value follows.
-; Default Value: "a=href,area=href,frame=src,form=,fieldset="
-; Development Value: "a=href,area=href,frame=src,input=src,form=fakeentry"
-; Production Value: "a=href,area=href,frame=src,input=src,form=fakeentry"
-; http://php.net/url-rewriter.tags
-url_rewriter.tags = "a=href,area=href,frame=src,input=src,form=fakeentry"
-
-; Enable upload progress tracking in $_SESSION
-; Default Value: On
-; Development Value: On
-; Production Value: On
-; http://php.net/session.upload-progress.enabled
-;session.upload_progress.enabled = On
-
-; Cleanup the progress information as soon as all POST data has been read
-; (i.e. upload completed).
-; Default Value: On
-; Development Value: On
-; Production Value: On
-; http://php.net/session.upload-progress.cleanup
-;session.upload_progress.cleanup = On
-
-; A prefix used for the upload progress key in $_SESSION
-; Default Value: "upload_progress_"
-; Development Value: "upload_progress_"
-; Production Value: "upload_progress_"
-; http://php.net/session.upload-progress.prefix
-;session.upload_progress.prefix = "upload_progress_"
-
-; The index name (concatenated with the prefix) in $_SESSION
-; containing the upload progress information
-; Default Value: "PHP_SESSION_UPLOAD_PROGRESS"
-; Development Value: "PHP_SESSION_UPLOAD_PROGRESS"
-; Production Value: "PHP_SESSION_UPLOAD_PROGRESS"
-; http://php.net/session.upload-progress.name
-;session.upload_progress.name = "PHP_SESSION_UPLOAD_PROGRESS"
-
-; How frequently the upload progress should be updated.
-; Given either in percentages (per-file), or in bytes
-; Default Value: "1%"
-; Development Value: "1%"
-; Production Value: "1%"
-; http://php.net/session.upload-progress.freq
-;session.upload_progress.freq = "1%"
-
-; The minimum delay between updates, in seconds
-; Default Value: 1
-; Development Value: 1
-; Production Value: 1
-; http://php.net/session.upload-progress.min-freq
-;session.upload_progress.min_freq = "1"
-
-[Assertion]
-; Switch whether to compile assertions at all (to have no overhead at run-time)
-; -1: Do not compile at all
-; 0: Jump over assertion at run-time
-; 1: Execute assertions
-; Changing from or to a negative value is only possible in php.ini! (For turning assertions on and off at run-time, see assert.active, when zend.assertions = 1)
-; Default Value: 1
-; Development Value: 1
-; Production Value: -1
-; http://php.net/zend.assertions
-zend.assertions = -1
-
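A sketch of both modes, assuming a `php` CLI (the `-d` flags count as INI-stage settings, so switching from the negative value is allowed here):

```sh
# Assertions compiled in and throwing (development behaviour):
php -d zend.assertions=1 -d assert.exception=1 -r 'assert(1 === 2);' || echo "AssertionError thrown"
# Assertions compiled out entirely (production, as configured above):
php -d zend.assertions=-1 -r 'assert(1 === 2); echo "assertion compiled away\n";'
```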
-; Assert(expr); active by default.
-; http://php.net/assert.active
-;assert.active = On
-
-; Throw an AssertionError on failed assertions
-; http://php.net/assert.exception
-;assert.exception = On
-
-; Issue a PHP warning for each failed assertion. (Overridden by assert.exception if active)
-; http://php.net/assert.warning
-;assert.warning = On
-
-; Don't bail out by default.
-; http://php.net/assert.bail
-;assert.bail = Off
-
-; User-function to be called if an assertion fails.
-; http://php.net/assert.callback
-;assert.callback = 0
-
-; Eval the expression with current error_reporting(). Set to true if you want
-; error_reporting(0) around the eval().
-; http://php.net/assert.quiet-eval
-;assert.quiet_eval = 0
-
-[mbstring]
-; language for internal character representation.
-; This affects mb_send_mail() and mbstring.detect_order.
-; http://php.net/mbstring.language
-;mbstring.language = Japanese
-
-; Use of this INI entry is deprecated, use global internal_encoding instead.
-; internal/script encoding.
-; Some encoding cannot work as internal encoding. (e.g. SJIS, BIG5, ISO-2022-*)
-; If empty, default_charset or internal_encoding or iconv.internal_encoding is used.
-; The precedence is: default_charset < internal_encoding < iconv.internal_encoding
-;mbstring.internal_encoding =
-
-; Use of this INI entry is deprecated, use global input_encoding instead.
-; http input encoding.
-; mbstring.encoding_translation = On is needed to use this setting.
-; If empty, default_charset or input_encoding or mbstring.http_input is used.
-; The precedence is: default_charset < input_encoding < mbstring.http_input
-; http://php.net/mbstring.http-input
-;mbstring.http_input =
-
-; Use of this INI entry is deprecated, use global output_encoding instead.
-; http output encoding.
-; mb_output_handler must be registered as output buffer to function.
-; If empty, default_charset or output_encoding or mbstring.http_output is used.
-; The precedence is: default_charset < output_encoding < mbstring.http_output
-; To use an output encoding conversion, mbstring's output handler must be set
-; otherwise output encoding conversion cannot be performed.
-; http://php.net/mbstring.http-output
-;mbstring.http_output =
-
-; enable automatic encoding translation according to
-; mbstring.internal_encoding setting. Input chars are
-; converted to internal encoding by setting this to On.
-; Note: Do _not_ use automatic encoding translation for
-; portable libs/applications.
-; http://php.net/mbstring.encoding-translation
-;mbstring.encoding_translation = Off
-
-; automatic encoding detection order.
-; "auto" detect order is changed according to mbstring.language
-; http://php.net/mbstring.detect-order
-;mbstring.detect_order = auto
-
-; substitute_character is used when a character cannot be converted
-; from one encoding to another.
-; http://php.net/mbstring.substitute-character
-;mbstring.substitute_character = none
-
-; overload(replace) single byte functions by mbstring functions.
-; mail(), ereg(), etc are overloaded by mb_send_mail(), mb_ereg(),
-; etc. Possible values are 0,1,2,4 or combination of them.
-; For example, 7 for overload everything.
-; 0: No overload
-; 1: Overload mail() function
-; 2: Overload str*() functions
-; 4: Overload ereg*() functions
-; http://php.net/mbstring.func-overload
-;mbstring.func_overload = 0
-
-; enable strict encoding detection.
-; Default: Off
-;mbstring.strict_detection = On
-
-; This directive specifies the regex pattern of content types for which mb_output_handler()
-; is activated.
-; Default: mbstring.http_output_conv_mimetype=^(text/|application/xhtml\+xml)
-;mbstring.http_output_conv_mimetype=
-
-[gd]
-; Tell the jpeg decoder to ignore warnings and try to create
-; a gd image. The warnings will then be displayed as notices.
-; Disabled by default.
-; http://php.net/gd.jpeg-ignore-warning
-;gd.jpeg_ignore_warning = 0
-
-[exif]
-; Exif UNICODE user comments are handled as UCS-2BE/UCS-2LE and JIS as JIS.
-; With mbstring support this will automatically be converted into the encoding
-; given by corresponding encode setting. When empty mbstring.internal_encoding
-; is used. For the decode settings you can distinguish between motorola and
-; intel byte order. A decode setting cannot be empty.
-; http://php.net/exif.encode-unicode
-;exif.encode_unicode = ISO-8859-15
-
-; http://php.net/exif.decode-unicode-motorola
-;exif.decode_unicode_motorola = UCS-2BE
-
-; http://php.net/exif.decode-unicode-intel
-;exif.decode_unicode_intel = UCS-2LE
-
-; http://php.net/exif.encode-jis
-;exif.encode_jis =
-
-; http://php.net/exif.decode-jis-motorola
-;exif.decode_jis_motorola = JIS
-
-; http://php.net/exif.decode-jis-intel
-;exif.decode_jis_intel = JIS
-
-[Tidy]
-; The path to a default tidy configuration file to use when using tidy
-; http://php.net/tidy.default-config
-;tidy.default_config = /usr/local/lib/php/default.tcfg
-
-; Should tidy clean and repair output automatically?
-; WARNING: Do not use this option if you are generating non-html content
-; such as dynamic images
-; http://php.net/tidy.clean-output
-tidy.clean_output = Off
-
-[soap]
-; Enables or disables WSDL caching feature.
-; http://php.net/soap.wsdl-cache-enabled
-soap.wsdl_cache_enabled=1
-
-; Sets the directory name where SOAP extension will put cache files.
-; http://php.net/soap.wsdl-cache-dir
-
-; RPM note : cache directory must be owned by process owner
-; for mod_php, see /etc/httpd/conf.d/php.conf
-; for php-fpm, see /etc/php-fpm.d/*conf
-soap.wsdl_cache_dir="/tmp"
-
-; (time to live) Sets the number of seconds a cached file will be used
-; instead of the original one.
-; http://php.net/soap.wsdl-cache-ttl
-soap.wsdl_cache_ttl=86400
-
-; Sets the size of the cache limit. (Max. number of WSDL files to cache)
-soap.wsdl_cache_limit = 5
-
-[sysvshm]
-; A default size of the shared memory segment
-;sysvshm.init_mem = 10000
-
-[ldap]
-; Sets the maximum number of open links or -1 for unlimited.
-ldap.max_links = -1
-
-[mcrypt]
-; For more information about mcrypt settings see http://php.net/mcrypt-module-open
-
-; Directory where to load mcrypt algorithms
-; Default: Compiled in into libmcrypt (usually /usr/local/lib/libmcrypt)
-;mcrypt.algorithms_dir=
-
-; Directory where to load mcrypt modes
-; Default: Compiled in into libmcrypt (usually /usr/local/lib/libmcrypt)
-;mcrypt.modes_dir=
-
-[dba]
-;dba.default_handler=
-
-[opcache]
-; Determines if Zend OPCache is enabled
-;opcache.enable=0
-
-; Determines if Zend OPCache is enabled for the CLI version of PHP
-;opcache.enable_cli=0
-
-; The OPcache shared memory storage size.
-;opcache.memory_consumption=64
-opcache.memory_consumption=256
-
-; The amount of memory for interned strings in Mbytes.
-;opcache.interned_strings_buffer=4
-
-; The maximum number of keys (scripts) in the OPcache hash table.
-; Only numbers between 200 and 100000 are allowed.
-;opcache.max_accelerated_files=2000
-
-; The maximum percentage of "wasted" memory until a restart is scheduled.
-;opcache.max_wasted_percentage=5
-
-; When this directive is enabled, the OPcache appends the current working
-; directory to the script key, thus eliminating possible collisions between
-; files with the same name (basename). Disabling the directive improves
-; performance, but may break existing applications.
-;opcache.use_cwd=1
-
-; When disabled, you must reset the OPcache manually or restart the
-; webserver for changes to the filesystem to take effect.
-;opcache.validate_timestamps=1
-
-; How often (in seconds) to check file timestamps for changes to the shared
-; memory storage allocation. ("1" means validate once per second, but only
-; once per request. "0" means always validate)
-;opcache.revalidate_freq=2
-
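A sketch of inspecting the cache state on the CLI (requires the OPcache extension to be loaded; CLI normally needs opcache.enable_cli=1):

```sh
php -d opcache.enable_cli=1 -r '
    $s = opcache_get_status(false);
    var_dump($s["opcache_enabled"]);
    var_dump(ini_get("opcache.validate_timestamps"), ini_get("opcache.revalidate_freq"));'
```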
-; Enables or disables file search in include_path optimization
-;opcache.revalidate_path=0
-
-; If disabled, all PHPDoc comments are dropped from the code to reduce the
-; size of the optimized code.
-;opcache.save_comments=1
-
-; If enabled, a fast shutdown sequence is used for the accelerated code
-;opcache.fast_shutdown=0
-
-; Allow file existence override (file_exists, etc.) performance feature.
-;opcache.enable_file_override=0
-opcache.enable_file_override=1
-
-; A bitmask, where each bit enables or disables the appropriate OPcache
-; passes
-;opcache.optimization_level=0xffffffff
-
-;opcache.inherited_hack=1
-;opcache.dups_fix=0
-
-; The location of the OPcache blacklist file (wildcards allowed).
-; Each OPcache blacklist file is a text file that holds the names of files
-; that should not be accelerated. The file format is to add each filename
-; to a new line. The filename may be a full path or just a file prefix
-; (i.e., /var/www/x blacklists all the files and directories in /var/www
-; that start with 'x'). Lines starting with a ; are ignored (comments).
-;opcache.blacklist_filename=
-
-; Allows exclusion of large files from being cached. By default all files
-; are cached.
-;opcache.max_file_size=0
-
-; Check the cache checksum each N requests.
-; The default value of "0" means that the checks are disabled.
-;opcache.consistency_checks=0
-
-; How long to wait (in seconds) for a scheduled restart to begin if the cache
-; is not being accessed.
-;opcache.force_restart_timeout=180
-
-; OPcache error_log file name. Empty string assumes "stderr".
-;opcache.error_log=
-
-; All OPcache errors go to the Web server log.
-; By default, only fatal errors (level 0) or errors (level 1) are logged.
-; You can also enable warnings (level 2), info messages (level 3) or
-; debug messages (level 4).
-;opcache.log_verbosity_level=1
-
-; Preferred Shared Memory back-end. Leave empty and let the system decide.
-;opcache.preferred_memory_model=
-
-; Protect the shared memory from unexpected writing during script execution.
-; Useful for internal debugging only.
-;opcache.protect_memory=0
-
-; Allows calling OPcache API functions only from PHP scripts whose path
-; starts with the specified string. The default "" means no restriction.
-;opcache.restrict_api=
-
-; Mapping base of shared memory segments (for Windows only). All the PHP
-; processes have to map shared memory into the same address space. This
-; directive allows manually fixing the "Unable to reattach to base address"
-; errors.
-;opcache.mmap_base=
-
-; Enables and sets the second level cache directory.
-; It should improve performance when SHM memory is full, at server restart or
-; SHM reset. The default "" disables file based caching.
-;opcache.file_cache=
-
-; Enables or disables opcode caching in shared memory.
-;opcache.file_cache_only=0
-
-; Enables or disables checksum validation when script loaded from file cache.
-;opcache.file_cache_consistency_checks=1
-
-; Enables or disables copying of PHP code (text segment) into HUGE PAGES.
-; This should improve performance, but requires appropriate OS configuration.
-opcache.huge_code_pages=1
-
-[curl]
-; A default value for the CURLOPT_CAINFO option. This is required to be an
-; absolute path.
-;curl.cainfo =
-
-[openssl]
-; The location of a Certificate Authority (CA) file on the local filesystem
-; to use when verifying the identity of SSL/TLS peers. Most users should
-; not specify a value for this directive as PHP will attempt to use the
-; OS-managed cert stores in its absence. If specified, this value may still
-; be overridden on a per-stream basis via the "cafile" SSL stream context
-; option.
-;openssl.cafile=
-
-; If openssl.cafile is not specified or if the CA file is not found, the
-; directory pointed to by openssl.capath is searched for a suitable
-; certificate. This value must be a correctly hashed certificate directory.
-; Most users should not specify a value for this directive as PHP will
-; attempt to use the OS-managed cert stores in its absence. If specified,
-; this value may still be overridden on a per-stream basis via the "capath"
-; SSL stream context option.
-;openssl.capath=
-
-; Local Variables:
-; tab-width: 4
-; End:
-
-[APC]
-apc.shm_size = 32m
-apc.enabled = 1
-
-[xdebug]
-xdebug.remote_enable = on
diff --git a/images/php/fpm/ssmtp.conf b/images/php/fpm/ssmtp.conf
deleted file mode 100644
index 1659e71b49..0000000000
--- a/images/php/fpm/ssmtp.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-# ssmtp config (will be filled during entrypoint 50-ssmtp.sh)
-
-# Email 'From:' headers can override the default domain
-FromLineOverride=yes
diff --git a/images/postgres-ckan/90-datastore-user.sh b/images/postgres-ckan/90-datastore-user.sh
deleted file mode 100755
index d12ea97269..0000000000
--- a/images/postgres-ckan/90-datastore-user.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-set -e
-
-psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
- CREATE USER ckan_datastore with encrypted password 'ckan';
- GRANT ALL PRIVILEGES ON DATABASE ckan TO ckan_datastore;
-EOSQL
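A sketch of verifying what this init script set up, run inside the container (user/database names come from the script and the Dockerfile below; the datastore password is 'ckan' per the CREATE USER line):

```sh
PGPASSWORD=ckan psql -U ckan -d ckan -c '\du ckan_datastore'
PGPASSWORD=ckan psql -U ckan_datastore -d ckan -c 'SELECT current_user;'
```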
diff --git a/images/postgres-ckan/Dockerfile b/images/postgres-ckan/Dockerfile
deleted file mode 100644
index a20a80827b..0000000000
--- a/images/postgres-ckan/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/postgres
-
-# change log_min_error_statement and log_min_messages from `error` to `log`, as Drupal tends to cause errors that would otherwise all be logged (`log` is a less verbose level than `error`)
-RUN sed -i "s/#log_min_error_statement = error/log_min_error_statement = log/" /usr/local/share/postgresql/postgresql.conf.sample \
- && sed -i "s/#log_min_messages = warning/log_min_messages = log/" /usr/local/share/postgresql/postgresql.conf.sample
-
-ENV POSTGRES_PASSWORD=ckan \
- POSTGRES_USER=ckan \
- POSTGRES_DB=ckan
-
-COPY 90-datastore-user.sh /docker-entrypoint-initdb.d/
diff --git a/images/postgres-drupal/Dockerfile b/images/postgres-drupal/Dockerfile
deleted file mode 100644
index 2a01b4b630..0000000000
--- a/images/postgres-drupal/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/postgres
-
-# change log_min_error_statement and log_min_messages from `error` to `log`, as Drupal tends to cause errors that would otherwise all be logged (`log` is a less verbose level than `error`)
-RUN sed -i "s/#log_min_error_statement = error/log_min_error_statement = log/" /usr/local/share/postgresql/postgresql.conf.sample \
- && sed -i "s/#log_min_messages = warning/log_min_messages = log/" /usr/local/share/postgresql/postgresql.conf.sample
-
-ENV POSTGRES_PASSWORD=drupal \
- POSTGRES_USER=drupal \
- POSTGRES_DB=drupal
diff --git a/images/postgres/Dockerfile b/images/postgres/Dockerfile
deleted file mode 100644
index 5601f06b8a..0000000000
--- a/images/postgres/Dockerfile
+++ /dev/null
@@ -1,40 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-# alpine 3.11 from https://github.com/docker-library/postgres/blob/master/11/alpine/Dockerfile
-FROM postgres:11.6-alpine
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-ENV LAGOON=postgres
-
-COPY postgres-backup.sh /lagoon/
-
-RUN echo -e "local all all md5\nhost all all 0.0.0.0/0 md5" >> /usr/local/share/postgresql/pg_hba.conf
-
-ENV PGUSER=postgres \
- POSTGRES_PASSWORD=lagoon \
- POSTGRES_USER=lagoon \
- POSTGRES_DB=lagoon \
- PGDATA=/var/lib/postgresql/data/pgdata
-
-# Postgresql entrypoint file needs bash, so start the entrypoints with bash
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.bash"]
-CMD ["/usr/local/bin/docker-entrypoint.sh", "postgres"]
\ No newline at end of file
diff --git a/images/postgres/postgres-backup.sh b/images/postgres/postgres-backup.sh
deleted file mode 100644
index f760fbcf3d..0000000000
--- a/images/postgres/postgres-backup.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/sh
-
-set -eu -o pipefail
-
-# directory to put the backup files
-BACKUP_DIR=/var/lib/postgresql/data/backup
-
-# PostgreSQL parameters
-PGUSER=${POSTGRES_USER:-lagoon}
-PGPASSWORD=${POSTGRES_PASSWORD:-lagoon}
-
-PGHOST=$1
-
-# Number of days to keep backups
-KEEP_BACKUPS_FOR=4 #days
-
-#==============================================================================
-# METHODS
-#==============================================================================
-
-# YYYY-MM-DD_HHMMSS
-TIMESTAMP=$(date +%F_%H%M%S)
-
-function prepare()
-{
- mkdir -p $BACKUP_DIR
-}
-
-function delete_old_backups()
-{
- echo "Deleting $BACKUP_DIR/*.sql.gz older than $KEEP_BACKUPS_FOR days"
- find $BACKUP_DIR -type f -name "*.sql.gz" -mtime +$KEEP_BACKUPS_FOR -exec rm {} \;
-}
-
-
-function database_list() {
- echo $(psql -At -c "select datname from pg_database where not datistemplate and datallowconn and datname != 'postgres';" postgres)
-}
-
-function echo_status(){
- printf '\r';
- printf ' %.0s' {0..100}
- printf '\r';
- printf "$1"'\r'
-}
-
-function backup_database(){
- backup_file="$BACKUP_DIR/$TIMESTAMP.$database.sql.gz"
- output="${output}${database} => $backup_file\n"
- echo_status "...backing up $count of $total databases: $database"
- pg_dump "$database" | gzip -9 > "$backup_file"
-}
-
-function backup_databases(){
- local databases=$(database_list)
- local total=$(echo $databases | wc -w | xargs)
- local output=""
- local count=1
- for database in $databases; do
- backup_database
- local count=$((count+1))
- done
- echo -ne $output
-}
-
-function hr(){
- printf '=%.0s' {1..100}
- printf "\n"
-}
-
-#==============================================================================
-# RUN SCRIPT
-#==============================================================================
-prepare
-delete_old_backups
-hr
-backup_databases
-hr
-printf "All backed up!\n\n"
diff --git a/images/python-ckan/Dockerfile b/images/python-ckan/Dockerfile
deleted file mode 100644
index 74565bf25f..0000000000
--- a/images/python-ckan/Dockerfile
+++ /dev/null
@@ -1,22 +0,0 @@
-ARG PYTHON_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/python:${PYTHON_VERSION}
-
-RUN apk update \
- && apk upgrade \
- && apk add --no-cache git \
- libpq \
- postgresql-dev \
- gcc \
- musl-dev \
- file-dev \
- libxslt-dev \
- libxml2-dev \
- libffi-dev
-
-RUN mkdir -p /app/ckan/default \
- && fix-permissions /app/ckan/default
-
-RUN virtualenv --no-site-packages /app/ckan/default \
- && . /app/ckan/default/bin/activate \
- && pip install setuptools==20.4
diff --git a/images/python-ckandatapusher/Dockerfile b/images/python-ckandatapusher/Dockerfile
deleted file mode 100644
index fe7f2a451d..0000000000
--- a/images/python-ckandatapusher/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-ARG PYTHON_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/python:${PYTHON_VERSION}
-
-RUN apk update \
- && apk upgrade \
- && apk add --no-cache git \
- libpq \
- postgresql-dev \
- gcc \
- musl-dev \
- file-dev \
- libxslt-dev \
- libxml2-dev \
- libffi-dev \
- pcre-dev
-
-RUN virtualenv /app/ckan/datapusher
-
-RUN mkdir -p /app/ckan/datapusher/src \
- && mkdir -p /etc/ckan \
- && fix-permissions /app/ckan \
- && ln -s /app/ckan /usr/lib/ckan \
- && . /app/ckan/datapusher/bin/activate \
- && pip install uwsgi \
- && cd /app/ckan/datapusher/src \
- && git clone -b 0.0.14 https://github.com/ckan/datapusher.git \
- && cd datapusher \
- && /app/ckan/datapusher/bin/pip install -r requirements.txt \
- && /app/ckan/datapusher/bin/python setup.py develop \
- && cp deployment/datapusher.wsgi /etc/ckan/ \
- && cp deployment/datapusher_settings.py /etc/ckan/
-
-ENV LISTEN_PORT=8800
-
-CMD ["sh", "-c", ". /app/ckan/datapusher/bin/activate && uwsgi --http :${LISTEN_PORT} --wsgi-file /etc/ckan/datapusher.wsgi"]
diff --git a/images/python/80-shell-timeout.sh b/images/python/80-shell-timeout.sh
deleted file mode 100644
index fdc02f389e..0000000000
--- a/images/python/80-shell-timeout.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-# If we are running within kubernetes, set a shell timeout of 10mins.
-# We do that so old shells are closed and we can idle the cli container
-if [ -n "$KUBERNETES_PORT" ]; then
- TMOUT=600
-fi
\ No newline at end of file
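A quick sketch of the TMOUT behaviour this relied on (bash treats TMOUT as a default read/idle timeout; the 2-second value is just for the demo):

```sh
bash -c 'TMOUT=2; read -r REPLY && echo "got input" || echo "timed out after ${TMOUT}s"'
```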
diff --git a/images/python/Dockerfile b/images/python/Dockerfile
deleted file mode 100644
index 1ee8d863f5..0000000000
--- a/images/python/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-ARG PYTHON_VERSION
-ARG IMAGE_REPO
-ARG ALPINE_VERSION
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM python:${PYTHON_VERSION}-alpine${ALPINE_VERSION}
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=python
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-RUN apk add --no-cache --virtual .build-deps \
- build-base \
- && pip install --upgrade pip \
- && pip install virtualenv==16.7.10 \
- && apk del .build-deps
-
-# Make sure shells are not running forever
-COPY 80-shell-timeout.sh /lagoon/entrypoints/
-RUN echo "source /lagoon/entrypoints/80-shell-timeout.sh" >> /home/.bashrc
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["python"]
diff --git a/images/rabbitmq-cluster/enabled_plugins b/images/rabbitmq-cluster/enabled_plugins
index 607ac7474e..4ef8337a78 100644
--- a/images/rabbitmq-cluster/enabled_plugins
+++ b/images/rabbitmq-cluster/enabled_plugins
@@ -1 +1 @@
-[rabbitmq_management,rabbitmq_delayed_message_exchange,rabbitmq_peer_discovery_k8s].
+[rabbitmq_management,rabbitmq_delayed_message_exchange,rabbitmq_peer_discovery_k8s,rabbitmq_prometheus].
diff --git a/images/rabbitmq/Dockerfile b/images/rabbitmq/Dockerfile
index e936178fb5..b1338aa795 100644
--- a/images/rabbitmq/Dockerfile
+++ b/images/rabbitmq/Dockerfile
@@ -1,7 +1,8 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-# alpine 3.11 as per https://github.com/docker-library/rabbitmq/blob/master/3.8/alpine/Dockerfile
-FROM rabbitmq:3.8-management-alpine
+ARG UPSTREAM_REPO
+ARG UPSTREAM_TAG
+FROM ${UPSTREAM_REPO:-uselagoon}/commons:${UPSTREAM_TAG:-latest} as commons
+# alpine 3.12 as per https://github.com/docker-library/rabbitmq/blob/master/3.8/alpine/Dockerfile
+FROM rabbitmq:3.8.9-management-alpine
ARG LAGOON_VERSION
ENV LAGOON_VERSION=$LAGOON_VERSION
@@ -13,8 +14,8 @@ ENV RABBITMQ_DEFAULT_USER='guest' \
COPY --from=commons /bin/ep /bin/fix-permissions /bin/
-COPY rabbitmq_delayed_message_exchange-3.8.0.ez /plugins
-RUN rabbitmq-plugins enable --offline rabbitmq_delayed_message_exchange;
+COPY rabbitmq_delayed_message_exchange-3.8.9.ez /plugins
+RUN rabbitmq-plugins enable --offline rabbitmq_delayed_message_exchange rabbitmq_prometheus;
# Copy startup schema with vhost, users, permissions and policies
COPY definitions.json /etc/rabbitmq/definitions.json
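With rabbitmq_prometheus enabled, the broker serves a scrape endpoint on port 15692 by default; a sketch of checking it against a running container:

```sh
curl -s http://localhost:15692/metrics | head -n 5
```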
diff --git a/images/rabbitmq/rabbitmq_delayed_message_exchange-3.8.0.ez b/images/rabbitmq/rabbitmq_delayed_message_exchange-3.8.0.ez
deleted file mode 100644
index 6332f5ff21..0000000000
Binary files a/images/rabbitmq/rabbitmq_delayed_message_exchange-3.8.0.ez and /dev/null differ
diff --git a/images/rabbitmq/rabbitmq_delayed_message_exchange-3.8.9.ez b/images/rabbitmq/rabbitmq_delayed_message_exchange-3.8.9.ez
new file mode 100644
index 0000000000..2ba64115db
Binary files /dev/null and b/images/rabbitmq/rabbitmq_delayed_message_exchange-3.8.9.ez differ
diff --git a/images/redis-persistent/Dockerfile b/images/redis-persistent/Dockerfile
deleted file mode 100644
index f83a77aee3..0000000000
--- a/images/redis-persistent/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/redis
-
-ENV FLAVOR=persistent
diff --git a/images/redis/Dockerfile b/images/redis/Dockerfile
deleted file mode 100644
index a6990f656c..0000000000
--- a/images/redis/Dockerfile
+++ /dev/null
@@ -1,37 +0,0 @@
-ARG IMAGE_REPO
-ARG ALPINE_VERSION
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM redis:5.0-alpine${ALPINE_VERSION}
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=redis
-ENV FLAVOR=ephemeral
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home /home
-
-RUN chmod g+w /etc/passwd \
- && mkdir -p /home
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-COPY conf /etc/redis/
-COPY docker-entrypoint /lagoon/entrypoints/70-redis-entrypoint
-
-RUN fix-permissions /etc/redis \
- && fix-permissions /data
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-CMD ["redis-server", "/etc/redis/redis.conf"]
diff --git a/images/redis/conf/ephemeral.conf b/images/redis/conf/ephemeral.conf
deleted file mode 100644
index 4840b8259f..0000000000
--- a/images/redis/conf/ephemeral.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# this disables the persistent cache
-save ""
-appendonly no
diff --git a/images/redis/conf/persistent.conf b/images/redis/conf/persistent.conf
deleted file mode 100644
index d3976a05f4..0000000000
--- a/images/redis/conf/persistent.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-save 300 1
-save 60 100
-
-appendonly yes
-appendfsync everysec
-
-auto-aof-rewrite-percentage 100
-auto-aof-rewrite-min-size 64mb
-
-# RDB files created with checksum disabled have a checksum of zero that will
-# tell the loading code to skip the check.
-rdbchecksum yes
-
-# The filename where to dump the DB
-dbfilename redis.rdb
-
-dir /data
diff --git a/images/redis/conf/redis.conf b/images/redis/conf/redis.conf
deleted file mode 100644
index 06425ea1c6..0000000000
--- a/images/redis/conf/redis.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-# Redis 4.0 configuration file for non-persistent cache
-# see https://raw.githubusercontent.com/antirez/redis/4.0/redis.conf for all possible configs.
-
-loglevel ${LOGLEVEL:-notice}
-databases ${DATABASES:-1}
-
-maxmemory ${MAXMEMORY:-100mb}
-maxmemory-policy allkeys-lru
-
-# allow other hosts to connect to us
-protected-mode no
-bind 0.0.0.0
-
-${REQUIREPASS_CONF:-}
-
-include /etc/redis/${FLAVOR:-ephemeral}.conf
diff --git a/images/redis/docker-entrypoint b/images/redis/docker-entrypoint
deleted file mode 100755
index fafbb758ef..0000000000
--- a/images/redis/docker-entrypoint
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-if [ -n "${REDIS_PASSWORD}" ]; then
- export REQUIREPASS_CONF="# Enable basic/simple authentication
-# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-requirepass ${REDIS_PASSWORD}"
-fi
-
-ep /etc/redis/*
-
-exec "$@"
diff --git a/images/solr-ckan/Dockerfile b/images/solr-ckan/Dockerfile
deleted file mode 100644
index be3563bcd4..0000000000
--- a/images/solr-ckan/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-ARG SOLR_MAJ_MIN_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/solr:${SOLR_MAJ_MIN_VERSION}
-ARG SOLR_MAJ_MIN_VERSION
-
-COPY solr${SOLR_MAJ_MIN_VERSION} /solr-conf
-
-RUN precreate-core ckan /solr-conf
-
-CMD ["solr-foreground"]
diff --git a/images/solr-ckan/solr5.5/conf/elevate.xml b/images/solr-ckan/solr5.5/conf/elevate.xml
deleted file mode 100644
index 193a0e727a..0000000000
--- a/images/solr-ckan/solr5.5/conf/elevate.xml
+++ /dev/null
@@ -1,27 +0,0 @@
diff --git a/images/solr-ckan/solr5.5/conf/mapping-ISOLatin1Accent.txt b/images/solr-ckan/solr5.5/conf/mapping-ISOLatin1Accent.txt
deleted file mode 100644
index b92d03c550..0000000000
--- a/images/solr-ckan/solr5.5/conf/mapping-ISOLatin1Accent.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file contains character mappings for the default fulltext field type.
-# The source characters (on the left) will be replaced by the respective target
-# characters before any other processing takes place.
-# Lines starting with a pound character # are ignored.
-#
-# For sensible defaults, use the mapping-ISOLatin1Accent.txt file distributed
-# with the example application of your Solr version.
-#
-# Examples:
-# "À" => "A"
-# "\u00c4" => "A"
-# "\u00c4" => "\u0041"
-# "æ" => "ae"
-# "\n" => " "
diff --git a/images/solr-ckan/solr5.5/conf/protwords.txt b/images/solr-ckan/solr5.5/conf/protwords.txt
deleted file mode 100644
index cda8581497..0000000000
--- a/images/solr-ckan/solr5.5/conf/protwords.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#-----------------------------------------------------------------------
-# This file blocks words from being operated on by the stemmer and word delimiter.
-&
-<
->
-'
-"
diff --git a/images/solr-ckan/solr5.5/conf/schema.xml b/images/solr-ckan/solr5.5/conf/schema.xml
deleted file mode 100644
index 8e5018a2e2..0000000000
--- a/images/solr-ckan/solr5.5/conf/schema.xml
+++ /dev/null
@@ -1,188 +0,0 @@
-<!-- schema.xml body lost in extraction (XML markup stripped); only the values "index_id" and "text" survived -->
diff --git a/images/solr-ckan/solr5.5/conf/solrconfig.xml b/images/solr-ckan/solr5.5/conf/solrconfig.xml
deleted file mode 100644
index a0549e1146..0000000000
--- a/images/solr-ckan/solr5.5/conf/solrconfig.xml
+++ /dev/null
@@ -1,1800 +0,0 @@
-<!-- solrconfig.xml body lost in extraction (XML markup stripped); surviving values include ${solr.abortOnConfigurationError:true}, ${solr.luceneMatchVersion:LUCENE_50}, /var/solr/${solr.core.name}, ${solr.lock.type:none}, autoCommit/autoSoftCommit and replication properties, and edismax/spellcheck/suggest/elevator/terms/tvComponent handler settings -->
diff --git a/images/solr-ckan/solr5.5/conf/solrconfig_extra.xml b/images/solr-ckan/solr5.5/conf/solrconfig_extra.xml
deleted file mode 100644
index c5bc3acfb5..0000000000
--- a/images/solr-ckan/solr5.5/conf/solrconfig_extra.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<!-- solrconfig_extra.xml body lost in extraction (XML markup stripped); surviving values: textSpell, default, spell, spellchecker, true -->
diff --git a/images/solr-ckan/solr5.5/conf/solrcore.properties b/images/solr-ckan/solr5.5/conf/solrcore.properties
deleted file mode 100644
index 3a2433f676..0000000000
--- a/images/solr-ckan/solr5.5/conf/solrcore.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Defines Solr properties for this specific core.
-solr.replication.master=false
-solr.replication.slave=false
-solr.replication.pollInterval=00:00:60
-solr.replication.masterUrl=http://localhost:8983/solr
-solr.replication.confFiles=schema.xml,mapping-ISOLatin1Accent.txt,protwords.txt,stopwords.txt,synonyms.txt,elevate.xml
-solr.mlt.timeAllowed=2000
-# You should not set your luceneMatchVersion to anything lower than your Solr
-# Version.
-solr.luceneMatchVersion=5.0
-solr.pinkPony.timeAllowed=-1
-# autoCommit after 10000 docs
-solr.autoCommit.MaxDocs=10000
-# autoCommit after 2 minutes
-solr.autoCommit.MaxTime=120000
-# autoSoftCommit after 2000 docs
-solr.autoSoftCommit.MaxDocs=2000
-# autoSoftCommit after 10 seconds
-solr.autoSoftCommit.MaxTime=10000
-solr.install.dir=../../..
diff --git a/images/solr-ckan/solr5.5/conf/stopwords.txt b/images/solr-ckan/solr5.5/conf/stopwords.txt
deleted file mode 100644
index d7f243e48a..0000000000
--- a/images/solr-ckan/solr5.5/conf/stopwords.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# Contains words which shouldn't be indexed for fulltext fields, e.g., because
-# they're too common. For documentation of the format, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.StopFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-ckan/solr5.5/conf/synonyms.txt b/images/solr-ckan/solr5.5/conf/synonyms.txt
deleted file mode 100644
index 7d22eea6d6..0000000000
--- a/images/solr-ckan/solr5.5/conf/synonyms.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# Contains synonyms to use for your index. For the format used, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.SynonymFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-ckan/solr6.6/conf/elevate.xml b/images/solr-ckan/solr6.6/conf/elevate.xml
deleted file mode 100644
index 193a0e727a..0000000000
--- a/images/solr-ckan/solr6.6/conf/elevate.xml
+++ /dev/null
@@ -1,27 +0,0 @@
diff --git a/images/solr-ckan/solr6.6/conf/mapping-ISOLatin1Accent.txt b/images/solr-ckan/solr6.6/conf/mapping-ISOLatin1Accent.txt
deleted file mode 100644
index b92d03c550..0000000000
--- a/images/solr-ckan/solr6.6/conf/mapping-ISOLatin1Accent.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file contains character mappings for the default fulltext field type.
-# The source characters (on the left) will be replaced by the respective target
-# characters before any other processing takes place.
-# Lines starting with a pound character # are ignored.
-#
-# For sensible defaults, use the mapping-ISOLatin1Accent.txt file distributed
-# with the example application of your Solr version.
-#
-# Examples:
-# "À" => "A"
-# "\u00c4" => "A"
-# "\u00c4" => "\u0041"
-# "æ" => "ae"
-# "\n" => " "
diff --git a/images/solr-ckan/solr6.6/conf/protwords.txt b/images/solr-ckan/solr6.6/conf/protwords.txt
deleted file mode 100644
index cda8581497..0000000000
--- a/images/solr-ckan/solr6.6/conf/protwords.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#-----------------------------------------------------------------------
-# This file blocks words from being operated on by the stemmer and word delimiter.
-&
-<
->
-'
-"
diff --git a/images/solr-ckan/solr6.6/conf/schema.xml b/images/solr-ckan/solr6.6/conf/schema.xml
deleted file mode 100644
index 8e5018a2e2..0000000000
--- a/images/solr-ckan/solr6.6/conf/schema.xml
+++ /dev/null
@@ -1,188 +0,0 @@
-<!-- schema.xml body lost in extraction (XML markup stripped); only the values "index_id" and "text" survived -->
diff --git a/images/solr-ckan/solr6.6/conf/solrconfig.xml b/images/solr-ckan/solr6.6/conf/solrconfig.xml
deleted file mode 100644
index 12f7fb9966..0000000000
--- a/images/solr-ckan/solr6.6/conf/solrconfig.xml
+++ /dev/null
@@ -1,1494 +0,0 @@
-<!-- solrconfig.xml body lost in extraction (XML markup stripped); surviving values include ${solr.luceneMatchVersion:LUCENE_60}, ${solr.hdfs.*} properties, ${solr.lock.type:none}, autoCommit/autoSoftCommit and replication properties, and edismax/spellcheck/suggest/elevator/terms/tvComponent handler settings -->
diff --git a/images/solr-ckan/solr6.6/conf/solrconfig_spellcheck.xml b/images/solr-ckan/solr6.6/conf/solrconfig_spellcheck.xml
deleted file mode 100644
index 5c9d7ad79e..0000000000
--- a/images/solr-ckan/solr6.6/conf/solrconfig_spellcheck.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<!-- solrconfig_spellcheck.xml body lost in extraction (XML markup stripped); surviving values include textSpell, solr.DirectSolrSpellChecker and solr.WordBreakSolrSpellChecker with their numeric parameters -->
diff --git a/images/solr-ckan/solr6.6/conf/solrcore.properties b/images/solr-ckan/solr6.6/conf/solrcore.properties
deleted file mode 100644
index d7d045b0fd..0000000000
--- a/images/solr-ckan/solr6.6/conf/solrcore.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Defines Solr properties for this specific core.
-solr.replication.master=false
-solr.replication.slave=false
-solr.replication.pollInterval=00:00:60
-solr.replication.masterUrl=http://localhost:8983/solr
-solr.replication.confFiles=schema.xml,mapping-ISOLatin1Accent.txt,protwords.txt,stopwords.txt,synonyms.txt,elevate.xml
-solr.mlt.timeAllowed=2000
-# You should not set your luceneMatchVersion to anything lower than your Solr
-# Version.
-solr.luceneMatchVersion=6.0
-solr.selectSearchHandler.timeAllowed=-1
-# autoCommit after 10000 docs
-solr.autoCommit.MaxDocs=10000
-# autoCommit after 2 minutes
-solr.autoCommit.MaxTime=120000
-# autoSoftCommit after 2000 docs
-solr.autoSoftCommit.MaxDocs=2000
-# autoSoftCommit after 10 seconds
-solr.autoSoftCommit.MaxTime=10000
-solr.install.dir=../../..
diff --git a/images/solr-ckan/solr6.6/conf/stopwords.txt b/images/solr-ckan/solr6.6/conf/stopwords.txt
deleted file mode 100644
index d7f243e48a..0000000000
--- a/images/solr-ckan/solr6.6/conf/stopwords.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# Contains words which shouldn't be indexed for fulltext fields, e.g., because
-# they're too common. For documentation of the format, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.StopFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-ckan/solr6.6/conf/synonyms.txt b/images/solr-ckan/solr6.6/conf/synonyms.txt
deleted file mode 100644
index 7d22eea6d6..0000000000
--- a/images/solr-ckan/solr6.6/conf/synonyms.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# Contains synonyms to use for your index. For the format used, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.SynonymFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-drupal/Dockerfile b/images/solr-drupal/Dockerfile
deleted file mode 100644
index 0de192e938..0000000000
--- a/images/solr-drupal/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-ARG SOLR_MAJ_MIN_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/solr:${SOLR_MAJ_MIN_VERSION}
-ARG SOLR_MAJ_MIN_VERSION
-
-COPY solr${SOLR_MAJ_MIN_VERSION} /solr-conf
-
-RUN precreate-core drupal /solr-conf
-
-CMD ["solr-foreground"]
diff --git a/images/solr-drupal/solr5.5/conf/elevate.xml b/images/solr-drupal/solr5.5/conf/elevate.xml
deleted file mode 100644
index 193a0e727a..0000000000
--- a/images/solr-drupal/solr5.5/conf/elevate.xml
+++ /dev/null
@@ -1,27 +0,0 @@
diff --git a/images/solr-drupal/solr5.5/conf/mapping-ISOLatin1Accent.txt b/images/solr-drupal/solr5.5/conf/mapping-ISOLatin1Accent.txt
deleted file mode 100644
index b92d03c550..0000000000
--- a/images/solr-drupal/solr5.5/conf/mapping-ISOLatin1Accent.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file contains character mappings for the default fulltext field type.
-# The source characters (on the left) will be replaced by the respective target
-# characters before any other processing takes place.
-# Lines starting with a pound character # are ignored.
-#
-# For sensible defaults, use the mapping-ISOLatin1Accent.txt file distributed
-# with the example application of your Solr version.
-#
-# Examples:
-# "À" => "A"
-# "\u00c4" => "A"
-# "\u00c4" => "\u0041"
-# "æ" => "ae"
-# "\n" => " "
diff --git a/images/solr-drupal/solr5.5/conf/protwords.txt b/images/solr-drupal/solr5.5/conf/protwords.txt
deleted file mode 100644
index cda8581497..0000000000
--- a/images/solr-drupal/solr5.5/conf/protwords.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#-----------------------------------------------------------------------
-# This file blocks words from being operated on by the stemmer and word delimiter.
-&
-<
->
-'
-"
diff --git a/images/solr-drupal/solr5.5/conf/schema.xml b/images/solr-drupal/solr5.5/conf/schema.xml
deleted file mode 100644
index 692e3be1ad..0000000000
--- a/images/solr-drupal/solr5.5/conf/schema.xml
+++ /dev/null
@@ -1,744 +0,0 @@
-<!-- schema.xml body lost in extraction (XML markup stripped); only the value "id" survived -->
diff --git a/images/solr-drupal/solr5.5/conf/schema_extra_fields.xml b/images/solr-drupal/solr5.5/conf/schema_extra_fields.xml
deleted file mode 100644
index 02b365551c..0000000000
--- a/images/solr-drupal/solr5.5/conf/schema_extra_fields.xml
+++ /dev/null
@@ -1,23 +0,0 @@
diff --git a/images/solr-drupal/solr5.5/conf/schema_extra_types.xml b/images/solr-drupal/solr5.5/conf/schema_extra_types.xml
deleted file mode 100644
index bd716b82ac..0000000000
--- a/images/solr-drupal/solr5.5/conf/schema_extra_types.xml
+++ /dev/null
@@ -1,34 +0,0 @@
diff --git a/images/solr-drupal/solr5.5/conf/solrconfig.xml b/images/solr-drupal/solr5.5/conf/solrconfig.xml
deleted file mode 100644
index 2ccb268e43..0000000000
--- a/images/solr-drupal/solr5.5/conf/solrconfig.xml
+++ /dev/null
@@ -1,1800 +0,0 @@
-<!-- solrconfig.xml body lost in extraction (XML markup stripped); surviving values include ${solr.abortOnConfigurationError:true}, ${solr.luceneMatchVersion:LUCENE_50}, /var/solr/${solr.core.name}, ${solr.lock.type:none}, autoCommit/autoSoftCommit and replication properties, and edismax/spellcheck/suggest/elevator/terms/tvComponent handler settings -->
diff --git a/images/solr-drupal/solr5.5/conf/solrconfig_extra.xml b/images/solr-drupal/solr5.5/conf/solrconfig_extra.xml
deleted file mode 100644
index c5bc3acfb5..0000000000
--- a/images/solr-drupal/solr5.5/conf/solrconfig_extra.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<!-- solrconfig_extra.xml body lost in extraction (XML markup stripped); surviving values: textSpell, default, spell, spellchecker, true -->
diff --git a/images/solr-drupal/solr5.5/conf/solrcore.properties b/images/solr-drupal/solr5.5/conf/solrcore.properties
deleted file mode 100644
index 3a2433f676..0000000000
--- a/images/solr-drupal/solr5.5/conf/solrcore.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Defines Solr properties for this specific core.
-solr.replication.master=false
-solr.replication.slave=false
-solr.replication.pollInterval=00:00:60
-solr.replication.masterUrl=http://localhost:8983/solr
-solr.replication.confFiles=schema.xml,mapping-ISOLatin1Accent.txt,protwords.txt,stopwords.txt,synonyms.txt,elevate.xml
-solr.mlt.timeAllowed=2000
-# You should not set your luceneMatchVersion to anything lower than your Solr
-# Version.
-solr.luceneMatchVersion=5.0
-solr.pinkPony.timeAllowed=-1
-# autoCommit after 10000 docs
-solr.autoCommit.MaxDocs=10000
-# autoCommit after 2 minutes
-solr.autoCommit.MaxTime=120000
-# autoSoftCommit after 2000 docs
-solr.autoSoftCommit.MaxDocs=2000
-# autoSoftCommit after 10 seconds
-solr.autoSoftCommit.MaxTime=10000
-solr.install.dir=../../..
diff --git a/images/solr-drupal/solr5.5/conf/stopwords.txt b/images/solr-drupal/solr5.5/conf/stopwords.txt
deleted file mode 100644
index d7f243e48a..0000000000
--- a/images/solr-drupal/solr5.5/conf/stopwords.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# Contains words which shouldn't be indexed for fulltext fields, e.g., because
-# they're too common. For documentation of the format, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.StopFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-drupal/solr5.5/conf/synonyms.txt b/images/solr-drupal/solr5.5/conf/synonyms.txt
deleted file mode 100644
index 7d22eea6d6..0000000000
--- a/images/solr-drupal/solr5.5/conf/synonyms.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# Contains synonyms to use for your index. For the format used, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.SynonymFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-drupal/solr6.6/conf/elevate.xml b/images/solr-drupal/solr6.6/conf/elevate.xml
deleted file mode 100644
index 193a0e727a..0000000000
--- a/images/solr-drupal/solr6.6/conf/elevate.xml
+++ /dev/null
@@ -1,27 +0,0 @@
diff --git a/images/solr-drupal/solr6.6/conf/mapping-ISOLatin1Accent.txt b/images/solr-drupal/solr6.6/conf/mapping-ISOLatin1Accent.txt
deleted file mode 100644
index b92d03c550..0000000000
--- a/images/solr-drupal/solr6.6/conf/mapping-ISOLatin1Accent.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file contains character mappings for the default fulltext field type.
-# The source characters (on the left) will be replaced by the respective target
-# characters before any other processing takes place.
-# Lines starting with a pound character # are ignored.
-#
-# For sensible defaults, use the mapping-ISOLatin1Accent.txt file distributed
-# with the example application of your Solr version.
-#
-# Examples:
-# "À" => "A"
-# "\u00c4" => "A"
-# "\u00c4" => "\u0041"
-# "æ" => "ae"
-# "\n" => " "
diff --git a/images/solr-drupal/solr6.6/conf/protwords.txt b/images/solr-drupal/solr6.6/conf/protwords.txt
deleted file mode 100644
index cda8581497..0000000000
--- a/images/solr-drupal/solr6.6/conf/protwords.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-#-----------------------------------------------------------------------
-# This file blocks words from being operated on by the stemmer and word delimiter.
-&
-<
->
-'
-"
diff --git a/images/solr-drupal/solr6.6/conf/schema.xml b/images/solr-drupal/solr6.6/conf/schema.xml
deleted file mode 100644
index 7374d6d8b6..0000000000
--- a/images/solr-drupal/solr6.6/conf/schema.xml
+++ /dev/null
@@ -1,843 +0,0 @@
-<!-- schema.xml body lost in extraction (XML markup stripped); surviving values: the &extrafields; and &extratypes; entity includes and the value "id" -->
diff --git a/images/solr-drupal/solr6.6/conf/schema_extra_fields.xml b/images/solr-drupal/solr6.6/conf/schema_extra_fields.xml
deleted file mode 100644
index 8933702d26..0000000000
--- a/images/solr-drupal/solr6.6/conf/schema_extra_fields.xml
+++ /dev/null
@@ -1,21 +0,0 @@
diff --git a/images/solr-drupal/solr6.6/conf/schema_extra_types.xml b/images/solr-drupal/solr6.6/conf/schema_extra_types.xml
deleted file mode 100644
index ce2ce0cb35..0000000000
--- a/images/solr-drupal/solr6.6/conf/schema_extra_types.xml
+++ /dev/null
@@ -1,32 +0,0 @@
diff --git a/images/solr-drupal/solr6.6/conf/schema_legacy_fields.xml b/images/solr-drupal/solr6.6/conf/schema_legacy_fields.xml
deleted file mode 100644
index 902bb28e75..0000000000
--- a/images/solr-drupal/solr6.6/conf/schema_legacy_fields.xml
+++ /dev/null
@@ -1,61 +0,0 @@
diff --git a/images/solr-drupal/solr6.6/conf/schema_legacy_types.xml b/images/solr-drupal/solr6.6/conf/schema_legacy_types.xml
deleted file mode 100644
index b46da0485e..0000000000
--- a/images/solr-drupal/solr6.6/conf/schema_legacy_types.xml
+++ /dev/null
@@ -1,14 +0,0 @@
diff --git a/images/solr-drupal/solr6.6/conf/solrconfig.xml b/images/solr-drupal/solr6.6/conf/solrconfig.xml
deleted file mode 100644
index 0ed1eb55e1..0000000000
--- a/images/solr-drupal/solr6.6/conf/solrconfig.xml
+++ /dev/null
@@ -1,1494 +0,0 @@
-<!-- solrconfig.xml body lost in extraction (XML markup stripped); surviving values include ${solr.luceneMatchVersion:LUCENE_60}, ${solr.hdfs.*} properties, ${solr.lock.type:none}, autoCommit/autoSoftCommit and replication properties, the &spellcheck; entity include, and edismax/spellcheck/suggest/elevator/terms/tvComponent handler settings -->
diff --git a/images/solr-drupal/solr6.6/conf/solrconfig_spellcheck.xml b/images/solr-drupal/solr6.6/conf/solrconfig_spellcheck.xml
deleted file mode 100644
index 5c9d7ad79e..0000000000
--- a/images/solr-drupal/solr6.6/conf/solrconfig_spellcheck.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<!-- solrconfig_spellcheck.xml body lost in extraction (XML markup stripped); surviving values include textSpell, solr.DirectSolrSpellChecker and solr.WordBreakSolrSpellChecker with their numeric parameters -->
diff --git a/images/solr-drupal/solr6.6/conf/solrcore.properties b/images/solr-drupal/solr6.6/conf/solrcore.properties
deleted file mode 100644
index d7d045b0fd..0000000000
--- a/images/solr-drupal/solr6.6/conf/solrcore.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Defines Solr properties for this specific core.
-solr.replication.master=false
-solr.replication.slave=false
-solr.replication.pollInterval=00:00:60
-solr.replication.masterUrl=http://localhost:8983/solr
-solr.replication.confFiles=schema.xml,mapping-ISOLatin1Accent.txt,protwords.txt,stopwords.txt,synonyms.txt,elevate.xml
-solr.mlt.timeAllowed=2000
-# You should not set your luceneMatchVersion to anything lower than your Solr
-# Version.
-solr.luceneMatchVersion=6.0
-solr.selectSearchHandler.timeAllowed=-1
-# autoCommit after 10000 docs
-solr.autoCommit.MaxDocs=10000
-# autoCommit after 2 minutes
-solr.autoCommit.MaxTime=120000
-# autoSoftCommit after 2000 docs
-solr.autoSoftCommit.MaxDocs=2000
-# autoSoftCommit after 10 seconds
-solr.autoSoftCommit.MaxTime=10000
-solr.install.dir=../../..
diff --git a/images/solr-drupal/solr6.6/conf/stopwords.txt b/images/solr-drupal/solr6.6/conf/stopwords.txt
deleted file mode 100644
index d7f243e48a..0000000000
--- a/images/solr-drupal/solr6.6/conf/stopwords.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# Contains words which shouldn't be indexed for fulltext fields, e.g., because
-# they're too common. For documentation of the format, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.StopFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-drupal/solr6.6/conf/synonyms.txt b/images/solr-drupal/solr6.6/conf/synonyms.txt
deleted file mode 100644
index 7d22eea6d6..0000000000
--- a/images/solr-drupal/solr6.6/conf/synonyms.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-# Contains synonyms to use for your index. For the format used, see
-# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.SynonymFilterFactory
-# (Lines starting with a pound character # are ignored.)
diff --git a/images/solr-drupal/solr7.7/conf/accents_en.txt b/images/solr-drupal/solr7.7/conf/accents_en.txt
deleted file mode 100644
index bed051477b..0000000000
--- a/images/solr-drupal/solr7.7/conf/accents_en.txt
+++ /dev/null
@@ -1,164 +0,0 @@
-# À => A
-"\u00C0" => "A"
-# Á => A
-"\u00C1" => "A"
-# Â => A
-"\u00C2" => "A"
-# Ã => A
-"\u00C3" => "A"
-# Ä => A
-"\u00C4" => "A"
-# Å => A
-"\u00C5" => "A"
-# Ą => A
-"\u0104" => "A"
-# Æ => AE
-"\u00C6" => "AE"
-# Ç => C
-"\u00C7" => "C"
-# Ć => C
-"\U0106" => "C"
-# È => E
-"\u00C8" => "E"
-# É => E
-"\u00C9" => "E"
-# Ê => E
-"\u00CA" => "E"
-# Ë => E
-"\u00CB" => "E"
-# Ę => E
-"\u0118" => "E"
-# Ì => I
-"\u00CC" => "I"
-# Í => I
-"\u00CD" => "I"
-# Î => I
-"\u00CE" => "I"
-# Ï => I
-"\u00CF" => "I"
-# IJ => IJ
-"\u0132" => "IJ"
-# Ð => D
-"\u00D0" => "D"
-# Ł => L
-"\u0141" => "L"
-# Ñ => N
-"\u00D1" => "N"
-# Ń => N
-"\u0143" => "N"
-# Ò => O
-"\u00D2" => "O"
-# Ó => O
-"\u00D3" => "O"
-# Ô => O
-"\u00D4" => "O"
-# Õ => O
-"\u00D5" => "O"
-# Ö => O
-"\u00D6" => "O"
-# Ø => O
-"\u00D8" => "O"
-# Œ => OE
-"\u0152" => "OE"
-# Þ
-"\u00DE" => "TH"
-# Ù => U
-"\u00D9" => "U"
-# Ú => U
-"\u00DA" => "U"
-# Û => U
-"\u00DB" => "U"
-# Ü => U
-"\u00DC" => "U"
-# Ý => Y
-"\u00DD" => "Y"
-# Ÿ => Y
-"\u0178" => "Y"
-# à => a
-"\u00E0" => "a"
-# á => a
-"\u00E1" => "a"
-# â => a
-"\u00E2" => "a"
-# ã => a
-"\u00E3" => "a"
-# ä => a
-"\u00E4" => "a"
-# å => a
-"\u00E5" => "a"
-# æ => ae
-"\u00E6" => "ae"
-# ç => c
-"\u00E7" => "c"
-# è => e
-"\u00E8" => "e"
-# é => e
-"\u00E9" => "e"
-# ê => e
-"\u00EA" => "e"
-# ë => e
-"\u00EB" => "e"
-# ì => i
-"\u00EC" => "i"
-# í => i
-"\u00ED" => "i"
-# î => i
-"\u00EE" => "i"
-# ï => i
-"\u00EF" => "i"
-# ij => ij
-"\u0133" => "ij"
-# ð => d
-"\u00F0" => "d"
-# ñ => n
-"\u00F1" => "n"
-# ò => o
-"\u00F2" => "o"
-# ó => o
-"\u00F3" => "o"
-# ô => o
-"\u00F4" => "o"
-# õ => o
-"\u00F5" => "o"
-# ö => o
-"\u00F6" => "o"
-# ø => o
-"\u00F8" => "o"
-# œ => oe
-"\u0153" => "oe"
-# ß => ss
-"\u00DF" => "ss"
-# Ś => S
-"\u015a" => "S"
-# þ => th
-"\u00FE" => "th"
-# ù => u
-"\u00F9" => "u"
-# ú => u
-"\u00FA" => "u"
-# û => u
-"\u00FB" => "u"
-# ü => u
-"\u00FC" => "u"
-# ý => y
-"\u00FD" => "y"
-# ÿ => y
-"\u00FF" => "y"
-# Ź => Z
-"\u0179" => "Z"
-# Ż => Z
-"\u017b" => "Z"
-# ff => ff
-"\uFB00" => "ff"
-# fi => fi
-"\uFB01" => "fi"
-# fl => fl
-"\uFB02" => "fl"
-# ffi => ffi
-"\uFB03" => "ffi"
-# ffl => ffl
-"\uFB04" => "ffl"
-# ſt => st
-"\uFB05" => "st"
-# st => st
-"\uFB06" => "st"
diff --git a/images/solr-drupal/solr7.7/conf/accents_und.txt b/images/solr-drupal/solr7.7/conf/accents_und.txt
deleted file mode 100644
index 7c883f87a3..0000000000
--- a/images/solr-drupal/solr7.7/conf/accents_und.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-# À => A
-"\u00C0" => "A"
-# Á => A
-"\u00C1" => "A"
-# Â => A
-"\u00C2" => "A"
-# Ã => A
-"\u00C3" => "A"
-# Ä => A
-"\u00C4" => "A"
-# Å => A
-"\u00C5" => "A"
-# Æ => AE
-"\u00C6" => "AE"
-# Ç => C
-"\u00C7" => "C"
-# È => E
-"\u00C8" => "E"
-# É => E
-"\u00C9" => "E"
-# Ê => E
-"\u00CA" => "E"
-# Ë => E
-"\u00CB" => "E"
-# Ì => I
-"\u00CC" => "I"
-# Í => I
-"\u00CD" => "I"
-# Î => I
-"\u00CE" => "I"
-# Ï => I
-"\u00CF" => "I"
-# IJ => IJ
-"\u0132" => "IJ"
-# Ð => D
-"\u00D0" => "D"
-# Ñ => N
-"\u00D1" => "N"
-# Ò => O
-"\u00D2" => "O"
-# Ó => O
-"\u00D3" => "O"
-# Ô => O
-"\u00D4" => "O"
-# Õ => O
-"\u00D5" => "O"
-# Ö => O
-"\u00D6" => "O"
-# Ø => O
-"\u00D8" => "O"
-# Œ => OE
-"\u0152" => "OE"
-# Þ
-"\u00DE" => "TH"
-# Ù => U
-"\u00D9" => "U"
-# Ú => U
-"\u00DA" => "U"
-# Û => U
-"\u00DB" => "U"
-# Ü => U
-"\u00DC" => "U"
-# Ý => Y
-"\u00DD" => "Y"
-# Ÿ => Y
-"\u0178" => "Y"
-# à => a
-"\u00E0" => "a"
-# á => a
-"\u00E1" => "a"
-# â => a
-"\u00E2" => "a"
-# ã => a
-"\u00E3" => "a"
-# ä => a
-"\u00E4" => "a"
-# å => a
-"\u00E5" => "a"
-# æ => ae
-"\u00E6" => "ae"
-# ç => c
-"\u00E7" => "c"
-# è => e
-"\u00E8" => "e"
-# é => e
-"\u00E9" => "e"
-# ê => e
-"\u00EA" => "e"
-# ë => e
-"\u00EB" => "e"
-# ì => i
-"\u00EC" => "i"
-# í => i
-"\u00ED" => "i"
-# î => i
-"\u00EE" => "i"
-# ï => i
-"\u00EF" => "i"
-# ij => ij
-"\u0133" => "ij"
-# ð => d
-"\u00F0" => "d"
-# ñ => n
-"\u00F1" => "n"
-# ò => o
-"\u00F2" => "o"
-# ó => o
-"\u00F3" => "o"
-# ô => o
-"\u00F4" => "o"
-# õ => o
-"\u00F5" => "o"
-# ö => o
-"\u00F6" => "o"
-# ø => o
-"\u00F8" => "o"
-# œ => oe
-"\u0153" => "oe"
-# ß => ss
-"\u00DF" => "ss"
-# þ => th
-"\u00FE" => "th"
-# ù => u
-"\u00F9" => "u"
-# ú => u
-"\u00FA" => "u"
-# û => u
-"\u00FB" => "u"
-# ü => u
-"\u00FC" => "u"
-# ý => y
-"\u00FD" => "y"
-# ÿ => y
-"\u00FF" => "y"
-# ff => ff
-"\uFB00" => "ff"
-# fi => fi
-"\uFB01" => "fi"
-# fl => fl
-"\uFB02" => "fl"
-# ffi => ffi
-"\uFB03" => "ffi"
-# ffl => ffl
-"\uFB04" => "ffl"
-# ſt => st
-"\uFB05" => "st"
-# st => st
-"\uFB06" => "st"
diff --git a/images/solr-drupal/solr7.7/conf/elevate.xml b/images/solr-drupal/solr7.7/conf/elevate.xml
deleted file mode 100644
index 193a0e727a..0000000000
--- a/images/solr-drupal/solr7.7/conf/elevate.xml
+++ /dev/null
@@ -1,27 +0,0 @@
diff --git a/images/solr-drupal/solr7.7/conf/protwords_en.txt b/images/solr-drupal/solr7.7/conf/protwords_en.txt
deleted file mode 100644
index 8b13789179..0000000000
--- a/images/solr-drupal/solr7.7/conf/protwords_en.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/images/solr-drupal/solr7.7/conf/protwords_und.txt b/images/solr-drupal/solr7.7/conf/protwords_und.txt
deleted file mode 100644
index 8b13789179..0000000000
--- a/images/solr-drupal/solr7.7/conf/protwords_und.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/images/solr-drupal/solr7.7/conf/schema.xml b/images/solr-drupal/solr7.7/conf/schema.xml
deleted file mode 100644
index b643a91ee7..0000000000
--- a/images/solr-drupal/solr7.7/conf/schema.xml
+++ /dev/null
@@ -1,474 +0,0 @@
-<!-- schema.xml body lost in extraction (XML markup stripped); surviving values: the &extrafields; and &extratypes; entity includes and the value "id" -->
diff --git a/images/solr-drupal/solr7.7/conf/schema_extra_fields.xml b/images/solr-drupal/solr7.7/conf/schema_extra_fields.xml
deleted file mode 100644
index f5d74188a5..0000000000
--- a/images/solr-drupal/solr7.7/conf/schema_extra_fields.xml
+++ /dev/null
@@ -1,89 +0,0 @@
diff --git a/images/solr-drupal/solr7.7/conf/schema_extra_types.xml b/images/solr-drupal/solr7.7/conf/schema_extra_types.xml
deleted file mode 100644
index 8b5e82f035..0000000000
--- a/images/solr-drupal/solr7.7/conf/schema_extra_types.xml
+++ /dev/null
@@ -1,228 +0,0 @@
diff --git a/images/solr-drupal/solr7.7/conf/solrconfig.xml b/images/solr-drupal/solr7.7/conf/solrconfig.xml
deleted file mode 100644
index 38ff72e62d..0000000000
--- a/images/solr-drupal/solr7.7/conf/solrconfig.xml
+++ /dev/null
@@ -1,812 +0,0 @@
-<!-- solrconfig.xml body lost in extraction (XML markup stripped); surviving values include ${solr.luceneMatchVersion:LUCENE_70}, ${solr.hdfs.*} properties, ${solr.lock.type:none}, autoCommit/autoSoftCommit properties, and the &index;, &query;, &requestdispatcher; and &extra; entity includes -->
diff --git a/images/solr-drupal/solr7.7/conf/solrconfig_extra.xml b/images/solr-drupal/solr7.7/conf/solrconfig_extra.xml
deleted file mode 100644
index 1a59c45938..0000000000
--- a/images/solr-drupal/solr7.7/conf/solrconfig_extra.xml
+++ /dev/null
@@ -1,188 +0,0 @@
-<!-- solrconfig_extra.xml body lost in extraction (XML markup stripped); surviving values include the en/und spellcheckers (solr.DirectSolrSpellChecker over spellcheck_en/spellcheck_und), en/und suggesters (AnalyzingInfixLookupFactory, DocumentDictionaryFactory, twm_suggest, sm_context_tags), mlt/select/spellcheck/suggest/tvComponent handler settings, and the elevate.xml reference -->
diff --git a/images/solr-drupal/solr7.7/conf/solrconfig_query.xml b/images/solr-drupal/solr7.7/conf/solrconfig_query.xml
deleted file mode 100644
index 5bdd696902..0000000000
--- a/images/solr-drupal/solr7.7/conf/solrconfig_query.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<!-- solrconfig_query.xml body lost in extraction (XML markup stripped); surviving values: true, false, 20, 200, 1024 -->
diff --git a/images/solr-drupal/solr7.7/conf/solrconfig_requestdispatcher.xml b/images/solr-drupal/solr7.7/conf/solrconfig_requestdispatcher.xml
deleted file mode 100644
index 3a3f17d1c9..0000000000
--- a/images/solr-drupal/solr7.7/conf/solrconfig_requestdispatcher.xml
+++ /dev/null
@@ -1,6 +0,0 @@
diff --git a/images/solr-drupal/solr7.7/conf/solrcore.properties b/images/solr-drupal/solr7.7/conf/solrcore.properties
deleted file mode 100644
index c75ec4d7ff..0000000000
--- a/images/solr-drupal/solr7.7/conf/solrcore.properties
+++ /dev/null
@@ -1,13 +0,0 @@
-solr.replication.master=false
-solr.replication.slave=false
-solr.replication.pollInterval=00:00:60
-solr.replication.masterUrl=http://solr:8983/solr
-solr.replication.confFiles=schema.xml,schema_extra_types.xml,schema_extra_fields.xml,elevate.xml,stopwords_en.txt,synonyms_en.txt,protwords_en.txt,accents_en.txt,stopwords_und.txt,synonyms_und.txt,protwords_und.txt,accents_und.txt
-solr.mlt.timeAllowed=2000
-solr.luceneMatchVersion=7.7
-solr.selectSearchHandler.timeAllowed=-1
-solr.autoCommit.MaxDocs=-1
-solr.autoCommit.MaxTime=15000
-solr.autoSoftCommit.MaxDocs=-1
-solr.autoSoftCommit.MaxTime=-1
-solr.install.dir=/opt/solr
diff --git a/images/solr-drupal/solr7.7/conf/stopwords_en.txt b/images/solr-drupal/solr7.7/conf/stopwords_en.txt
deleted file mode 100644
index 6981050710..0000000000
--- a/images/solr-drupal/solr7.7/conf/stopwords_en.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-a
-an
-and
-are
-as
-at
-be
-but
-by
-for
-if
-in
-into
-is
-it
-no
-not
-of
-on
-or
-s
-such
-t
-that
-the
-their
-then
-there
-these
-they
-this
-to
-was
-will
-with
diff --git a/images/solr-drupal/solr7.7/conf/stopwords_und.txt b/images/solr-drupal/solr7.7/conf/stopwords_und.txt
deleted file mode 100644
index 8b13789179..0000000000
--- a/images/solr-drupal/solr7.7/conf/stopwords_und.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/images/solr-drupal/solr7.7/conf/synonyms_en.txt b/images/solr-drupal/solr7.7/conf/synonyms_en.txt
deleted file mode 100644
index 91689ff92f..0000000000
--- a/images/solr-drupal/solr7.7/conf/synonyms_en.txt
+++ /dev/null
@@ -1 +0,0 @@
-drupal, durpal
diff --git a/images/solr-drupal/solr7.7/conf/synonyms_und.txt b/images/solr-drupal/solr7.7/conf/synonyms_und.txt
deleted file mode 100644
index 91689ff92f..0000000000
--- a/images/solr-drupal/solr7.7/conf/synonyms_und.txt
+++ /dev/null
@@ -1 +0,0 @@
-drupal, durpal
diff --git a/images/solr/10-solr-port.sh b/images/solr/10-solr-port.sh
deleted file mode 100755
index a7c8ef3feb..0000000000
--- a/images/solr/10-solr-port.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-# Solr can have its own port overridden via SOLR_PORT, but on Kubernetes this env variable can be filled with
-# something like SOLR_PORT=tcp://172.30.32.255:8983, so we check that it contains only a port number and fall back to the default port otherwise.
-# Inspired by: https://github.com/docker-solr/docker-solr/blob/6d7fa219c3b3407e0dd29fb17b15ec9e6df85058/6.6/alpine/scripts/docker-entrypoint.sh#L11-L13
-if [[ -n "$SOLR_PORT" ]] && ! /bin/bash -c "grep -E -q '^[0-9]+$' <<<\"${SOLR_PORT:-}\""; then
- SOLR_PORT=8983
- export SOLR_PORT
-fi
\ No newline at end of file
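
The guard above exists because Kubernetes service links can overwrite SOLR_PORT with a URL rather than a number. A sketch of the same logic in plain POSIX sh (the original shells out to bash for the here-string), with an illustrative injected value:

    SOLR_PORT="tcp://172.30.32.255:8983"   # what Kubernetes may inject
    if [ -n "$SOLR_PORT" ] && ! echo "$SOLR_PORT" | grep -E -q '^[0-9]+$'; then
      SOLR_PORT=8983   # not a bare port number, fall back to the Solr default
      export SOLR_PORT
    fi
    echo "$SOLR_PORT"   # prints 8983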
diff --git a/images/solr/20-solr-datadir.sh b/images/solr/20-solr-datadir.sh
deleted file mode 100755
index d7c1f486cc..0000000000
--- a/images/solr/20-solr-datadir.sh
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/bin/sh
-set -eo pipefail
-
-# check if SOLR_COPY_DATA_DIR_SOURCE is set; if so, copy the contents of the given folder into the data dir folder.
-# This allows prefilling the datadir with a provided one (either added during a Dockerfile build, or mounted into the running container).
-# This is different from just setting $SOLR_DATA_DIR to the source folder: only /var/solr is a persistent folder, so pointing
-# $SOLR_DATA_DIR at another folder means Solr would not keep the datadir across container restarts, while with this copy system
-# the data is prefilled and persists across container restarts.
-if [ -n "$SOLR_COPY_DATA_DIR_SOURCE" ]; then
-  echo "SOLR_COPY_DATA_DIR_SOURCE is set, start copying from source location"
- for solrcorepath in $(ls -d $SOLR_COPY_DATA_DIR_SOURCE/*/ | grep -v lost+found) ; do
- corename=$(basename $solrcorepath)
- if [ -d ${SOLR_DATA_DIR:-/var/solr}/$corename ]; then
- echo "core $corename already present in destination, skipping copying"
- else
- echo "copying datadir contents from '$SOLR_COPY_DATA_DIR_SOURCE/$corename to '${SOLR_DATA_DIR:-/var/solr}/$corename'"
- CUR_DIR=${PWD}
- mkdir ${SOLR_DATA_DIR:-/var/solr}/$corename
- cd $SOLR_COPY_DATA_DIR_SOURCE/$corename; tar cf - . | (cd ${SOLR_DATA_DIR:-/var/solr}/$corename; tar xvf -)
- cd $CUR_DIR
- fi
- done
-fi
-
-# Previously the Solr Config and Solr Data Dir was both kept in the persistent volume:
-# - Solr data: /opt/solr/server/solr/mycores/${corename}/data
-# - Solr config: /opt/solr/server/solr/mycores/${corename}/config
-# - Persistent Volume: /opt/solr/server/solr/mycores/
-# The Solr config was copied from the Docker image into the Solr config directory the very first time Solr started.
-# This had the problem that if a new Solr config was shipped in a new Docker image, the config was not copied again.
-# Therefore there was no way to ship updated configs, and the setup also diverged from other
-# services like nginx or php, which keep their configs in Docker images instead of persistent volumes.
-# The following script migrates to the new directory structure:
-# - Solr data: /var/solr/${corename}
-# - Solr config: /opt/solr/server/solr/mycores/${corename}/config
-# - Persistent Volume: /var/solr/
-# It does:
-# 1. Move folders from /var/solr/${corename}/data to /var/solr/${corename} - this is needed if the existing persistent volume is
-# mounted now to /var/solr/ but the data is still within data/
-# 2. Create the folder /opt/solr/server/solr/mycores/${corename} if not existing (because there is no persistent volume mounted anymore)
-# and copy the config from the persistent storage to that folder.
-
-# It then also tries to update existing non-compatible configs inside solrconfig.xml:
-# - dataDir now needs to be `/var/solr/${solr.core.name}` to point to the new persistent Volume
-# - lockType needs to be `${solr.lock.type:none}` to prevent issues with the default file based Lock system which
-# can cause issues if the solr is not stopped correctly
-# The script does that for existing configs in `/opt/solr/server/solr/mycores/${corename}/config` if that folder exists, which can happen in two cases:
-# 1. During a docker build the solr core has already been created via `precreate-core` (which should now be used all the time)
-# 2. The first part of the script has copied the config from the previous persistent volume into these folders
-# If `/opt/solr/server/solr/mycores` is empty, the container has never been started, had no previous persistent volume and also did not
-# run `precreate-core` yet; in that case it checks whether the commonly used folder `/solr-conf/conf/` has a config in it and tries to adapt it.
-# If that fails (most likely because of permission issues), it will throw an error and exit.
-
-if [ ! -n "$(ls /opt/solr/server/solr/mycores)" ]; then
- echo 'No pre-created Solr Cores found in `/opt/solr/server/solr/mycores` this probably means that your Dockerfile does not run'
- echo ' RUN precreate-core corename /solr-conf'
- echo 'within Dockerfile and instead uses'
- echo ' CMD ["solr-precreate", "corename", "/solr-conf"]'
- echo 'Please update your Dockerfile to:'
- echo ' RUN precreate-core corename /solr-conf'
- echo ' CMD ["solr-foreground"]'
- printf "\n\n"
-fi
-
-if [ -n "$(ls ${SOLR_DATA_DIR:-/var/solr})" ]; then
- # Iterate through all existing solr cores
- for solrcorepath in $(ls -d ${SOLR_DATA_DIR:-/var/solr}/*/ | grep -v lost+found) ; do
- corename=$(basename $solrcorepath)
- if [ -d ${solrcorepath}data ]; then
-      echo "${solrcorepath} has its data in deprecated location ${solrcorepath}data, moving to ${solrcorepath}."
- # moving the contents of /var/solr/${corename}/data to /var/solr/${corename}
- # the datadir now has the layout that a newly created core would.
- mv ${solrcorepath}data/* ${solrcorepath}
- # remove empty directory
- rm -Rf ${solrcorepath}data || mv ${solrcorepath}data ${solrcorepath}data-delete
- fi
-
-    # If the core has no files in /opt/solr/server/solr/mycores/${corename} this means:
-    # the Docker image did not run `precreate-core corename /solr-conf` during the Dockerfile build
-    # and instead runs `solr-precreate corename solr-conf` as the CMD of the container.
-    # Since we already have an existing Solr config from the persistent storage, we copy that over.
- if [ ! -d /opt/solr/server/solr/mycores/${corename} ]; then
- mkdir -p /opt/solr/server/solr/mycores/${corename}
- # Copy the solr config from the persistent volume in the solr home config directory
- cp -R ${solrcorepath}conf /opt/solr/server/solr/mycores/${corename}/
- echo "copied pre-existing solr config from '${solrcorepath}conf' to '/opt/solr/server/solr/mycores/${corename}/conf'"
- printf "\n\n"
- # there must be a core.properties to be recognized as a core
- touch /opt/solr/server/solr/mycores/${corename}/core.properties
- fi
- done
-fi
-
-function fixConfig {
-  fail=0
-  if cat $1/solrconfig.xml | grep dataDir | grep -qv "<dataDir>${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}</dataDir>"; then
-    echo "Found old non lagoon compatible dataDir config in solrconfig.xml:"
-    cat $1/solrconfig.xml | grep dataDir
-    SOLR_DATA_DIR=${SOLR_DATA_DIR:-/var/solr}
-    SOLR_DATA_DIR_ESCAPED=${SOLR_DATA_DIR//\//\\/} # escaping the forward slashes with backslashes
-    if [ -w $1/ ]; then
-      sed -ibak "s/<dataDir>.*<\/dataDir>/<dataDir>$SOLR_DATA_DIR_ESCAPED\/\${solr.core.name}<\/dataDir>/" $1/solrconfig.xml
-      echo "automagically updated to compatible config: "
-      echo "  <dataDir>${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}</dataDir>"
-      echo "Please update your solrconfig.xml to make this persistent."
-    else
-      echo "but no write permission to automagically change to compatible config: "
-      echo "  <dataDir>${SOLR_DATA_DIR:-/var/solr}/\${solr.core.name}</dataDir>"
-      echo "Please update your solrconfig.xml and commit again."
-      fail=1
-    fi
-    printf "\n\n"
-  fi
-  # change lockType to none
-  if cat $1/solrconfig.xml | grep lockType | grep -qv '<lockType>${solr.lock.type:none}</lockType>'; then
-    echo "Found old non lagoon compatible lockType config in solrconfig.xml:"
-    cat $1/solrconfig.xml | grep lockType
-    if [ -w $1/ ]; then
-      sed -ibak 's/<lockType>\${solr\.lock\.type:native}<\/lockType>/<lockType>${solr.lock.type:none}<\/lockType>/' $1/solrconfig.xml
-      echo "automagically updated to compatible config: "
-      echo '  <lockType>${solr.lock.type:none}</lockType>'
-      echo "Please update your solrconfig.xml to make this persistent."
-    else
-      echo "but no write permission to automagically change to compatible config: "
-      echo '  <lockType>${solr.lock.type:none}</lockType>'
-      echo "Please update your solrconfig.xml and commit again."
-      fail=1
-    fi
-    printf "\n\n"
-  fi
-  if [ "$fail" == "1" ]; then
-    exit 1;
-  fi
-}
-
-# check if `/opt/solr/server/solr/mycores` has cores, which means that `precreate-core` has already been called, so we check the configs there
-if [ -n "$(ls /opt/solr/server/solr/mycores)" ]; then
- # Iterate through all solr cores
- for solrcorepath in $(ls -d /opt/solr/server/solr/mycores/*/) ; do
- corename=$(basename $solrcorepath)
- # Check and Update the solr config with lagoon compatible config
- if [ -f /opt/solr/server/solr/mycores/${corename}/conf/solrconfig.xml ]; then
- fixConfig /opt/solr/server/solr/mycores/${corename}/conf
- fi
- done
-else
-  # `/opt/solr/server/solr/mycores` is empty, meaning that `precreate-core` has not been called and this container is probably started via `solr-precreate`.
-  # We try to update the solr configs within `/solr-conf/conf` to the new lagoon default config, as this one will most probably be used to create a new core.
- if [ -f /solr-conf/conf/solrconfig.xml ]; then
- fixConfig /solr-conf/conf
- else
- echo "No config found in '/solr-conf/conf' and was not able to automatically update solr config to newest lagoon compatible version."
- echo "Cannot guarantee if this Solr config will work!"
- fi
-fi
-
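
The sed rewrites inside fixConfig are the heart of the migration; replayed standalone against a legacy core config (path illustrative), the lockType fix behaves like this:

    # before: <lockType>${solr.lock.type:native}</lockType>
    sed -ibak 's/<lockType>\${solr\.lock\.type:native}<\/lockType>/<lockType>${solr.lock.type:none}<\/lockType>/' conf/solrconfig.xml
    grep lockType conf/solrconfig.xml
    # after:  <lockType>${solr.lock.type:none}</lockType>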
diff --git a/images/solr/Dockerfile b/images/solr/Dockerfile
deleted file mode 100644
index 2ad030949d..0000000000
--- a/images/solr/Dockerfile
+++ /dev/null
@@ -1,48 +0,0 @@
-ARG SOLR_MAJ_MIN_VERSION
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-FROM solr:${SOLR_MAJ_MIN_VERSION}-alpine
-
-LABEL maintainer="amazee.io"
-ENV LAGOON=solr
-
-ARG LAGOON_VERSION
-ENV LAGOON_VERSION=$LAGOON_VERSION
-
-# Copy commons files
-COPY --from=commons /lagoon /lagoon
-COPY --from=commons /bin/fix-permissions /bin/ep /bin/docker-sleep /bin/
-COPY --from=commons /sbin/tini /sbin/
-COPY --from=commons /home/.bashrc /home/.bashrc
-
-ENV TMPDIR=/tmp \
- TMP=/tmp \
- HOME=/home \
- # When Bash is invoked via `sh` it behaves like the old Bourne Shell and sources a file that is given in `ENV`
- ENV=/home/.bashrc \
- # When Bash is invoked as non-interactive (like `bash -c command`) it sources a file that is given in `BASH_ENV`
- BASH_ENV=/home/.bashrc
-
-# we need root for the fix-permissions to work
-USER root
-
-RUN mkdir -p /var/solr
-RUN fix-permissions /var/solr \
- && chown solr:solr /var/solr \
- && fix-permissions /opt/solr/server/logs \
- && fix-permissions /opt/solr/server/solr
-
-
-# solr really doesn't like to be run as root, so we define the default user again
-USER solr
-
-COPY 10-solr-port.sh /lagoon/entrypoints/
-COPY 20-solr-datadir.sh /lagoon/entrypoints/
-
-
-# Define Volume so locally we get persistent cores
-VOLUME /var/solr
-
-ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"]
-
-CMD ["solr-precreate", "mycore"]
diff --git a/images/toolbox/Dockerfile b/images/toolbox/Dockerfile
deleted file mode 100644
index 40c59f9400..0000000000
--- a/images/toolbox/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-ARG IMAGE_REPO
-
-FROM ${IMAGE_REPO:-lagoon}/mariadb as mariadb
-
-FROM ${IMAGE_REPO:-lagoon}/commons as commons
-
-LABEL maintainer="amazee.io"
-
-RUN apk update \
- && apk upgrade \
- && apk add --no-cache curl ncdu socat ca-certificates openssl perl perl-doc mysql-client rsync mariadb-mytop \
- && update-ca-certificates \
- && rm -rf /var/cache/apk/* \
- && wget https://raw.githubusercontent.com/major/MySQLTuner-perl/master/mysqltuner.pl -O mysqltuner.pl \
- && chmod +x mysqltuner.pl
-
-COPY --from=mariadb /usr/bin/my_print_defaults /usr/bin/my_print_defaults
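
The toolbox image bundles MySQLTuner next to the standard clients; a typical invocation against a Lagoon MariaDB service (hostname and credentials are illustrative) might look like:

    ./mysqltuner.pl --host mariadb --user root --pass "$MARIADB_ROOT_PASSWORD"
    mysql -h mariadb -u root -p"$MARIADB_ROOT_PASSWORD" -e 'SHOW DATABASES;'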
diff --git a/images/varnish-drupal/Dockerfile b/images/varnish-drupal/Dockerfile
deleted file mode 100644
index 100ebedad0..0000000000
--- a/images/varnish-drupal/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-ARG IMAGE_REPO
-FROM ${IMAGE_REPO:-lagoon}/varnish
-
-COPY drupal.vcl /etc/varnish/default.vcl
-
-RUN fix-permissions /etc/varnish/default.vcl
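
The drupal.vcl removed next gates REFRESH, PURGE, and BAN behind its purge ACL (localhost plus the private RFC1918 ranges). A hedged example of forcing a cache miss from inside the cluster (host, port, and URL are illustrative):

    curl -X REFRESH -H 'Host: example.com' http://varnish:8080/node/1
    # from an address outside the ACL the same request is answered with 405 Not allowed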
diff --git a/images/varnish-drupal/drupal.vcl b/images/varnish-drupal/drupal.vcl
deleted file mode 100644
index c1e2ba2103..0000000000
--- a/images/varnish-drupal/drupal.vcl
+++ /dev/null
@@ -1,477 +0,0 @@
-vcl 4.0;
-
-import std;
-import dynamic;
-
-# set backend default
-backend default {
- .host = "${VARNISH_BACKEND_HOST:-nginx}";
- .port = "${VARNISH_BACKEND_PORT:-8080}";
- .first_byte_timeout = 35m;
- .between_bytes_timeout = 10m;
-}
-
-# Allow purging from localhost
-# @TODO allow from openshift network
-acl purge {
- "127.0.0.1";
- "10.0.0.0"/8;
- "172.16.0.0"/12;
- "192.168.0.0"/16;
-}
-
-sub vcl_init {
- new www_dir = dynamic.director(
- port = "${VARNISH_BACKEND_PORT:-8080}",
- first_byte_timeout = 90s,
- between_bytes_timeout = 90s,
- ttl = 60s);
-}
-
-# This configuration is optimized for Drupal hosting:
-# Respond to incoming requests.
-sub vcl_recv {
- if (req.url ~ "^/varnish_status$") {
- return (synth(200,"OK"));
- }
- # set the backend, which should be used:
- set req.backend_hint = www_dir.backend("${VARNISH_BACKEND_HOST:-nginx}");
-
- # Always set the forward ip.
- if (req.restarts == 0) {
- if (req.http.x-forwarded-for) {
- set req.http.X-Forwarded-For = req.http.X-Forwarded-For + ", " + client.ip;
- }
- else {
- set req.http.X-Forwarded-For = client.ip;
- }
- }
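
A minimal JavaScript sketch of the forwarding logic above (hypothetical helper, shown only to make the append semantics explicit):

```js
// Append the client IP to any existing X-Forwarded-For chain, on the first
// pass only (req.restarts == 0), matching the VCL guard above.
function appendForwardedFor(existingHeader, clientIp) {
  return existingHeader ? `${existingHeader}, ${clientIp}` : String(clientIp);
}

// appendForwardedFor('203.0.113.7', '10.0.0.5') -> '203.0.113.7, 10.0.0.5'
// appendForwardedFor(undefined, '10.0.0.5')     -> '10.0.0.5'
```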
-
- if (req.http.X-LAGOON-VARNISH ) {
- # Pass all Requests which are handled via an upstream Varnish
- set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, " + req.http.X-LAGOON-VARNISH;
- set req.http.X-LAGOON-VARNISH-BYPASS = "true";
- }
- else if (req.http.Fastly-FF) {
- # Pass all Requests which are handled via Fastly
- set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, fastly";
- set req.http.X-LAGOON-VARNISH-BYPASS = "true";
- set req.http.X-Forwarded-For = req.http.Fastly-Client-IP;
- }
- else if (req.http.CF-RAY) {
- # Pass all Requests which are handled via CloudFlare
- set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, cloudflare";
- set req.http.X-LAGOON-VARNISH-BYPASS = "true";
- set req.http.X-Forwarded-For = req.http.CF-Connecting-IP;
- }
- else if (req.http.X-Pull) {
- # Pass all Requests which are handled via KeyCDN
- set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}, keycdn";
- set req.http.X-LAGOON-VARNISH-BYPASS = "true";
- }
- else {
- # We set a header to let any Varnish further down the chain know that this request has already been through a Varnish cache
- set req.http.X-LAGOON-VARNISH = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}";
-
- # Allow bypassing the cache via the env variable `VARNISH_BYPASS`
- set req.http.X-LAGOON-VARNISH-BYPASS = "${VARNISH_BYPASS:-false}";
- }
-
- # Websockets are piped
- if (req.http.Upgrade ~ "(?i)websocket") {
- return (pipe);
- }
-
- if (req.http.X-LAGOON-VARNISH-BYPASS == "true" || req.http.X-LAGOON-VARNISH-BYPASS == "TRUE") {
- return (pass);
- }
-
- # SA-CORE-2014-004: prevent access to /xmlrpc.php
- if (req.url ~ "^/xmlrpc.php$") {
- return (synth(701, "Unauthorized"));
- }
-
- # Strip out Google Analytics campaign variables. They are only needed
- # by the JavaScript running on the page:
- # utm_source, utm_medium, utm_campaign, gclid
- if (req.url ~ "(\?|&)(gclid|utm_[a-z]+)=") {
- set req.url = regsuball(req.url, "(gclid|utm_[a-z]+)=[^\&]+&?", "");
- set req.url = regsub(req.url, "(\?|&)$", "");
- }
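
The two regex substitutions translate almost one-to-one to JavaScript; a small sketch (hypothetical helper) for sanity-checking the pattern:

```js
// Strip gclid/utm_* query parameters, then remove a dangling '?' or '&',
// mirroring the regsuball/regsub pair above.
function stripCampaignParams(url) {
  return url
    .replace(/(gclid|utm_[a-z]+)=[^&]+&?/g, '')
    .replace(/(\?|&)$/, '');
}

// stripCampaignParams('/news?utm_source=mail&page=2') -> '/news?page=2'
// stripCampaignParams('/news?utm_source=mail')        -> '/news'
```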
-
- # Bypass a cache hit (the request is still sent to the backend)
- if (req.method == "REFRESH") {
- if (!client.ip ~ purge) {
- return (synth(405, "Not allowed"));
- }
- set req.method = "GET";
- set req.hash_always_miss = true;
- }
-
- # Only allow BAN requests from IP addresses in the 'purge' ACL.
- if (req.method == "BAN" || req.method == "URIBAN" || req.method == "PURGE") {
- # Only allow BAN from defined ACL
- if (!client.ip ~ purge) {
- return (synth(403, "Your IP is not allowed."));
- }
-
- # Only allow BAN if the Host header looks like "${SERVICE_NAME:-varnish}:8080" or "${SERVICE_NAME:-varnish}".
- # Such a request is only possible from within the Docker network, as an external request goes through the Kubernetes router and therefore needs a proper Host header
- if (!req.http.host ~ "^${SERVICE_NAME:-varnish}(:\d+)?$") {
- return (synth(403, "Only allowed from within own network."));
- }
-
- if (req.method == "BAN") {
- # Logic for the ban, using the Cache-Tags header.
- if (req.http.Cache-Tags) {
- ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags);
- # Throw a synthetic page so the request won't go to the backend.
- return (synth(200, "Ban added."));
- }
- else {
- return (synth(403, "Cache-Tags header missing."));
- }
- }
-
- if (req.method == "URIBAN" || req.method == "PURGE") {
- ban("req.url ~ " + req.url);
- # Throw a synthetic page so the request won't go to the backend.
- return (synth(200, "Ban added."));
- }
- }
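
How a client inside the Docker network would use this: a hypothetical sketch issuing a tag-based BAN (service name and cache tag are assumptions):

```js
// Issue a BAN for all objects tagged "node:123". Both guards above must
// pass: the caller's IP has to be in the purge ACL, and the Host header
// (here "varnish:8080", set automatically by Node) has to match the
// ${SERVICE_NAME:-varnish} pattern.
const http = require('http');

const req = http.request(
  { host: 'varnish', port: 8080, method: 'BAN', headers: { 'Cache-Tags': 'node:123' } },
  (res) => console.log(res.statusCode) // expect 200, "Ban added."
);
req.end();
```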
-
- # Non-RFC2616 methods, or CONNECT (which is weird), are piped.
- if (req.method != "GET" &&
- req.method != "HEAD" &&
- req.method != "PUT" &&
- req.method != "POST" &&
- req.method != "TRACE" &&
- req.method != "OPTIONS" &&
- req.method != "DELETE") {
- return (pipe);
- }
-
- # Large binary files are passed.
- if (req.url ~ "\.(msi|exe|dmg|zip|tgz|gz|pkg)$") {
- return(pass);
- }
-
- # We only try to cache GET and HEAD, other things are passed.
- if (req.method != "GET" && req.method != "HEAD") {
- return (pass);
- }
-
- # Any requests with Basic Authentication are passed.
- if (req.http.Authorization || req.http.Authenticate) {
- return (pass);
- }
-
- # Blackfire requests are passed.
- if (req.http.X-Blackfire-Query) {
- return (pass);
- }
-
- # Some URLs should never be cached.
- if (req.url ~ "^/status\.php$" ||
- req.url ~ "^/update\.php$" ||
- req.url ~ "^/admin([/?]|$).*$" ||
- req.url ~ "^/info([/?]|$).*$" ||
- req.url ~ "^/flag([/?]|$).*$" ||
- req.url ~ "^.*/system/files([/?]|$).*$" ||
- req.url ~ "^/cgi" ||
- req.url ~ "^/cgi-bin"
- ) {
- return (pass);
- }
-
- # Plupload likes to get piped.
- if (req.url ~ "^.*/plupload-handle-uploads.*$") {
- return (pipe);
- }
-
- # Handle compression correctly. Different browsers send different
- # "Accept-Encoding" headers, even though they mostly all support the same
- # compression mechanisms. By consolidating these compression headers into
- # a consistent format, we can reduce the size of the cache and get more hits.
- # @see: http://varnish.projects.linpro.no/wiki/FAQ/Compression
- if (req.http.Accept-Encoding) {
- if (req.http.Accept-Encoding ~ "gzip") {
- # If the browser supports it, we'll use gzip.
- set req.http.Accept-Encoding = "gzip";
- }
- else if (req.http.Accept-Encoding ~ "deflate") {
- # Next, try deflate if it is supported.
- set req.http.Accept-Encoding = "deflate";
- }
- else {
- # Unknown algorithm. Remove it and send unencoded.
- unset req.http.Accept-Encoding;
- }
- }
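
The same normalization as a JavaScript sketch (hypothetical helper), reducing every browser's Accept-Encoding to one of two cacheable variants:

```js
// Collapse Accept-Encoding to gzip, deflate, or nothing, so the cache only
// ever stores at most two encoded variants per object.
function normalizeAcceptEncoding(header) {
  if (!header) return undefined;
  if (/gzip/.test(header)) return 'gzip';       // preferred
  if (/deflate/.test(header)) return 'deflate'; // fallback
  return undefined;                             // unknown algorithm: unencoded
}

// normalizeAcceptEncoding('gzip, deflate, br') -> 'gzip'
```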
-
- # Always cache the following file types for all users.
- if (req.url ~ "(?i)\.(css|js|jpg|jpeg|gif|ico|png|tiff|tif|img|tga|wmf|swf|html|htm|woff|woff2|mp4|ttf|eot|svg)(\?.*)?$") {
- unset req.http.Cookie;
- }
-
- # Remove all cookies that Drupal doesn't need to know about. ANY remaining
- # cookie will cause the request to pass through to the backend. For the most part
- # we always set the NO_CACHE cookie after any POST request, disabling the
- # Varnish cache temporarily. The session cookie allows all authenticated users
- # to pass through as long as they're logged in.
- #
- # 1. Append a semi-colon to the front of the cookie string.
- # 2. Remove all spaces that appear after semi-colons.
- # 3. Match the cookies we want to keep, adding back the space we removed
- # previously. (\1) is the first matching group in the regsuball.
- # 4. Remove all other cookies, identifying them by the fact that they have
- # no space after the preceding semi-colon.
- # 5. Remove all spaces and semi-colons from the beginning and end of the
- # cookie string.
- if (req.http.Cookie) {
- set req.http.CookieCheck = ";" + req.http.Cookie;
- set req.http.CookieCheck = regsuball(req.http.CookieCheck, "; +", ";");
- set req.http.CookieCheck = regsuball(req.http.CookieCheck, ";(${VARNISH_COOKIECHECK:-SESS[a-z0-9]+|SSESS[a-z0-9]+|NO_CACHE})=", "; \1=");
- set req.http.CookieCheck = regsuball(req.http.CookieCheck, ";[^ ][^;]*", "");
- set req.http.CookieCheck = regsuball(req.http.CookieCheck, "^[; ]+|[; ]+$", "");
-
- set req.http.Cookie = req.http.Cookie + ";";
-
- if (req.http.CookieCheck == "") {
- # If there are no remaining cookies, remove the cookie header. If there
- # aren't any cookie headers, Varnish's default behavior will be to cache
- # the page.
-
- unset req.http.CookieCheck;
- unset req.http.Cookie;
- }
- else {
- # If there are any cookies left (a session or NO_CACHE cookie), do not
- # cache the page. Pass it on to the backend directly.
- unset req.http.CookieCheck;
- return (pass);
- }
- }
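
Because the five regsuball steps are hard to eyeball, here is a JavaScript re-implementation (a sketch, assuming the default VARNISH_COOKIECHECK pattern) that can be unit-tested outside Varnish:

```js
// Keep only session (SESS*/SSESS*) and NO_CACHE cookies, following exactly
// the five numbered steps above. An empty result means the page is cacheable.
const KEEP = /;(SESS[a-z0-9]+|SSESS[a-z0-9]+|NO_CACHE)=/g;

function remainingCookies(cookieHeader) {
  let s = ';' + cookieHeader;          // 1. prepend a semicolon
  s = s.replace(/; +/g, ';');          // 2. drop spaces after semicolons
  s = s.replace(KEEP, '; $1=');        // 3. re-add a space before kept cookies
  s = s.replace(/;[^ ][^;]*/g, '');    // 4. drop cookies without that space
  s = s.replace(/^[; ]+|[; ]+$/g, ''); // 5. trim leading/trailing "; "
  return s;
}

// remainingCookies('has_js=1; Drupal.toolbar=0')    -> ''                    (cache)
// remainingCookies('has_js=1; SESSab12cd34=secret') -> 'SESSab12cd34=secret' (pass)
```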
-
- # Cacheable, lookup in cache.
- return (hash);
-}
-
-sub vcl_pipe {
- # Support for Websockets
- if (req.http.upgrade) {
- set bereq.http.upgrade = req.http.upgrade;
- set bereq.http.connection = req.http.connection;
- }
-}
-
-sub vcl_hit {
- if (obj.ttl >= 0s) {
- # normal hit
- return (deliver);
- }
- # We have no fresh fish. Let's look at the stale ones.
- if (std.healthy(req.backend_hint)) {
- # Backend is healthy. If the object is not older than 30 seconds, deliver it to the client
- # and automatically create a separate backend request to warm the cache for this request.
- if (obj.ttl + 30s > 0s) {
- set req.http.grace = "normal(limited)";
- return (deliver);
- } else {
- # No candidate for grace. Fetch a fresh object.
- return (miss);
- }
- }
- else {
- # backend is sick - use full grace
- if (obj.ttl + obj.grace > 0s) {
- set req.http.grace = "full";
- return (deliver);
- } else {
- # no graced object.
- return (miss);
- }
- }
-}
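
The branching above reduces to a small decision table; a JavaScript sketch (times in seconds, negative remaining TTL means the object has expired):

```js
// Grace decision: fresh objects are delivered; stale ones are delivered
// only within 30s of expiry (healthy backend, background refresh) or within
// the full grace window (sick backend); otherwise Varnish fetches fresh.
function graceDecision(ttlRemaining, graceRemaining, backendHealthy) {
  if (ttlRemaining >= 0) return 'deliver';                 // normal hit
  if (backendHealthy) {
    return ttlRemaining + 30 > 0 ? 'deliver (limited grace)' : 'miss';
  }
  return ttlRemaining + graceRemaining > 0 ? 'deliver (full grace)' : 'miss';
}

// graceDecision(-10, 21600, true)  -> 'deliver (limited grace)'
// graceDecision(-60, 21600, false) -> 'deliver (full grace)'
```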
-
-sub vcl_backend_response {
- # Allow items to be stale if needed.
- set beresp.grace = 6h;
-
- # Set ban-lurker friendly custom headers.
- set beresp.http.X-Url = bereq.url;
- set beresp.http.X-Host = bereq.http.host;
-
- # If the backend sends an X-LAGOON-VARNISH-BACKEND-BYPASS header, we deliver directly
- if (beresp.http.X-LAGOON-VARNISH-BACKEND-BYPASS == "TRUE") {
- return (deliver);
- }
-
- # Cache 404 and 403 for 10 seconds
- if (beresp.status == 404 || beresp.status == 403) {
- set beresp.ttl = 10s;
- return (deliver);
- }
-
- # Don't allow static files to set cookies.
- if (bereq.url ~ "(?i)\.(css|js|jpg|jpeg|gif|ico|png|tiff|tif|img|tga|wmf|swf|html|htm|woff|woff2|mp4|ttf|eot|svg)(\?.*)?$") {
- unset beresp.http.set-cookie;
- unset beresp.http.Cache-Control;
-
- # If an asset comes back with status code 500, we cache it for only 10 seconds instead of the usual static-file TTL
- if (beresp.status == 500) {
- set beresp.ttl = 10s;
- return (deliver);
- }
-
- set beresp.ttl = ${VARNISH_ASSETS_TTL:-2628001}s;
- set beresp.http.Cache-Control = "public, max-age=${VARNISH_ASSETS_TTL:-2628001}";
- set beresp.http.Expires = "" + (now + beresp.ttl);
- }
-
- # Files larger than ~10 MB (an 8+ digit Content-Length) are streamed and marked uncacheable.
- if (beresp.http.Content-Length ~ "[0-9]{8,}") {
- set beresp.do_stream = true;
- set beresp.uncacheable = true;
- set beresp.ttl = 120s;
- }
-
- # Disable buffering only for BigPipe responses
- if (beresp.http.Surrogate-Control ~ "BigPipe/1.0") {
- set beresp.do_stream = true;
- set beresp.ttl = 0s;
- }
-
- # The following is taken from https://github.com/varnishcache/varnish-cache/blob/master/bin/varnishd/builtin.vcl#L149
- if (bereq.uncacheable) {
- return (deliver);
- } else if (beresp.ttl <= 0s ||
- beresp.http.Set-Cookie ||
- beresp.http.Surrogate-control ~ "(?i)no-store" ||
- (!beresp.http.Surrogate-Control &&
- beresp.http.Cache-Control ~ "(?i:no-cache|no-store|private)") ||
- beresp.http.Vary == "*") {
- # Mark as "Hit-For-Miss" for the next 2 minutes
- set beresp.ttl = 120s;
- set beresp.uncacheable = true;
- }
- return (deliver);
-}
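
The hit-for-miss conditions above, restated as a JavaScript predicate (a sketch with hypothetical field names) to make the precedence explicit:

```js
// An object is marked uncacheable ("hit-for-miss") for 120s if any of these
// hold; note Surrogate-Control, when present, overrides Cache-Control.
function isHitForMiss(beresp) {
  return beresp.ttlSeconds <= 0
    || Boolean(beresp.setCookie)
    || /no-store/i.test(beresp.surrogateControl || '')
    || (!beresp.surrogateControl
        && /no-cache|no-store|private/i.test(beresp.cacheControl || ''))
    || beresp.vary === '*';
}
```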
-
-# Set a header to track a cache HIT/MISS.
-sub vcl_deliver {
- if (obj.hits > 0) {
- set resp.http.X-Varnish-Cache = "HIT";
- }
- else {
- set resp.http.X-Varnish-Cache = "MISS";
- }
-
- # Remove ban-lurker friendly custom headers when delivering to client.
- unset resp.http.X-Url;
- unset resp.http.X-Host;
-
- # Unset the Cache-Tags header by default; this can be disabled with VARNISH_SET_HEADER_CACHE_TAGS=true
- if (!${VARNISH_SET_HEADER_CACHE_TAGS:-false}) {
- unset resp.http.Cache-Tags;
- }
-
- unset resp.http.X-Generator;
- unset resp.http.Server;
- # Inject information about grace
- if (req.http.grace) {
- set resp.http.X-Varnish-Grace = req.http.grace;
- }
- set resp.http.X-LAGOON = "${HOSTNAME}-${LAGOON_GIT_BRANCH:-undef}-${LAGOON_PROJECT}>" + resp.http.X-LAGOON;
- return (deliver);
-}
-
-sub vcl_hash {
- hash_data(req.url);
- if (req.http.host) {
- hash_data(req.http.host);
- }
- else {
- hash_data(server.ip);
- }
- if (req.http.X-Forwarded-Proto) {
- hash_data(req.http.X-Forwarded-Proto);
- }
- if (req.http.HTTPS) {
- hash_data(req.http.HTTPS);
- }
- return (lookup);
-}
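
A sketch of the resulting cache key in JavaScript (hypothetical helper): URL plus Host (or server IP), with the scheme headers mixed in so HTTP and HTTPS variants are cached separately:

```js
// Collect the values that vcl_hash above feeds into hash_data(), in order.
function cacheKeyParts(req, serverIp) {
  const parts = [req.url, req.headers['host'] || serverIp];
  if (req.headers['x-forwarded-proto']) parts.push(req.headers['x-forwarded-proto']);
  if (req.headers['https']) parts.push(req.headers['https']);
  return parts;
}

// cacheKeyParts({ url: '/a', headers: { host: 'example.com', 'x-forwarded-proto': 'https' } })
//   -> ['/a', 'example.com', 'https']
```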
-
-sub vcl_synth {
- if (resp.status == 701) {
- set resp.status = 401;
- set resp.http.Content-Type = "text/plain; charset=utf-8";
- synthetic({"XMLRPC Interface is blocked due to SA-CORE-2014-004 - mail support@amazee.io if you need it."});
- return (deliver);
- }
- if (resp.status == 700) {
- # Set a status the client will understand
- set resp.status = 200;
- # Create our synthetic response
- synthetic("");
- return(deliver);
- }
- return (deliver);
-}
-
-sub vcl_backend_error {
- # Retry the request on a backend server error, so another backend can be tried.
- # Retry at most twice.
- if (bereq.retries < 2) {
- return(retry);
- }
-
- set beresp.http.Content-Type = "text/html; charset=utf-8";
- set beresp.http.Retry-After = "5";
- synthetic({"
-<!DOCTYPE html>
-<html>
-  <head>
-    <title>Server Error</title>
-  </head>
-  <body>
-    <h1>We are sorry...</h1>
-    <p>We encountered a server-side error. This means that the problem is not with your computer or Internet connection, but rather with the website's server.</p>
-    <p>We are currently working on solving this problem and apologise for the inconvenience.</p>
-  </body>
-</html>
-"});
-
- return (deliver);
-}
\ No newline at end of file
diff --git a/tests/files/node-mongodb/dbaas/views/index.html b/tests/files/node-mongodb/dbaas/views/index.html
new file mode 100644
index 0000000000..75884cdc2c
--- /dev/null
+++ b/tests/files/node-mongodb/dbaas/views/index.html
@@ -0,0 +1,19 @@
+
+
+
+
+ List
+
+
+
+
+Home
+
+
\ No newline at end of file
diff --git a/tests/files/node-mongodb/dbaas/views/list.html b/tests/files/node-mongodb/dbaas/views/list.html
new file mode 100644
index 0000000000..c7d2ff30b5
--- /dev/null
+++ b/tests/files/node-mongodb/dbaas/views/list.html
@@ -0,0 +1,33 @@
+
+
+
+
+ Add
+
+
+
+
+Add
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/.dockerignore b/tests/files/node-mongodb/single/.dockerignore
new file mode 100644
index 0000000000..b512c09d47
--- /dev/null
+++ b/tests/files/node-mongodb/single/.dockerignore
@@ -0,0 +1 @@
+node_modules
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/.env b/tests/files/node-mongodb/single/.env
new file mode 100644
index 0000000000..cdda283af8
--- /dev/null
+++ b/tests/files/node-mongodb/single/.env
@@ -0,0 +1,7 @@
+MONGODB_USERNAME=mongouser
+MONGODB_PASSWORD=password
+MONGODB_HOST=mongodb
+MONGODB_PORT=27017
+MONGODB_DATABASE=admin
+MONGODB_AUTHSOURCE=admin
+MONGODB_AUTHTLS=True
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/.lagoon.yml b/tests/files/node-mongodb/single/.lagoon.yml
new file mode 100644
index 0000000000..eade588656
--- /dev/null
+++ b/tests/files/node-mongodb/single/.lagoon.yml
@@ -0,0 +1,11 @@
+docker-compose-yaml: docker-compose.yml
+
+environment_variables:
+ git_sha: 'true'
+
+environments:
+ node10:
+ routes:
+ - node:
+ - customdomain-will-be-main-domain.com
+ - customdomain-will-be-not-be-main-domain.com
diff --git a/tests/files/node-mongodb/single/Dockerfile b/tests/files/node-mongodb/single/Dockerfile
new file mode 100644
index 0000000000..d5be35ae98
--- /dev/null
+++ b/tests/files/node-mongodb/single/Dockerfile
@@ -0,0 +1,11 @@
+ARG UPSTREAM_REPO
+ARG UPSTREAM_TAG
+
+FROM ${UPSTREAM_REPO:-testlagoon}/node-10:${UPSTREAM_TAG:-latest}
+COPY . /app/
+
+RUN npm install
+
+EXPOSE 3000
+
+CMD ["node", "index.js"]
diff --git a/tests/files/node-mongodb/single/Dockerfile.mongo b/tests/files/node-mongodb/single/Dockerfile.mongo
new file mode 100644
index 0000000000..69b0dcce48
--- /dev/null
+++ b/tests/files/node-mongodb/single/Dockerfile.mongo
@@ -0,0 +1,21 @@
+FROM mongo:4.2.3
+# generate a self signed mongodb with tls support
+COPY mongodb/openssl-test-ca.cnf openssl-test-ca.cnf
+
+RUN openssl genrsa -out mongodb-test-ca.key 4096 && \
+ openssl req -subj "/C=PE/ST=Lima/L=Lima/O=Acme Inc. /OU=IT Department/CN=acme.com" -new -x509 -days 1826 -key mongodb-test-ca.key -out mongodb-test-ca.crt -config openssl-test-ca.cnf && \
+ openssl genrsa -out mongodb-test-ia.key 4096 && \
+ openssl req -subj "/C=PE/ST=Lima/L=Lima/O=Acme Inc. /OU=IT Department/CN=acme.com" -new -key mongodb-test-ia.key -out mongodb-test-ia.csr -config openssl-test-ca.cnf && \
+ openssl x509 -sha256 -req -days 730 -in mongodb-test-ia.csr -CA mongodb-test-ca.crt -CAkey mongodb-test-ca.key -set_serial 01 -out mongodb-test-ia.crt -extfile openssl-test-ca.cnf -extensions v3_ca && \
+ cat mongodb-test-ca.crt mongodb-test-ia.crt > test-ca.pem
+
+COPY mongodb/openssl-test-server.cnf openssl-test-server.cnf
+
+RUN openssl genrsa -out mongodb-test-server1.key 4096 && \
+ openssl req -subj "/C=PE/ST=Lima/L=Lima/O=Acme Inc. /OU=IT Department/CN=acme.com" -new -key mongodb-test-server1.key -out mongodb-test-server1.csr -config openssl-test-server.cnf && \
+ openssl x509 -sha256 -req -days 365 -in mongodb-test-server1.csr -CA mongodb-test-ia.crt -CAkey mongodb-test-ia.key -CAcreateserial -out mongodb-test-server1.crt -extfile openssl-test-server.cnf -extensions v3_req && \
+ cat mongodb-test-server1.crt mongodb-test-server1.key > test-server1.pem
+
+COPY mongodb/mongo-init.js ./docker-entrypoint-initdb.d
+
+CMD ["mongod","--tlsMode","requireTLS","--tlsAllowConnectionsWithoutCertificates","--tlsCertificateKeyFile","test-server1.pem","--tlsCAFile","test-ca.pem","--bind_ip","0.0.0.0"]
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/controllers/list.js b/tests/files/node-mongodb/single/controllers/list.js
new file mode 100644
index 0000000000..e9bd5c7be0
--- /dev/null
+++ b/tests/files/node-mongodb/single/controllers/list.js
@@ -0,0 +1,29 @@
+const path = require('path');
+const List = require('../models/list');
+
+exports.index = function (req, res) {
+ res.sendFile(path.resolve('views/list.html'));
+};
+
+exports.create = function (req, res) {
+ var newItem = new List(req.body);
+ console.log(req.body);
+ newItem.save(function (err) {
+ if(err) {
+ res.status(400).send('Unable to save item to database');
+ } else {
+ res.redirect('/list/getlist');
+ }
+ });
+ };
+
+exports.list = function (req, res) {
+ List.find({}).exec(function (err, items) {
+ if (err) {
+ return res.status(500).send(err);
+ }
+ res.render('getlist', {
+ items: items
+ });
+ });
+};
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/db.js b/tests/files/node-mongodb/single/db.js
new file mode 100644
index 0000000000..61c3490377
--- /dev/null
+++ b/tests/files/node-mongodb/single/db.js
@@ -0,0 +1,38 @@
+const mongoose = require('mongoose');
+
+const {
+ MONGODB_USERNAME,
+ MONGODB_PASSWORD,
+ MONGODB_HOST,
+ MONGODB_PORT,
+ MONGODB_DATABASE,
+ MONGODB_AUTHSOURCE,
+ MONGODB_AUTHTLS
+} = process.env;
+
+const options = {
+ useNewUrlParser: true,
+ reconnectTries: Number.MAX_VALUE,
+ reconnectInterval: 500,
+ connectTimeoutMS: 10000,
+};
+
+let authTLSString = "";
+if (MONGODB_AUTHTLS == "True") {
+ authTLSString = "ssl=true&sslInsecure=true&tls=true&tlsInsecure=true";
+ if (MONGODB_AUTHSOURCE != "") {
+ authTLSString = `authSource=${MONGODB_AUTHSOURCE}&ssl=true&sslInsecure=true&tls=true&tlsInsecure=true`;
+ }
+}
+
+const url = `mongodb://${MONGODB_USERNAME}:${MONGODB_PASSWORD}@${MONGODB_HOST}:${MONGODB_PORT}/${MONGODB_DATABASE}?${authTLSString}`;
+
+console.log(url);
+
+mongoose.connect(url, options)
+  .then(function () {
+    console.log('MongoDB is connected');
+  })
+  .catch(function (err) {
+    console.log(err);
+    process.exit();
+  });
\ No newline at end of file
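
With the defaults from the .env file above (TLS auth on, non-empty auth source), the template literal in db.js resolves as in this sketch:

```js
// Reconstruct the connection string db.js logs for the test defaults.
const env = {
  MONGODB_USERNAME: 'mongouser', MONGODB_PASSWORD: 'password',
  MONGODB_HOST: 'mongodb', MONGODB_PORT: '27017',
  MONGODB_DATABASE: 'admin', MONGODB_AUTHSOURCE: 'admin',
};
const query = `authSource=${env.MONGODB_AUTHSOURCE}&ssl=true&sslInsecure=true&tls=true&tlsInsecure=true`;
console.log(`mongodb://${env.MONGODB_USERNAME}:${env.MONGODB_PASSWORD}@${env.MONGODB_HOST}:${env.MONGODB_PORT}/${env.MONGODB_DATABASE}?${query}`);
// -> mongodb://mongouser:password@mongodb:27017/admin?authSource=admin&ssl=true&sslInsecure=true&tls=true&tlsInsecure=true
```

The insecure/TLS-skip flags match the self-signed chain produced by Dockerfile.mongo; they are test-only settings.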
diff --git a/tests/files/node-mongodb/single/docker-compose.yml b/tests/files/node-mongodb/single/docker-compose.yml
new file mode 100644
index 0000000000..aa933aeebd
--- /dev/null
+++ b/tests/files/node-mongodb/single/docker-compose.yml
@@ -0,0 +1,39 @@
+version: '3'
+services:
+ node:
+ networks:
+ - amazeeio-network
+ - default
+ build:
+ context: .
+ dockerfile: Dockerfile
+ labels:
+ lagoon.type: node
+ ports:
+ - "3020:3000"
+ depends_on:
+ - mongodb
+ environment:
+ - AMAZEEIO_URL=node-mongo.docker.amazee.io
+ - MONGODB_USERNAME=mongouser
+ - MONGODB_PASSWORD=password
+ - MONGODB_HOST=mongodb
+ - MONGODB_PORT=27017
+ - MONGODB_DATABASE=admin
+ - MONGODB_AUTHSOURCE=admin
+ - MONGODB_AUTHTLS=True
+ mongodb:
+ build:
+ context: .
+ dockerfile: Dockerfile.mongo
+ labels:
+ lagoon.type: mongodb-single
+ # ports:
+ # - "27027:27017"
+ # networks:
+ # - amazeeio-network
+ # - default
+
+networks:
+ amazeeio-network:
+ external: true
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/index.js b/tests/files/node-mongodb/single/index.js
new file mode 100644
index 0000000000..d9caf6ebd9
--- /dev/null
+++ b/tests/files/node-mongodb/single/index.js
@@ -0,0 +1,18 @@
+const express = require('express');
+const app = express();
+const router = express.Router();
+const db = require('./db');
+const list = require('./routes/list');
+
+const path = __dirname + '/views/';
+const port = process.env.PORT || 3000;
+
+app.engine('html', require('ejs').renderFile);
+app.set('view engine', 'html');
+app.use(express.urlencoded({ extended: true }));
+app.use(express.static(path));
+app.use('/list', list);
+
+app.listen(port, function () {
+ console.log(`App listening on ${port}!`);
+});
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/models/list.js b/tests/files/node-mongodb/single/models/list.js
new file mode 100644
index 0000000000..3ad8c09f7d
--- /dev/null
+++ b/tests/files/node-mongodb/single/models/list.js
@@ -0,0 +1,9 @@
+const mongoose = require('mongoose');
+const Schema = mongoose.Schema;
+
+const List = new Schema ({
+ name: { type: String, required: true },
+ character: { type: String, required: true },
+});
+
+module.exports = mongoose.model('List', List)
\ No newline at end of file
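
Hypothetical usage of the model, mirroring what the /list/additem handler in controllers/list.js receives from the form:

```js
// Create and persist one list item; both fields are required by the schema.
// Assumes db.js has already established the mongoose connection.
const List = require('./models/list');

const item = new List({ name: 'Road Runner', character: 'Beep Beep' });
item.save(function (err) {
  if (err) return console.error('validation failed:', err.message);
  console.log('saved item', item._id.toString());
});
```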
diff --git a/tests/files/node-mongodb/single/mongodb/mongo-init.js b/tests/files/node-mongodb/single/mongodb/mongo-init.js
new file mode 100644
index 0000000000..0584e5bf21
--- /dev/null
+++ b/tests/files/node-mongodb/single/mongodb/mongo-init.js
@@ -0,0 +1,11 @@
+// connect to admin database to create users
+db = new Mongo().getDB("admin");
+// create admin user
+db.createUser({
+ user: "mongouser",
+ pwd: "password",
+ roles: [{
+ role: "clusterAdmin",
+ db: "admin"
+ }]
+});
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/mongodb/openssl-test-ca.cnf b/tests/files/node-mongodb/single/mongodb/openssl-test-ca.cnf
new file mode 100644
index 0000000000..db9062eda2
--- /dev/null
+++ b/tests/files/node-mongodb/single/mongodb/openssl-test-ca.cnf
@@ -0,0 +1,57 @@
+# NOT FOR PRODUCTION USE. OpenSSL configuration file for testing.
+
+# For the CA policy
+[ policy_match ]
+countryName = match
+stateOrProvinceName = match
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+[ req ]
+default_bits = 4096
+default_keyfile = myTestCertificateKey.pem ## The default private key file name.
+default_md = sha256 ## Use SHA-256 for Signatures
+distinguished_name = req_dn
+req_extensions = v3_req
+x509_extensions = v3_ca # The extensions to add to the self-signed cert
+
+[ v3_req ]
+subjectKeyIdentifier = hash
+basicConstraints = CA:FALSE
+keyUsage = critical, digitalSignature, keyEncipherment
+nsComment = "OpenSSL Generated Certificate for TESTING only. NOT FOR PRODUCTION USE."
+extendedKeyUsage = serverAuth, clientAuth
+
+[ req_dn ]
+countryName = AU
+countryName_default = .
+countryName_min = 2
+countryName_max = 2
+
+stateOrProvinceName = State or Province Name
+stateOrProvinceName_default = TestCertificateStateName
+stateOrProvinceName_max = 64
+
+localityName = Locality Name
+localityName_default = TestCertificateLocalityName
+localityName_max = 64
+
+organizationName = Organization Name
+organizationName_default = TestCertificateOrgName
+organizationName_max = 64
+
+organizationalUnitName = Organizational Unit Name
+organizationalUnitName_default = TestCertificateOrgUnitName
+organizationalUnitName_max = 64
+
+commonName = Common Name (eg, YOUR name)
+commonName_max = 64
+
+[ v3_ca ]
+# Extensions for a typical CA
+
+subjectKeyIdentifier=hash
+basicConstraints = critical,CA:true
+authorityKeyIdentifier=keyid:always,issuer:always
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/mongodb/openssl-test-server.cnf b/tests/files/node-mongodb/single/mongodb/openssl-test-server.cnf
new file mode 100644
index 0000000000..7a3857af5e
--- /dev/null
+++ b/tests/files/node-mongodb/single/mongodb/openssl-test-server.cnf
@@ -0,0 +1,45 @@
+# NOT FOR PRODUCTION USE. OpenSSL configuration file for testing.
+
+
+[ req ]
+default_bits = 4096
+default_keyfile = myTestServerCertificateKey.pem ## The default private key file name.
+default_md = sha256
+distinguished_name = req_dn
+req_extensions = v3_req
+
+[ v3_req ]
+subjectKeyIdentifier = hash
+basicConstraints = CA:FALSE
+keyUsage = critical, digitalSignature, keyEncipherment
+nsComment = "OpenSSL Generated Certificate for TESTING only. NOT FOR PRODUCTION USE."
+extendedKeyUsage = serverAuth, clientAuth
+subjectAltName = @alt_names
+
+[ alt_names ]
+DNS.1 = www.acme.com ##TODO: Enter the DNS names. The DNS names should match the server names.
+
+[ req_dn ]
+countryName = Country Name (2 letter code)
+countryName_default = TestServerCertificateCountry
+countryName_min = 2
+countryName_max = 2
+
+stateOrProvinceName = State or Province Name (full name)
+stateOrProvinceName_default = TestServerCertificateState
+stateOrProvinceName_max = 64
+
+localityName = Locality Name (eg, city)
+localityName_default = TestServerCertificateLocality
+localityName_max = 64
+
+organizationName = Organization Name (eg, company)
+organizationName_default = TestServerCertificateOrg
+organizationName_max = 64
+
+organizationalUnitName = Organizational Unit Name (eg, section)
+organizationalUnitName_default = TestServerCertificateOrgUnit
+organizationalUnitName_max = 64
+
+commonName = Common Name (eg, YOUR name)
+commonName_max = 64
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/package.json b/tests/files/node-mongodb/single/package.json
new file mode 100644
index 0000000000..0ccb4b7a55
--- /dev/null
+++ b/tests/files/node-mongodb/single/package.json
@@ -0,0 +1,15 @@
+{
+ "name": "node",
+ "version": "1.0.0",
+ "main": "index.js",
+ "license": "MIT",
+ "dependencies": {
+ "ejs": "^2.6.1",
+ "express": "^4.16.4",
+ "mongoose": "^5.4.10",
+ "nodemon": "^1.19.4"
+ },
+ "scripts": {
+ "start": "node index.js"
+ }
+}
diff --git a/tests/files/node-mongodb/single/routes/index.js b/tests/files/node-mongodb/single/routes/index.js
new file mode 100644
index 0000000000..589ccae5ee
--- /dev/null
+++ b/tests/files/node-mongodb/single/routes/index.js
@@ -0,0 +1,14 @@
+const express = require('express');
+const router = express.Router();
+const path = require('path');
+
+router.use (function (req,res,next) {
+ console.log('/' + req.method);
+ next();
+});
+
+router.get('/',function(req,res){
+ res.sendFile(path.resolve('views/index.html'));
+});
+
+module.exports = router;
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/routes/list.js b/tests/files/node-mongodb/single/routes/list.js
new file mode 100644
index 0000000000..1cefe59ef7
--- /dev/null
+++ b/tests/files/node-mongodb/single/routes/list.js
@@ -0,0 +1,17 @@
+const express = require('express');
+const router = express.Router();
+const list = require('../controllers/list');
+
+router.get('/', function(req, res){
+ list.index(req,res);
+});
+
+router.post('/additem', function(req, res) {
+ list.create(req,res);
+});
+
+router.get('/getlist', function(req, res) {
+ list.list(req,res);
+});
+
+module.exports = router;
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/views/getlist.html b/tests/files/node-mongodb/single/views/getlist.html
new file mode 100644
index 0000000000..b6fdf973f6
--- /dev/null
+++ b/tests/files/node-mongodb/single/views/getlist.html
@@ -0,0 +1,30 @@
+
+
+
+
+ List
+
+
+
+
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/views/index.html b/tests/files/node-mongodb/single/views/index.html
new file mode 100644
index 0000000000..75884cdc2c
--- /dev/null
+++ b/tests/files/node-mongodb/single/views/index.html
@@ -0,0 +1,19 @@
+
+
+
+
+ List
+
+
+
+
+Home
+
+
\ No newline at end of file
diff --git a/tests/files/node-mongodb/single/views/list.html b/tests/files/node-mongodb/single/views/list.html
new file mode 100644
index 0000000000..c7d2ff30b5
--- /dev/null
+++ b/tests/files/node-mongodb/single/views/list.html
@@ -0,0 +1,33 @@
+
+
+
+
+ Add
+
+
+