diff --git a/docker-compose.yaml b/docker-compose.yaml index 8818918660..319188f606 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -290,6 +290,17 @@ services: - ./local-dev/api-data-watcher-pusher:/home labels: lagoon.type: none + local-minio: + image: minio/minio + entrypoint: sh + command: -c 'mkdir -p /export/lagoon-files && /usr/bin/minio server /export' + ports: + - '9000:9000' + environment: + - MINIO_ACCESS_KEY=minio + - MINIO_SECRET_KEY=minio123 + labels: + lagoon.type: none drush-alias: image: ${IMAGE_REPO:-lagoon}/drush-alias volumes: diff --git a/docs/using_lagoon/index.md b/docs/using_lagoon/index.md index a33c0d93da..e5b91936fa 100644 --- a/docs/using_lagoon/index.md +++ b/docs/using_lagoon/index.md @@ -43,16 +43,17 @@ Some Docker Images and Containers need additional customizations from the provid | Type | Versions | Dockerfile | | ---------------| -------------------| -------------------------------------------------------------------------------------------------------------| -| nginx | 1.12 | [nginx/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/nginx/Dockerfile) | +| nginx | openresty/1.13.6.2 | [nginx/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/nginx/Dockerfile) | | nginx-drupal | | [nginx-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/nginx-drupal/Dockerfile) | -| [php-fpm](docker_images/php-fpm.md) | 5.6, 7.0, 7.1, 7.2 | [php/fpm/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/fpm/Dockerfile) | -| php-cli | 5.6, 7.0, 7.1, 7.2 | [php/cli/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/cli/Dockerfile) | -| php-cli-drupal | 5.6, 7.0, 7.1, 7.2 | [php/cli-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/cli-drupal/Dockerfile) | +| [php-fpm](docker_images/php-fpm.md) | 7.1, 7.2, 7.3 | [php/fpm/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/fpm/Dockerfile) | +| php-cli | 7.1, 7.2, 7.3 | [php/cli/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/cli/Dockerfile) | +| php-cli-drupal | 7.1, 7.2, 7.3 | [php/cli-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/php/cli-drupal/Dockerfile) | | mariadb | 10 | [mariadb/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mariadb/Dockerfile) | | mariadb-drupal | 10 | [mariadb-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mariadb-drupal/Dockerfile) | | mongo | 3.6 | [mongo/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/mongo/Dockerfile) | -| solr | 5.5, 6.6 | | -| solr-drupal | 5.5, 6.6 | | -| redis | | | -| varnish | 5 | | -| varnish-drupal | 5 | | +| solr | 5.5, 6.6, 7.5 | [solr/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile) | +| solr-drupal | 5.5, 6.6, 7.5 | [solr-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/solr-drupal/Dockerfile)| +| redis | 5.0.0 | [redis/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/redis/Dockerfile) | +| redis-persistent | 5.0.0 | [redis-persistent/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/redis-persistent/Dockerfile) | +| varnish | 5 | [varnish/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/varnish/Dockerfile) | +| varnish-drupal | 5 | [varnish-drupal/Dockerfile](https://github.com/amazeeio/lagoon/blob/master/images/varnish-drupal/Dockerfile) | diff --git a/docs/using_lagoon/remote_shell.md 
b/docs/using_lagoon/remote_shell.md index cac30f06ed..735e922322 100644 --- a/docs/using_lagoon/remote_shell.md +++ b/docs/using_lagoon/remote_shell.md @@ -47,7 +47,7 @@ ssh -p [PORT] -t [PROJECT-ENVIRONMENT-NAME]@[HOST] rsh service=[SERVICE-NAME] co As example to connect to the `php` container within the `nginx` pod: ``` -ssh -p 32222 -t drupal-example-master@ssh.lagoon.amazeeio.cloud rsh service=nginx container=pod +ssh -p 32222 -t drupal-example-master@ssh.lagoon.amazeeio.cloud rsh service=nginx container=php ``` ### Execute Commands @@ -60,4 +60,4 @@ Example: ssh -p 32222 -t drupal-example-master@ssh.lagoon.amazeeio.cloud rsh whoami ``` -Will execute `whoami` within the `cli` container. \ No newline at end of file +Will execute `whoami` within the `cli` container. diff --git a/helpers/mariadb-galera2shared.sh b/helpers/mariadb-galera2shared.sh new file mode 100755 index 0000000000..fffab3c708 --- /dev/null +++ b/helpers/mariadb-galera2shared.sh @@ -0,0 +1,114 @@ +#!/bin/bash + + + +if [ ! "$1" ]; then + echo "please define openshift project as first argument" + exit 1; +fi + +set -uo pipefail + +which shyaml > /dev/null +if [ $? -gt 0 ]; then + echo "please install shyaml (pip3 install shyaml)" + exit 1 +fi + +which jq > /dev/null +if [ $? -gt 0 ]; then + echo "please install jq" + exit 1 +fi + +which svcat > /dev/null +if [ $? -gt 0 ]; then + echo "please install svcat" + exit 1 +fi + +set -e + +PROJECT_NAME=$1 + +echo "*** Starting mariadb-galera --> mariadb-shared migration in ${PROJECT_NAME}" + +SERVICE_NAME=mariadb +SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:]) +SERVICE_TYPE=mariadb-shared + +ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') + +OLD_POD="mariadb-galera-0" + +if [[ "$OLD_POD" ]]; then + echo "found $SERVICE_NAME pod $OLD_POD" +else + echo "no running pod found for service '${SERVICE_NAME}'', is it running?" + exit 1 +fi + +echo "*** Pausing nginx and cli" +NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas') +CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas') +oc -n $1 scale dc/nginx --replicas=0 +oc -n $1 scale dc/cli --replicas=0 + + +# create service broker +## taken from build-deploy-docker-compose.sh + +OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml" +SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb" +SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}" +OPENSHIFT_PROJECT=$1 +. 
$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh + +# ServiceBrokers take a bit, wait until the credentials secret is available +until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials +do + echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs" + sleep 10 +done + +# Load credentials out of secret +SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml +oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS + +DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) +DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) +DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) +DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) +DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) + +echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" +# transfer database between from old to new +oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db drupal || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" + +CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" +echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" +oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK + +echo "*** updating configmap to point to ${DB_HOST}." +# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker +oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \ + -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" + + +echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later" +oc -n $1 delete service mariadb +oc -n $1 scale dc/mariadb-maxscale --replicas=0 +oc -n $1 scale statefulset/mariadb-galera --replicas=0 + + +# transfer complete, clean up +rm -f $SECRETS + +oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS +oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS + +oc -n $1 rollout latest dc/nginx +oc -n $1 rollout latest dc/cli +oc -n $1 rollout status dc/nginx +oc -n $1 rollout status dc/cli +echo "*** done." diff --git a/helpers/mariadb-single2shared-no-nginx.sh b/helpers/mariadb-single2shared-no-nginx.sh new file mode 100755 index 0000000000..8b59b98a62 --- /dev/null +++ b/helpers/mariadb-single2shared-no-nginx.sh @@ -0,0 +1,106 @@ +#!/bin/bash + + + +if [ ! "$1" ]; then + echo "please define openshift project as first argument" + exit 1; +fi + +set -uo pipefail + +which shyaml > /dev/null +if [ $? -gt 0 ]; then + echo "please install shyaml (pip3 install shyaml)" + exit 1 +fi + +which jq > /dev/null +if [ $? -gt 0 ]; then + echo "please install jq" + exit 1 +fi + +which svcat > /dev/null +if [ $? 
-gt 0 ]; then + echo "please install svcat" + exit 1 +fi + +set -e + +PROJECT_NAME=$1 + +echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}" + +SERVICE_NAME=mariadb +SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:]) +SERVICE_TYPE=mariadb-shared + +ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') + +MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas') + +if [ "$MARIADB_REPLICAS" == "0" ]; then + oc -n $1 scale dc/mariadb --replicas=1 + oc -n $1 rollout status dc/mariadb +fi + +# export old mariadb pod name +OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME) + +if [[ "$OLD_POD" ]]; then + echo "found $SERVICE_NAME pod $OLD_POD" +else + echo "no running pod found for service '${SERVICE_NAME}'', is it running?" + exit 1 +fi + +# create service broker +## taken from build-deploy-docker-compose.sh + +OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml" +SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb" +SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}" +OPENSHIFT_PROJECT=$1 +. $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh + +# ServiceBrokers take a bit, wait until the credentials secret is available +until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials +do + echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs" + sleep 10 +done + +# Load credentials out of secret +SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml +oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS + +DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) +DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) +DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) +DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) +DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) + +echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" +# transfer database between from old to new +oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db \$MARIADB_DATABASE || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" + +CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" +echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" +oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK + +echo "*** updating configmap to point to ${DB_HOST}." +# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker +oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \ + -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" + + +echo "*** Deleting mariadb service. 
Scaling old mariadb to 0; you can clean up the DC and pv later" +oc -n $1 delete service mariadb +oc -n $1 scale dc/mariadb --replicas=0 + +# transfer complete, clean up +rm -f $SECRETS + +echo "*** done." diff --git a/helpers/mariadb-single2shared-wordpress.sh b/helpers/mariadb-single2shared-wordpress.sh new file mode 100755 index 0000000000..13461cfbf5 --- /dev/null +++ b/helpers/mariadb-single2shared-wordpress.sh @@ -0,0 +1,120 @@ +#!/bin/bash + + + +if [ ! "$1" ]; then + echo "please define openshift project as first argument" + exit 1; +fi + +set -uo pipefail + +which shyaml > /dev/null +if [ $? -gt 0 ]; then + echo "please install shyaml (pip3 install shyaml)" + exit 1 +fi + +which jq > /dev/null +if [ $? -gt 0 ]; then + echo "please install jq" + exit 1 +fi + +which svcat > /dev/null +if [ $? -gt 0 ]; then + echo "please install svcat" + exit 1 +fi + +set -e + +PROJECT_NAME=$1 + +echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}" + +SERVICE_NAME=mariadb +SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:]) +SERVICE_TYPE=mariadb-shared + +ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') + +MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas') + +if [ "$MARIADB_REPLICAS" == "0" ]; then + oc -n $1 scale dc/mariadb --replicas=1 + oc -n $1 rollout status dc/mariadb +fi + +# export old mariadb pod name +OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME) + +if [[ "$OLD_POD" ]]; then + echo "found $SERVICE_NAME pod $OLD_POD" +else + echo "no running pod found for service '${SERVICE_NAME}'', is it running?" + exit 1 +fi + +echo "*** Pausing nginx and cli" +NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas') +CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas') +oc -n $1 scale dc/nginx --replicas=0 +oc -n $1 scale dc/cli --replicas=0 + + +# create service broker +## taken from build-deploy-docker-compose.sh + +OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml" +SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb" +SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}" +OPENSHIFT_PROJECT=$1 +. 
$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh + +# ServiceBrokers take a bit, wait until the credentials secret is available +until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials +do + echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs" + sleep 10 +done + +# Load credentials out of secret +SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml +oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS + +DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) +DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) +DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) +DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) +DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) + +echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" +# transfer database between from old to new +oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db lagoon || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" + +CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" +echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" +oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK + +echo "*** updating configmap to point to ${DB_HOST}." +# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker +oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \ + -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" + + +echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later" +oc -n $1 delete service mariadb +oc -n $1 scale dc/mariadb --replicas=0 + +# transfer complete, clean up +rm -f $SECRETS + +oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS +oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS + +oc -n $1 rollout latest dc/nginx +oc -n $1 rollout latest dc/cli +oc -n $1 rollout status dc/nginx +oc -n $1 rollout status dc/cli +echo "*** done." 
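The three migration helpers added above share one calling convention: the OpenShift project name is the only argument, `shyaml`, `jq`, `svcat` and a logged-in `oc` client must be available, and the servicebroker template is resolved via `git rev-parse --show-toplevel`, so they have to be run from inside a lagoon checkout. A minimal invocation sketch (the project name below is a placeholder):

```
# Run from within a clone of the lagoon repository so that
# `git rev-parse --show-toplevel` can locate the servicebroker template.
# "drupal-example-master" is a placeholder OpenShift project name.
./helpers/mariadb-single2shared-wordpress.sh drupal-example-master
```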
diff --git a/helpers/mariadb-single2shared.sh b/helpers/mariadb-single2shared.sh index 09709f02fa..9a281271bd 100755 --- a/helpers/mariadb-single2shared.sh +++ b/helpers/mariadb-single2shared.sh @@ -91,7 +91,7 @@ DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" # transfer database between from old to new -oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db drupal || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" +oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db drupal || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" diff --git a/images/elasticsearch/Dockerfile b/images/elasticsearch/Dockerfile index e2d76938e6..9f959bd4e8 100644 --- a/images/elasticsearch/Dockerfile +++ b/images/elasticsearch/Dockerfile @@ -1,6 +1,6 @@ ARG IMAGE_REPO FROM ${IMAGE_REPO:-lagoon}/commons as commons -FROM docker.elastic.co/elasticsearch/elasticsearch:6.4.2 +FROM docker.elastic.co/elasticsearch/elasticsearch:6.5.4 LABEL maintainer="amazee.io" ENV LAGOON=elasticsearch diff --git a/images/kibana/Dockerfile b/images/kibana/Dockerfile index d646ca3b38..f2f5d9edd2 100644 --- a/images/kibana/Dockerfile +++ b/images/kibana/Dockerfile @@ -1,6 +1,6 @@ ARG IMAGE_REPO FROM ${IMAGE_REPO:-lagoon}/commons as commons -FROM docker.elastic.co/kibana/kibana:6.4.2 +FROM docker.elastic.co/kibana/kibana:6.5.4 LABEL maintainer="amazee.io" ENV LAGOON=kibana diff --git a/images/logstash/Dockerfile b/images/logstash/Dockerfile index 881fc49d6e..a7622ba62b 100644 --- a/images/logstash/Dockerfile +++ b/images/logstash/Dockerfile @@ -1,6 +1,6 @@ ARG IMAGE_REPO FROM ${IMAGE_REPO:-lagoon}/commons as commons -FROM docker.elastic.co/logstash/logstash:6.4.2 +FROM docker.elastic.co/logstash/logstash:6.5.4 LABEL maintainer="amazee.io" ENV LAGOON=logstash diff --git a/images/php/cli/Dockerfile b/images/php/cli/Dockerfile index 47c2f71cbf..a7a29bec6e 100644 --- a/images/php/cli/Dockerfile +++ b/images/php/cli/Dockerfile @@ -54,6 +54,9 @@ RUN echo "source /lagoon/entrypoints/61-php-xdebug-cli-env.sh" >> /home/.bashrc COPY 55-cli-helpers.sh /lagoon/entrypoints/ RUN echo "source /lagoon/entrypoints/55-cli-helpers.sh" >> /home/.bashrc +RUN if [ ${PHP_VERSION%.*} == "5.6" ] || [ ${PHP_VERSION%.*} == "7.0" ] ; then \ + echo echo \"PHP ${PHP_VERSION} is end of life and should no longer be used. 
For more information, visit https://secure.php.net/eol.php\" \> /dev/stderr >> /home/.bashrc ; fi + # SSH Key and Agent Setup COPY 05-ssh-key.sh /lagoon/entrypoints/ COPY 10-ssh-agent.sh /lagoon/entrypoints/ diff --git a/node-packages/commons/src/api.js b/node-packages/commons/src/api.js index a2ee91e67e..71cfd5dda4 100644 --- a/node-packages/commons/src/api.js +++ b/node-packages/commons/src/api.js @@ -731,24 +731,24 @@ const updateEnvironment = ( async function deleteEnvironment( name: string, project: string, + execute: boolean = true, ): Promise { - const result = await graphqlapi.query(` - { - project:projectByName(name: "${project}"){ - id - } - } - `); - - if (!result || !result.project) { - throw new ProjectNotFound(`Cannot load id for project ${project}`); + return graphqlapi.mutate( + ` + ($name: String!, $project: String!, $execute: Boolean) { + deleteEnvironment(input: { + name: $name + project: $project + execute: $execute + }) } - - return graphqlapi.query(` - mutation { - deleteEnvironment(input: {name: "${name}", project: ${result.project.id}}) - } - `); + `, + { + name, + project, + execute, + }, + ); } const getOpenShiftInfoForProject = (project: string): Promise => diff --git a/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql b/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql index 89398fd2a6..a53e1bf73f 100644 --- a/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql +++ b/services/api-db/docker-entrypoint-initdb.d/03-procedures.sql @@ -164,7 +164,8 @@ CREATE OR REPLACE PROCEDURE e.* FROM environment e WHERE e.name = name AND - deleted = '0000-00-00 00:00:00'; + e.project = pid AND + e.deleted = '0000-00-00 00:00:00'; END; $$ diff --git a/services/api/src/clients/aws.js b/services/api/src/clients/aws.js index 5d42302ae5..45f599dbd8 100644 --- a/services/api/src/clients/aws.js +++ b/services/api/src/clients/aws.js @@ -4,9 +4,9 @@ const R = require('ramda'); const AWS = require('aws-sdk'); const s3Host = R.propOr('http://docker.for.mac.localhost:9000', 'S3_HOST', process.env); -const accessKeyId = R.propOr('minio', 'S3_ACCESS_KEY_ID', process.env); -const secretAccessKey = R.propOr('minio123', 'S3_SECRET_ACCESS_KEY', process.env); -const bucket = R.propOr('api-files', 'S3_BUCKET', process.env); +const accessKeyId = R.propOr('minio', 'S3_ACCESS_KEY_ID', process.env); +const secretAccessKey = R.propOr('minio123', 'S3_SECRET_ACCESS_KEY', process.env); +const bucket = R.propOr('lagoon-files', 'S3_BUCKET', process.env); const s3 = new AWS.S3({ endpoint: s3Host, diff --git a/services/api/src/resolvers.js b/services/api/src/resolvers.js index 25759e9887..8158926c16 100644 --- a/services/api/src/resolvers.js +++ b/services/api/src/resolvers.js @@ -29,6 +29,8 @@ const { deleteTask, updateTask, taskDrushArchiveDump, + taskDrushSqlDump, + taskDrushCacheClear, taskDrushSqlSync, taskDrushRsyncFiles, taskSubscriber, @@ -273,6 +275,8 @@ const resolvers /* : { [string]: ResolversObj | typeof GraphQLDate } */ = { deleteEnvVariable, addTask, taskDrushArchiveDump, + taskDrushSqlDump, + taskDrushCacheClear, taskDrushSqlSync, taskDrushRsyncFiles, deleteTask, diff --git a/services/api/src/resources/environment/resolvers.js b/services/api/src/resources/environment/resolvers.js index c374cad45b..0dffcc3b2e 100644 --- a/services/api/src/resources/environment/resolvers.js +++ b/services/api/src/resources/environment/resolvers.js @@ -1,6 +1,8 @@ // @flow const R = require('ramda'); +const { sendToLagoonLogs } = require('@lagoon/commons/src/logs'); +const { 
createRemoveTask } = require('@lagoon/commons/src/tasks'); const esClient = require('../../clients/esClient'); const sqlClient = require('../../clients/sqlClient'); const { @@ -12,6 +14,8 @@ const { whereAnd, } = require('../../util/db'); const Sql = require('./sql'); +const projectSql = require('../project/sql'); +const projectHelpers = require('../project/helpers'); /* :: @@ -472,17 +476,99 @@ const addOrUpdateEnvironmentStorage = async ( const deleteEnvironment = async ( root, - { input }, - { credentials: { role } }, + { + input, + input: { + project: projectName, + name, + execute, + }, + }, + { credentials: { role, permissions: { customers, projects } } }, ) => { if (role !== 'admin') { - throw new Error('Unauthorized'); + const prep = prepare(sqlClient, 'SELECT `id` AS `pid`, `customer` AS `cid` FROM project WHERE `name` = :name'); + const rows = await query(sqlClient, prep({ name: projectName })); + + if ( + !R.contains(R.path(['0', 'pid'], rows), projects) && + !R.contains(R.path(['0', 'cid'], rows), customers) + ) { + throw new Error('Unauthorized.'); + } + } + + const projectId = await projectHelpers.getProjectIdByName(projectName); + + const projectRows = await query( + sqlClient, + projectSql.selectProject(projectId), + ); + const project = projectRows[0]; + + const environmentRows = await query( + sqlClient, + Sql.selectEnvironmentByNameAndProject(name, projectId), + ); + const environment = environmentRows[0]; + + if (!environment) { + throw new Error(`Environment "${name}" does not exist in project "${projectId}"`); + } + + if (role !== 'admin' && environment.environmentType === 'production') { + throw new Error('Unauthorized - You may not delete a production environment'); + } + + // Deleting environment in api w/o executing the openshift remove. + // This gets called by openshiftremove service after successful remove. 
+ if (role === 'admin' && execute === false) { + const prep = prepare(sqlClient, 'CALL DeleteEnvironment(:name, :project)'); + await query(sqlClient, prep({ name, project: projectId })); + + // TODO: maybe check rows for changed result + return 'success'; } - const prep = prepare(sqlClient, 'CALL DeleteEnvironment(:name, :project)'); - await query(sqlClient, prep(input)); + let data = { + projectName: project.name, + type: environment.deployType, + forceDeleteProductionEnvironment: role === 'admin', + }; + + const meta = { + projectName: data.projectName, + environmentName: environment.name, + }; + + switch (environment.deployType) { + case 'branch': + case 'promote': + data = { + ...data, + branch: name, + }; + break; + + case 'pullrequest': + data = { + ...data, + pullrequestNumber: environment.name.replace('pr-', ''), + }; + break; + + default: + sendToLagoonLogs('error', data.projectName, '', 'api:deleteEnvironment:error', meta, + `*[${data.projectName}]* Unknown deploy type ${environment.deployType} \`${environment.name}\``, + ); + return `Error: unknown deploy type ${environment.deployType}`; + } + + await createRemoveTask(data); + sendToLagoonLogs('info', data.projectName, '', 'api:deleteEnvironment', meta, + `*[${data.projectName}]* Deleting environment \`${environment.name}\``, + ); - // TODO: maybe check rows for changed result return 'success'; }; diff --git a/services/api/src/resources/environment/sql.js b/services/api/src/resources/environment/sql.js index b85f9d905c..1fa900db52 100644 --- a/services/api/src/resources/environment/sql.js +++ b/services/api/src/resources/environment/sql.js @@ -18,6 +18,11 @@ const Sql /* : SqlObj */ = { knex('environment') .where('id', '=', id) .toString(), + selectEnvironmentByNameAndProject: (name /* : string */, projectId /* : numbere */) => + knex('environment') + .where('name', '=', name) + .andWhere('project', '=', projectId) + .toString(), truncateEnvironment: () => knex('environment') .truncate() diff --git a/services/api/src/resources/project/sql.js b/services/api/src/resources/project/sql.js index 4f6c8c671b..a5421a5736 100644 --- a/services/api/src/resources/project/sql.js +++ b/services/api/src/resources/project/sql.js @@ -87,6 +87,11 @@ const Sql /* : SqlObj */ = { .where('project_via_user.id', projectId) .orWhere('project_via_customer.id', projectId) .toString(), + selectPermsForProject: (id /* : number */) => + knex('project') + .select({ pid: 'id', cid: 'customer' }) + .where('id', id) + .toString(), updateProject: ({ id, patch } /* : {id: number, patch: {[string]: any}} */) => knex('project') .where('id', '=', id) diff --git a/services/api/src/resources/task/resolvers.js b/services/api/src/resources/task/resolvers.js index aea05fc91c..fcb66bac9d 100644 --- a/services/api/src/resources/task/resolvers.js +++ b/services/api/src/resources/task/resolvers.js @@ -262,12 +262,12 @@ const taskDrushArchiveDump = async ( await envValidators.userAccessEnvironment(credentials, environmentId); await envValidators.environmentHasService(environmentId, 'cli'); - const command = String.raw`drush ard --pipe | \ -xargs -I_file curl -sS "$TASK_API_HOST"/graphql \ + const command = String.raw`file="/tmp/$LAGOON_SAFE_PROJECT-$LAGOON_GIT_SAFE_BRANCH-$(date --iso-8601=seconds).tar" && drush ard --destination=$file && \ +curl -sS "$TASK_API_HOST"/graphql \ -H "Authorization: Bearer $TASK_API_AUTH" \ -F operations='{ "query": "mutation ($task: Int!, $files: [Upload!]!) 
{ uploadFilesForTask(input:{task:$task, files:$files}) { id files { filename } } }", "variables": { "task": '"$TASK_DATA_ID"', "files": [null] } }' \ -F map='{ "0": ["variables.files.0"] }' \ --F 0=@_file +-F 0=@$file; rm -rf $file; `; const taskData = await Helpers.addTask({ @@ -281,6 +281,73 @@ xargs -I_file curl -sS "$TASK_API_HOST"/graphql \ return taskData; }; +const taskDrushSqlDump = async ( + root, + { + environment: environmentId, + }, + { + credentials, + }, +) => { + await envValidators.environmentExists(environmentId); + await envValidators.userAccessEnvironment(credentials, environmentId); + await envValidators.environmentHasService(environmentId, 'cli'); + + const command = String.raw`file="/tmp/$LAGOON_SAFE_PROJECT-$LAGOON_GIT_SAFE_BRANCH-$(date --iso-8601=seconds).sql" && drush sql-dump --result-file=$file --gzip && \ +curl -sS "$TASK_API_HOST"/graphql \ +-H "Authorization: Bearer $TASK_API_AUTH" \ +-F operations='{ "query": "mutation ($task: Int!, $files: [Upload!]!) { uploadFilesForTask(input:{task:$task, files:$files}) { id files { filename } } }", "variables": { "task": '"$TASK_DATA_ID"', "files": [null] } }' \ +-F map='{ "0": ["variables.files.0"] }' \ +-F 0=@$file.gz; rm -rf $file.gz +`; + + const taskData = await Helpers.addTask({ + name: 'Drush sql-dump', + environment: environmentId, + service: 'cli', + command, + execute: true, + }); + + return taskData; +}; + +const taskDrushCacheClear = async ( + root, + { + environment: environmentId, + }, + { + credentials, + }, +) => { + await envValidators.environmentExists(environmentId); + await envValidators.userAccessEnvironment(credentials, environmentId); + await envValidators.environmentHasService(environmentId, 'cli'); + + const command = 'drupal_version=$(drush status drupal-version --format=list) && \ + if [ ${drupal_version%.*.*} == "8" ]; then \ + drush cr; \ + elif [ ${drupal_version%.*} == "7" ]; then \ + drush cc all; \ + else \ + echo \"could not clear cache for found Drupal Version ${drupal_version}\"; \ + exit 1; \ + fi'; + + + const taskData = await Helpers.addTask({ + name: 'Drush cache-clear', + environment: environmentId, + service: 'cli', + command, + execute: true, + }); + + return taskData; +}; + const taskDrushSqlSync = async ( root, { @@ -347,7 +414,7 @@ const taskSubscriber = createEnvironmentFilteredSubscriber( [ EVENTS.TASK.ADDED, EVENTS.TASK.UPDATED, - ] + ], ); const Resolvers /* : ResolversObj */ = { @@ -357,9 +424,12 @@ const Resolvers /* : ResolversObj */ = { deleteTask, updateTask, taskDrushArchiveDump, + taskDrushSqlDump, + taskDrushCacheClear, taskDrushSqlSync, taskDrushRsyncFiles, taskSubscriber, }; + module.exports = Resolvers; diff --git a/services/api/src/typeDefs.js b/services/api/src/typeDefs.js index 873b9927af..ff6eb430ff 100644 --- a/services/api/src/typeDefs.js +++ b/services/api/src/typeDefs.js @@ -421,7 +421,8 @@ const typeDefs = gql` input DeleteEnvironmentInput { name: String! - project: Int! + project: String! + execute: Boolean } type Query { @@ -927,6 +928,8 @@ const typeDefs = gql` deleteEnvVariable(input: DeleteEnvVariableInput!): String addTask(input: TaskInput!): Task taskDrushArchiveDump(environment: Int!): Task + taskDrushSqlDump(environment: Int!): Task + taskDrushCacheClear(environment: Int!): Task taskDrushSqlSync( sourceEnvironment: Int! destinationEnvironment: Int! 
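The `DeleteEnvironmentInput` change above switches `project` from the internal numeric id to the project name and adds an optional `execute` flag; unless an admin passes `execute: false`, the resolver dispatches the OpenShift removal task. A sketch of calling the new mutation directly over HTTP, with a placeholder endpoint and token, the `/graphql` path and bearer auth mirroring the task commands elsewhere in this diff:

```
# API_HOST and API_TOKEN are placeholders; substitute a reachable Lagoon API
# endpoint and a valid JWT.
API_HOST="https://api.lagoon.example.com"
API_TOKEN="xxxxx"

curl -sS "$API_HOST/graphql" \
  -H "Authorization: Bearer $API_TOKEN" \
  -H "Content-Type: application/json" \
  --data '{
    "query": "mutation ($name: String!, $project: String!, $execute: Boolean) { deleteEnvironment(input: { name: $name, project: $project, execute: $execute }) }",
    "variables": { "name": "feature-1", "project": "drupal-example", "execute": true }
  }'
```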
diff --git a/services/logs-db-ui/Dockerfile b/services/logs-db-ui/Dockerfile index 565d3f6d08..aa236b4a36 100644 --- a/services/logs-db-ui/Dockerfile +++ b/services/logs-db-ui/Dockerfile @@ -46,9 +46,12 @@ searchguard.openid.base_redirect_url: "${LOGSDB_UI_URL:-http://0.0.0.0:5601}"\n\ searchguard.openid.scope: "profile email"\n\ \n\ searchguard.cookie.password: "${SEARCHGUARD_COOKIE_PASSWORD:-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa}"\n\ +\n\ +# disable kibana spaces, as they don\'t work with searchguard multitenancy\n\ +xpack.spaces.enabled: false\n\ ' >> config/kibana.yml -RUN bin/kibana-plugin install https://search.maven.org/remotecontent?filepath=com/floragunn/search-guard-kibana-plugin/6.4.2-15/search-guard-kibana-plugin-6.4.2-15.zip +RUN bin/kibana-plugin install https://search.maven.org/remotecontent?filepath=com/floragunn/search-guard-kibana-plugin/6.5.4-17/search-guard-kibana-plugin-6.5.4-17.zip COPY entrypoints/80-keycloak-url.bash /lagoon/entrypoints/ COPY entrypoints/81-logs-db-ui-url.bash /lagoon/entrypoints/ diff --git a/services/logs-db/Dockerfile b/services/logs-db/Dockerfile index 67f73abd59..0b6866f07a 100644 --- a/services/logs-db/Dockerfile +++ b/services/logs-db/Dockerfile @@ -1,7 +1,7 @@ ARG IMAGE_REPO FROM ${IMAGE_REPO:-lagoon}/elasticsearch -RUN bin/elasticsearch-plugin install -b com.floragunn:search-guard-6:6.4.2-23.1 \ +RUN bin/elasticsearch-plugin install -b com.floragunn:search-guard-6:6.5.4-24.0 \ && chmod a+x plugins/search-guard-6/tools/install_demo_configuration.sh \ && plugins/search-guard-6/tools/install_demo_configuration.sh -y \ && sed -i 's/searchguard.ssl.http.*//' config/elasticsearch.yml \ diff --git a/services/openshiftremove/src/index.js b/services/openshiftremove/src/index.js index ef5f428b8d..b2cdb6933d 100644 --- a/services/openshiftremove/src/index.js +++ b/services/openshiftremove/src/index.js @@ -52,6 +52,11 @@ const messageConsumer = async function(msg) { environmentName = branch openshiftProject = `${safeProjectName}-${safeBranchName}` break; + + case 'promote': + environmentName = branch + openshiftProject = `${projectName}-${branch}` + break; } } catch(error) { @@ -98,7 +103,7 @@ const messageConsumer = async function(msg) { ) // Update GraphQL API that the Environment has been deleted try { - await deleteEnvironment(environmentName, projectName) + await deleteEnvironment(environmentName, projectName, false) logger.info(`${openshiftProject}: Deleted Environment '${environmentName}' in API`) } catch (err) { logger.error(err) @@ -160,7 +165,7 @@ const messageConsumer = async function(msg) { // Update GraphQL API that the Environment has been deleted try { - await deleteEnvironment(environmentName, projectName) + await deleteEnvironment(environmentName, projectName, false) logger.info(`${openshiftProject}: Deleted Environment '${environmentName}' in API`) } catch (err) { logger.error(err) diff --git a/services/rest2tasks/src/index.js b/services/rest2tasks/src/index.js index 2df21c2ce5..081779f997 100755 --- a/services/rest2tasks/src/index.js +++ b/services/rest2tasks/src/index.js @@ -143,72 +143,6 @@ app.post('/pullrequest/deploy', async (req, res) => { }) - -app.post('/pullrequest/remove', async (req, res) => { - - req.checkBody({ - 'projectName': { - notEmpty: true, - matches: { - options: [/^[a-zA-Z0-9-_]+$/], - errorMessage: 'projectName must be defined and must only contain alphanumeric, dashes and underline' - }, - }, - 'pullrequestNumber': { - notEmpty: true, - isInt: {}, - matches: { - errorMessage: 'pullrequestNumber must be defined and 
a number' - }, - } - }); - - const result = await req.getValidationResult() - - if (!result.isEmpty()) { - res.status(400).send('There have been validation errors: ' + util.inspect(result.mapped())); - return; - } - - const data = { - projectName: req.body.projectName, - pullrequestNumber: req.body.pullrequestNumber, - type: 'pullrequest' - } - - const meta = { - projectName: data.projectName, - pullrequestNumber: data.pullrequestNumber - } - - try { - const taskResult = await createRemoveTask(data); - - sendToLagoonLogs('info', data.projectName, '', `rest:pullrequest:remove`, meta, - `*[${data.projectName}]* REST remove trigger \`${data.pullrequestNumber}\`` - ) - - res.status(200).type('json').send({ "ok": "true", "message": taskResult}) - return; - } catch (error) { - switch (error.name) { - case "ProjectNotFound": - case "ActiveSystemsNotFound": - case "UnknownActiveSystem": - res.status(404).type('json').send({ "ok": "false", "message": error.message}) - return; - break; - - default: - logger.error(error) - res.status(500).type('json').send({ "ok": "false", "message": `Internal Error: ${error}`}) - return; - break; - } - } - -}) - app.post('/deploy', async (req, res) => { req.checkBody({ @@ -377,75 +311,3 @@ app.post('/promote', async (req, res) => { } }) - - -app.post('/remove', async (req, res) => { - - req.checkBody({ - 'projectName': { - notEmpty: true, - matches: { - options: [/^[a-zA-Z0-9-_]+$/], - errorMessage: 'projectName must be defined and must only contain alphanumeric, dashes and underline' - }, - }, - 'branch': { - notEmpty: true, - matches: { - options: [/^[a-zA-Z0-9-._\/]+$/], - errorMessage: 'branch must be defined and must only contain alphanumeric, dashes, underline, dots and slashes' - }, - }, - 'forceDeleteProductionEnvironment': { - optional: {}, - isBoolean: { - errorMessage: 'should be a boolean' - } - } - }); - - const result = await req.getValidationResult() - - if (!result.isEmpty()) { - res.status(400).send('There have been validation errors: ' + util.inspect(result.mapped())); - return; - } - - const data = { - projectName: req.body.projectName, - branch: req.body.branch, - type: 'branch', - forceDeleteProductionEnvironment: req.body.forceDeleteProductionEnvironment, - } - - console.log(data) - - const meta = { - projectName: data.projectName, - branchName: data.branch - } - - try { - const taskResult = await createRemoveTask(data); - sendToLagoonLogs('info', data.projectName, '', `rest:remove:receive`, meta, - `*[${data.projectName}]* REST remove trigger \`${data.branch}\`` - ) - res.status(200).type('json').send({ "ok": "true", "message": taskResult}) - return; - } catch (error) { - switch (error.name) { - case "ProjectNotFound": - case "ActiveSystemsNotFound": - res.status(404).type('json').send({ "ok": "false", "message": error.message}) - return; - break; - - default: - logger.error(error) - res.status(500).type('json').send({ "ok": "false", "message": `Internal Error: ${error}`}) - return; - break; - } - } - -}) diff --git a/services/ui/package.json b/services/ui/package.json index d60f4996f3..c1cd7f6551 100644 --- a/services/ui/package.json +++ b/services/ui/package.json @@ -32,6 +32,7 @@ "react-copy-to-clipboard": "^5.0.1", "react-dom": "^16.4.2", "react-highlight-words": "^0.14.0", + "react-modal": "^3.8.1", "react-select": "^2.1.1", "react-typekit": "^1.1.3", "recompose": "^0.30.0", diff --git a/services/ui/src/components/AddTask/components/DrushCacheClear.js b/services/ui/src/components/AddTask/components/DrushCacheClear.js new file mode 100644 
index 0000000000..6f0e7cc32b --- /dev/null +++ b/services/ui/src/components/AddTask/components/DrushCacheClear.js @@ -0,0 +1,96 @@ +import React from 'react'; +import { Mutation } from 'react-apollo'; +import gql from 'graphql-tag'; +import ReactSelect from 'react-select'; +import { bp, color, fontSize } from '../../../variables'; + +const taskDrushCacheClear = gql` + mutation taskDrushCacheClear( + $environment: Int! + ) { + taskDrushCacheClear( + environment: $environment + ) { + id + name + status + created + started + completed + remoteId + command + service + } + } +`; + +const DrushCacheClear = ({ + pageEnvironment, + onCompleted, + onError, +}) => ( + + {(taskDrushCacheClear, { loading, called, error, data }) => { + return ( + +
+ + +
+ + +
+ ); + }} +
+); + +export default DrushCacheClear; diff --git a/services/ui/src/components/AddTask/components/DrushSqlDump.js b/services/ui/src/components/AddTask/components/DrushSqlDump.js new file mode 100644 index 0000000000..0084bed049 --- /dev/null +++ b/services/ui/src/components/AddTask/components/DrushSqlDump.js @@ -0,0 +1,96 @@ +import React from 'react'; +import { Mutation } from 'react-apollo'; +import gql from 'graphql-tag'; +import ReactSelect from 'react-select'; +import { bp, color, fontSize } from '../../../variables'; + +const taskDrushSqlDump = gql` + mutation taskDrushSqlDump( + $environment: Int! + ) { + taskDrushSqlDump( + environment: $environment + ) { + id + name + status + created + started + completed + remoteId + command + service + } + } +`; + +const DrushSqlDump = ({ + pageEnvironment, + onCompleted, + onError, +}) => ( + + {(taskDrushSqlDump, { loading, called, error, data }) => { + return ( + +
+ + +
+ + +
+ ); + }} +
+); + +export default DrushSqlDump; diff --git a/services/ui/src/components/AddTask/index.js b/services/ui/src/components/AddTask/index.js index b410bff6e1..f04240542d 100644 --- a/services/ui/src/components/AddTask/index.js +++ b/services/ui/src/components/AddTask/index.js @@ -2,6 +2,8 @@ import React from 'react'; import ReactSelect from 'react-select'; import withLogic from './logic'; import DrushArchiveDump from './components/DrushArchiveDump'; +import DrushSqlDump from './components/DrushSqlDump'; +import DrushCacheClear from './components/DrushCacheClear'; import DrushRsyncFiles from './components/DrushRsyncFiles'; import DrushSqlSync from './components/DrushSqlSync'; import Empty from './components/Empty'; @@ -20,6 +22,8 @@ const AddTask = ({ }) => { const newTaskComponents = { DrushArchiveDump, + DrushSqlDump, + DrushCacheClear, DrushRsyncFiles, DrushSqlSync, Empty, diff --git a/services/ui/src/components/AddTask/logic.js b/services/ui/src/components/AddTask/logic.js index d7ad7313ab..9f94cf91ac 100644 --- a/services/ui/src/components/AddTask/logic.js +++ b/services/ui/src/components/AddTask/logic.js @@ -19,17 +19,26 @@ const withOptions = withProps(({ pageEnvironment }) => { return { options: [ { - label: 'Drush sql-sync', - value: 'DrushSqlSync' + label: 'Drush cache-clear', + value: 'DrushCacheClear' }, { - label: 'Drush archive-dump', - value: 'DrushArchiveDump' + label: 'Drush sql-sync', + value: 'DrushSqlSync' }, { label: 'Drush rsync', value: 'DrushRsyncFiles' + }, + { + label: 'Drush sql-dump', + value: 'DrushSqlDump' + }, + { + label: 'Drush archive-dump (D7 only)', + value: 'DrushArchiveDump' } + ] }; }); diff --git a/services/ui/src/components/DeleteConfirm/index.js b/services/ui/src/components/DeleteConfirm/index.js new file mode 100644 index 0000000000..e650dd06bc --- /dev/null +++ b/services/ui/src/components/DeleteConfirm/index.js @@ -0,0 +1,106 @@ +import React from 'react'; +import Modal from '../Modal'; +import { color, fontSize, bp } from '../../variables'; +import withLogic from './logic'; + +const DeleteConfirm = ({ + deleteType, + deleteName, + onDelete, + inputValue, + setInputValue, + open, + openModal, + closeModal +}) => { + return ( + + + + +

+ This will delete all resources associated with the {deleteType}{' '} + {deleteName} and cannot be + undone. Make sure this is something you really want to do! +

+

Type the name of the {deleteType} to confirm.

+
+ + + cancel + + +
+
+
+ +
+ ); +}; + +export default withLogic(DeleteConfirm); diff --git a/services/ui/src/components/DeleteConfirm/logic.js b/services/ui/src/components/DeleteConfirm/logic.js new file mode 100644 index 0000000000..f3c6cab89e --- /dev/null +++ b/services/ui/src/components/DeleteConfirm/logic.js @@ -0,0 +1,22 @@ +import compose from 'recompose/compose'; +import withState from 'recompose/withState'; +import withHandlers from 'recompose/withHandlers'; + +const withInputValue = withState('inputValue', 'setInputValue', ''); +const withInputHandlers = withHandlers({ + setInputValue: ({ setInputValue }) => event => + setInputValue(event.target.value) +}); + +const withModalState = withState('open', 'setOpen', false); +const withModalHandlers = withHandlers({ + openModal: ({ setOpen }) => () => setOpen(true), + closeModal: ({ setOpen }) => () => setOpen(false) +}); + +export default compose( + withInputValue, + withInputHandlers, + withModalState, + withModalHandlers +); diff --git a/services/ui/src/components/Environment/index.js b/services/ui/src/components/Environment/index.js index 40113fedf4..ec4077515a 100644 --- a/services/ui/src/components/Environment/index.js +++ b/services/ui/src/components/Environment/index.js @@ -1,155 +1,223 @@ import React from 'react'; import Link from 'next/link'; import moment from 'moment'; +import gql from 'graphql-tag'; +import { Mutation } from 'react-apollo'; +import DeleteConfirm from '../DeleteConfirm'; import { bp, color } from '../../variables'; import giturlparse from 'git-url-parse'; +const DELETE_ENVIRONMENT_MUTATION = gql` + mutation($input: DeleteEnvironmentInput!) { + deleteEnvironment(input: $input) + } +`; + class Environment extends React.Component { constructor(props) { super(props); const gitUrlParsed = giturlparse(this.props.environment.project.gitUrl); - const gitBranchLink = `${gitUrlParsed.resource}/${gitUrlParsed.full_name}/tree/${this.props.environment.name}`; + const gitBranchLink = `${gitUrlParsed.resource}/${ + gitUrlParsed.full_name + }/tree/${this.props.environment.name}`; this.state = { gitBranchLinkWithScheme: `https://${gitBranchLink}`, - gitBranchLink: gitBranchLink, + gitBranchLink: gitBranchLink }; } render() { return ( -
-
-
-
- -
{this.props.environment.environmentType}
+
+
+
+
+ +
+ {this.props.environment.environmentType} +
+
-
-
-
- -
{this.props.environment.deployType}
+
+
+ +
{this.props.environment.deployType}
+
-
-
-
- -
{moment.utc(this.props.environment.created).local().format('DD MMM YYYY, HH:mm:ss')}
+
+
+ +
+ {moment + .utc(this.props.environment.created) + .local() + .format('DD MMM YYYY, HH:mm:ss')} +
+
-
-
-
- -
{moment.utc(this.props.environment.updated).local().format('DD MMM YYYY, HH:mm:ss')}
+
+
+ +
+ {moment + .utc(this.props.environment.updated) + .local() + .format('DD MMM YYYY, HH:mm:ss')} +
+
-
-
- -
-
- -
- {this.props.environment.routes ? this.props.environment.routes.split(',').map(route => ) : ''} +
+
+ +
+ {this.props.environment.routes + ? this.props.environment.routes.split(',').map(route => ( +
+ + {route} + +
+ )) + : ''} +
+ + {(deleteEnvironment, { loading, called, error, data }) => { + if (error) { + return
{error.message}
; + } + + if (called) { + return
Delete queued
; + } + + return ( + + deleteEnvironment({ + variables: { + input: { + name: this.props.environment.name, + project: this.props.environment.project.name + } + } + }) + } + /> + ); + }} +
-
- -
+ `} +
); } } -export default Environment; \ No newline at end of file +export default Environment; diff --git a/services/ui/src/components/Modal/index.js b/services/ui/src/components/Modal/index.js new file mode 100644 index 0000000000..9e3ca35736 --- /dev/null +++ b/services/ui/src/components/Modal/index.js @@ -0,0 +1,60 @@ +import React from 'react'; +import ReactModal from 'react-modal'; +import { color, bp } from '../../variables'; + +ReactModal.setAppElement('#__next'); + +const Modal = ({ isOpen, onRequestClose, contentLabel, children, ...rest }) => ( + + + {children} + + + +); + +export default Modal; diff --git a/services/ui/src/components/NavTabs/index.js b/services/ui/src/components/NavTabs/index.js index 7c20bdcd46..1471bee3b0 100644 --- a/services/ui/src/components/NavTabs/index.js +++ b/services/ui/src/components/NavTabs/index.js @@ -19,11 +19,11 @@ const NavTabs = ({activeTab, environment}) => ( Backups - {/*
  • +
  • Tasks -
  • */} +