diff --git a/helpers/annotate-pvc-backup.sh b/helpers/annotate-pvc-backup.sh deleted file mode 100644 index 1b5dd04067..0000000000 --- a/helpers/annotate-pvc-backup.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -## -## This annotates all PVCs with the name `solr` and `nginx` with appuio.ch/backup="true" in order that the restic backup system will back them up -## - -oc get pvc --all-namespaces | grep solr | sed '1d' | awk '{ print $2, "--namespace", $1 }' | while read line; do oc annotate --overwrite pvc $line appuio.ch/backup="true"; done -oc get pvc --all-namespaces | grep nginx | sed '1d' | awk '{ print $2, "--namespace", $1 }' | while read line; do oc annotate --overwrite pvc $line appuio.ch/backup="true"; done - -oc get --all-namespaces pod -l 'service in (cli)' | sed '1d' | awk '{ print "--namespace", $1, "pod", $2 }' | while read line; do oc annotate $line --overwrite appuio.ch/backupcommand='/bin/sh -c "if [[ $MARIADB_HOST ]]; then dump=$(mktemp) && mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -h $MARIADB_HOST -u $MARIADB_USERNAME -p$MARIADB_PASSWORD $MARIADB_DATABASE > $dump && cat $dump && rm $dump; fi"'; done \ No newline at end of file diff --git a/helpers/check_acme_routes.sh b/helpers/check_acme_routes.sh deleted file mode 100755 index c4dc864235..0000000000 --- a/helpers/check_acme_routes.sh +++ /dev/null @@ -1,301 +0,0 @@ -#!/bin/bash - -# Description: script to check routes with exposer pods. -# In case of no DNS record or mis-configuration, script will update the route -# by disabling the tls-acme, removing other acme related annotations and add -# an interal one for filtering purpose - -set -eu -o pipefail - -# Set DEBUG variable to true, to start bash in debug mode -DEBUG="${DEBUG:-"false"}" -if [ "$DEBUG" = "true" ]; then - set -x -fi - -# Some variables - -# Cluster full hostname and API hostname -CLUSTER_HOSTNAME="${CLUSTER_HOSTNAME:-""}" -CLUSTER_API_HOSTNAME="${CLUSTER_API_HOSTNAME:-"$CLUSTER_HOSTNAME"}" - -# Default command -COMMAND=${1:-"help"} - -# Set DRYRUN variable to true to run in dry-run mode -DRYRUN="${DRYRUN:-"false"}" - - -# Set a REGEX variable to filter the execution of the script -REGEX=${REGEX:-".*"} - -# Set NOTIFYONLY to true if you want to send customers a notification -# explaining why Lagoon is not able to issue Let'S Encrypt certificate for -# some routes defined in customer's .lagoon.yml file. -# If set to true, no other action rather than notification is done (ie: no annotation or deletion) -NOTIFYONLY=${NOTIFYONLY:-"false"} - -# Help function -function usage() { - echo -e "The available commands are: - - help (get this help) - - getpendingroutes (get a list of routes with acme \"orderStatus\" in Pending - - getdisabledroutes (get a list of routes with \"administratively-disabled\" annotation - - getbrokenroutes (get a list of all possible broken routes) - - updateroutes (update broken routes) - - By default, script doesn't set any default cluster to run routes' checks. Please set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME variables. - If you want to change the API endpoint, set CLUSTER_API_HOSTNAME variable. - If you want to change the cluster's hostname, set CLUSTER_HOSTNAME variable. - If you want to filter the execution of the script only for certain projects, set the REGEX variable. - If you want to test against a specific IP, set the CLUSTER_IPS array. 
-
-  Examples:
-  CLUSTER_HOSTNAME=\"ch.amazee.io\" CLUSTER_API_HOSTNAME=\"ch.amazeeio.cloud\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes with TLS in Pending status for the defined cluster)
-  REGEX=\"drupal-example\" ./check_acme_routes.sh getpendingroutes (Returns a list of all routes for all projects matching the regex \`drupal-example\` with TLS in Pending status)
-  REGEX=\"drupal-example-master\" DRYRUN=true ./check_acme_routes.sh updateroutes (Will run in DRYRUN mode to check and update all broken routes in \`drupal-example-master\` project)"
-
-}
-
-# Function that performs mandatory variable and dependency checks
-function initial_checks() {
-  # By default the script doesn't set CLUSTER_HOSTNAME and CLUSTER_API_HOSTNAME. At least CLUSTER_HOSTNAME must be set
-  if [ -z "$CLUSTER_HOSTNAME" ]; then
-    echo "Please set CLUSTER_HOSTNAME variable"
-    usage
-    exit 1
-  fi
-
-  # Script depends on `lagoon-cli`. Check if it is installed
-  if [[ ! $(command -v lagoon) ]]; then
-    echo "Please install \`lagoon-cli\` from https://github.com/amazeeio/lagoon-cli because the script relies on it"
-    exit 1
-  fi
-}
-
-# function to get a list of all "administratively-disabled" routes
-function get_all_disabled_routes() {
-  echo -e "List of routes administratively disabled\n"
-  oc get route --all-namespaces -o=jsonpath="{range .items[?(@.metadata.annotations.amazee\.io/administratively-disabled)]}{.metadata.namespace}{'\t'}{.metadata.name}{'\n'}{end}"
-  exit 0
-}
-
-# Function to check if you are running the script on the right cluster and if you're logged in correctly
-function check_cluster_api() {
-  # Check on which cluster you're going to run commands
-  if oc whoami --show-server | grep -q -v "$CLUSTER_API_HOSTNAME"; then
-    echo "Please connect to the right cluster"
-    exit 1
-  fi
-
-  # Check if you're logged in correctly
-  if [ $(oc status|grep -q "Unauthorized";echo $?) -eq 0 ]; then
-    echo "Please log in to the cluster"
-    exit 1
-  fi
-}
-
-# Function to get a list of all routes with acme.openshift.io/status.provisioningStatus.orderStatus=pending
-function get_pending_routes() {
-  for namespace in $(oc get projects --no-headers=true |awk '{print $1}'|sort -u|grep -E "$REGEX")
-  do
-    IFS=$';'
-    # For each route in a namespace with `tls-acme` set to true, check if the `orderStatus` is in pending status
-    for routelist in $(oc get route -n "$namespace" -o=jsonpath="{range .items[?(@.metadata.annotations.kubernetes\.io/tls-acme=='true')]}{.metadata.name}{'\n'}{.metadata.annotations.acme\.openshift\.io/status}{';'}{end}"|sed "s/^[[:space:]]*//")
-    do
-      PENDING_ROUTE_NAME=$(echo "$routelist"|sed -n 1p)
-      if echo "$routelist"|sed -n 4p | grep -q pending; then
-        STATUS="Pending"
-        echo "Route $PENDING_ROUTE_NAME in $namespace is in $STATUS status"
-      fi
-
-    done
-    unset IFS
-  done
-}
-
-# Function for creating an array with all routes that might be updated
-function create_routes_array() {
-  # Get the list of namespaces with broken routes, according to REGEX
-  for namespace in $(oc get routes --all-namespaces|grep exposer|awk '{print $1}'|sort -u|grep -E "$REGEX")
-  do
-    # Raw JSON Openshift project output
-    PROJECTJSON="$(oc get project "$namespace" -o json)"
-
-    # Gather project name based on a label or an annotation
-    if [ $(echo $PROJECTJSON |grep -q 'lagoon.sh/project'; echo $?) -eq 0 ]; then
-      PROJECTNAME=$(echo "${PROJECTJSON}" | grep 'lagoon.sh/project' | awk -F'"' '{print $4}')
-    else
-      PROJECTNAME=$(echo "${PROJECTJSON}" |grep display-name|awk -F'[][]' '{print $2}'|tr "_" "-")
-    fi
-
-    # Get the list of broken unique routes for each namespace
-    for routelist in $(oc get -n "$namespace" route|grep exposer|awk -vNAMESPACE="$namespace" -vPROJECTNAME="$PROJECTNAME" '{print $1";"$2";"NAMESPACE";"PROJECTNAME}'|sort -u -k2 -t ";")
-    do
-      # Put the list into an array
-      ROUTES_ARRAY+=("$routelist")
-    done
-  done
-
-  # Create a sorted array of unique routes to check
-  ROUTES_ARRAY_SORTED=($(sort -u -k 2 -t ";"<<<"${ROUTES_ARRAY[*]}"))
-}
-
-# Function to check the routes, update them and delete the exposer's routes
-function check_routes() {
-
-  # Cluster array of IPs
-  CLUSTER_IPS=($(dig +short "$CLUSTER_HOSTNAME"))
-  for i in "${ROUTES_ARRAY_SORTED[@]}"
-  do
-    # Transform the item into an array
-    route=($(echo "$i" | tr ";" "\n"))
-
-    # Gather some useful variables
-    ROUTE_NAME=${route[0]}
-    ROUTE_HOSTNAME=${route[1]}
-    ROUTE_NAMESPACE=${route[2]}
-    ROUTE_PROJECTNAME=${route[3]}
-
-    # Get route DNS record(s)
-    if [[ $(dig +short "$ROUTE_HOSTNAME" &> /dev/null; echo $?) -ne 0 ]]; then
-      ROUTE_HOSTNAME_IP="null"
-    else
-      ROUTE_HOSTNAME_IP=$(dig +short "$ROUTE_HOSTNAME")
-    fi
-
-    # Check if the route matches the Cluster's IP(s)
-    if echo "$ROUTE_HOSTNAME_IP" | grep -E -q -v "${CLUSTER_IPS[*]}"; then
-
-      # If IP is empty, then no DNS record is set
-      if [ -z "$ROUTE_HOSTNAME_IP" ]; then
-        DNS_ERROR="No A or CNAME record set"
-      else
-        DNS_ERROR="$ROUTE_HOSTNAME in $ROUTE_NAMESPACE has no DNS record pointing to ${CLUSTER_IPS[*]}, going to disable tls-acme"
-      fi
-
-      # Print the error on stdout
-      echo "$DNS_ERROR"
-
-      if [[ "$NOTIFYONLY" = "true" ]]; then
-        notify_customer "$ROUTE_PROJECTNAME"
-      else
-        # Call the update function to update the route
-        update_annotation "$ROUTE_HOSTNAME" "$ROUTE_NAMESPACE"
-        notify_customer "$ROUTE_PROJECTNAME"
-
-        # Now once the main route is updated, it's time to get rid of the exposers' routes
-        for j in $(oc get -n "$ROUTE_NAMESPACE" route|grep exposer|grep -E '(^|\s)'"$ROUTE_HOSTNAME"'($|\s)'|awk '{print $1";"$2}')
-        do
-          ocroute=($(echo "$j" | tr ";" "\n"))
-          OCROUTE_NAME=${ocroute[0]}
-          if [[ $DRYRUN = true ]]; then
-            echo -e "DRYRUN oc delete -n $ROUTE_NAMESPACE route $OCROUTE_NAME"
-          else
-            echo -e "\nDelete route $OCROUTE_NAME"
-            oc delete -n "$ROUTE_NAMESPACE" route "$OCROUTE_NAME"
-          fi
-        done
-      fi
-    fi
-    echo -e "\n"
-
-
-  done
-}
-
-# Function to update the route's annotations (ie: update tls-acme, remove tls-acme-awaiting-* and set a new one for internal purposes)
-function update_annotation() {
-  echo "Update route's annotations"
-  OCOPTIONS="--overwrite"
-  if [[ "$DRYRUN" = "true" ]]; then
-    OCOPTIONS="--dry-run --overwrite"
-  fi
-
-  # Annotate the route
-  oc annotate -n "$2" $OCOPTIONS route "$1" acme.openshift.io/status- kubernetes.io/tls-acme-awaiting-authorization-owner- kubernetes.io/tls-acme-awaiting-authorization-at-url- kubernetes.io/tls-acme="false" amazee.io/administratively-disabled="$(date +%s)"
-}
-
-
-# Function to notify the customer about the misconfiguration of their routes
-function notify_customer() {
-
-  # Get Slack|Rocketchat channel and webhook
-  if [ $(TEST=$(lagoon list slack -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then
-    NOTIFICATION="slack"
-  elif [ $(TEST=$(lagoon list rocketchat -p "$1" --no-header|awk '{print $3";"$4}'); echo $?) -eq 0 ]; then
-    NOTIFICATION="rocketchat"
-  else
-    echo "No notification set"
-    return 0
-  fi
-
-  MESSAGE="Your $ROUTE_HOSTNAME route is configured in the \`.lagoon.yml\` file to issue a TLS certificate from Let's Encrypt. Unfortunately Lagoon is unable to issue a certificate as $DNS_ERROR.\nTo be issued correctly, the DNS records for $ROUTE_HOSTNAME should point to $CLUSTER_HOSTNAME with a CNAME record (preferred) or to ${CLUSTER_IPS[*]} via an A record (also possible but not preferred).\nIf you don't need the SSL certificate or you are using a CDN that provides you with a TLS certificate, please update your .lagoon.yml file by setting the tls-acme parameter to false for $ROUTE_HOSTNAME, as described here: https://lagoon.readthedocs.io/en/latest/using_lagoon/lagoon_yml/#ssl-configuration-tls-acme.\nWe have now administratively disabled the issuing of Let's Encrypt certificates for $ROUTE_HOSTNAME in order to protect the cluster. This will be reset during the next deployment, so we suggest resolving this issue as soon as possible. Feel free to reach out to us for further information.\nThank you.\namazee.io team"
-
-  NOTIFICATION_DATA=($(lagoon list $NOTIFICATION -p "$1" --no-header|awk '{print $3";"$4}'))
-  for notification in ${NOTIFICATION_DATA[@]}
-  do
-    CHANNEL=$(echo "$notification"|cut -f1 -d ";")
-    WEBHOOK=$(echo "$notification"|cut -f2 -d ";")
-
-    # json Payload
-    PAYLOAD="\"channel\": \"$CHANNEL\", \"text\": \"${MESSAGE}\""
-
-    echo -e "Sending notification to ${CHANNEL}"
-
-    # Execute curl to send the message into the channel
-    if [[ $DRYRUN = true ]]; then
-      echo "DRYRUN Sending notification on \"$NOTIFICATION\" curl -X POST -H 'Content-type: application/json' --data '{'"$PAYLOAD"'}' "$WEBHOOK""
-    else
-      curl -X POST -H 'Content-type: application/json' --data '{'"${PAYLOAD}"'}' ${WEBHOOK}
-    fi
-  done
-}
-
-# Main function
-function main() {
-
-  COMMAND="$1"
-
-  # Check first the cluster you're connected to
-  echo -e "You're running the script on $CLUSTER_HOSTNAME\nDRYRUN mode is set to \"$DRYRUN\""
-  check_cluster_api
-
-  case "$COMMAND" in
-    help)
-      usage
-      ;;
-    getpendingroutes)
-      get_pending_routes
-      ;;
-    getdisabledroutes)
-      get_all_disabled_routes
-      ;;
-    getbrokenroutes)
-      echo -e "\nCreating a list of possible broken routes"
-      create_routes_array
-      echo -e "ROUTE_NAMESPACE;ROUTE_NAME;ROUTE_HOSTNAME"|column -t -s ";"
-      for i in "${ROUTES_ARRAY_SORTED[@]}"
-      do
-        # Transform the item into an array
-        route=($(echo "$i" | tr ";" "\n"))
-        # Gather some useful variables
-        ROUTE_NAME=${route[0]}
-        ROUTE_HOSTNAME=${route[1]}
-        ROUTE_NAMESPACE=${route[2]}
-        echo -e "$ROUTE_NAMESPACE;$ROUTE_NAME;$ROUTE_HOSTNAME"|column -t -s ";"
-      done
-      ;;
-    updateroutes)
-      echo -e "Checking routes\n"
-      create_routes_array
-      check_routes
-      ;;
-    *)
-      usage
-      ;;
-  esac
-}
-
-initial_checks "$COMMAND"
-main "$COMMAND"
diff --git a/helpers/k8up-initiate-archive.sh b/helpers/k8up-initiate-archive.sh
deleted file mode 100755
index ba2cc4999d..0000000000
--- a/helpers/k8up-initiate-archive.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-function outputToYaml() {
-  IFS=''
-  while read data; do
-    echo "$data" >> /tmp/k8up-archive-initiate.yml;
-  done;
-}
-
-if [ -z "$OPENSHIFT_PROJECT" ]; then
-  echo "OPENSHIFT_PROJECT not set"
-  exit 1
-fi
-
-if [ -z "$ARCHIVE_BUCKET" ]; then
-  echo "ARCHIVE_BUCKET not set"
-  exit 1
-fi
-
-set -e -o pipefail
-
-OC="oc"
-
-rm -f /tmp/k8up-archive-initiate.yml;
-
-echo "${OPENSHIFT_PROJECT}: starting =================================================================="
- -# Fill environment variables which are needed by exec-openshift-resources.sh and the lagoon templates -CONFIGMAP=$($OC -n $OPENSHIFT_PROJECT get configmap lagoon-env -o json) -PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_PROJECT') -SAFE_PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_SAFE_PROJECT') -BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_BRANCH') -SAFE_BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_SAFE_BRANCH') -ENVIRONMENT_TYPE=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') -LAGOON_GIT_SHA="00000000000000000000000000000000000000000" -OPENSHIFT_REGISTRY="docker-registry.default.svc:5000" -ROUTER_URL="" -SERVICE_NAME="none" - -# If restic backups are supported by this cluster we create the schedule definition -if oc get customresourcedefinition schedules.backup.appuio.ch > /dev/null; then - - # create archive only if there is a backup-schedule already existing for this project - if oc -n ${OPENSHIFT_PROJECT} get schedule backup-schedule &> /dev/null; then - - # create archive only if this is a production environment - if [[ "${ENVIRONMENT_TYPE}" == "production" ]]; then - TEMPLATE_PARAMETERS=() - - # Run Archive on Monday at 0300-0600 - ARCHIVE_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-9) 7 * *") - TEMPLATE_PARAMETERS+=(-p ARCHIVE_SCHEDULE="${ARCHIVE_SCHEDULE}") - - TEMPLATE_PARAMETERS+=(-p ARCHIVE_BUCKET="${ARCHIVE_BUCKET}") - - OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/backup-archive-schedule.yml" - . $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-resources.sh - - oc apply -n ${OPENSHIFT_PROJECT} -f /tmp/k8up-archive-initiate.yml - rm /tmp/k8up-archive-initiate.yml - else - echo "${OPENSHIFT_PROJECT}: Not production environment, not creating an archive-schedule" - fi - else - echo "${OPENSHIFT_PROJECT}: No backup-schedule found for project, not creating an archive-schedule" - fi -else - echo "k8up is not supported by this cluster" - exit 1 -fi - -echo "${OPENSHIFT_PROJECT}: done ==================================================================" diff --git a/helpers/k8up-initiate.sh b/helpers/k8up-initiate.sh deleted file mode 100755 index 9939931f12..0000000000 --- a/helpers/k8up-initiate.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -function outputToYaml() { - IFS='' - while read data; do - echo "$data" >> /tmp/k8up-initiate.yml; - done; -} - -if [ -z "$JWTSECRET" ]; then - echo "JWTSECRET not set" - exit 1 -fi - -if [ -z "$OPENSHIFT_PROJECT" ]; then - echo "OPENSHIFT_PROJECT not set" - exit 1 -fi - -set -eu -o pipefail - -OC="oc" - -echo "${OPENSHIFT_PROJECT}: starting ==================================================================" - -# Fill environment variables which are needed by exec-openshift-resources.sh and the lagoon templates -CONFIGMAP=$($OC -n $OPENSHIFT_PROJECT get configmap lagoon-env -o json) -PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_PROJECT') -SAFE_PROJECT=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_SAFE_PROJECT') -BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_BRANCH') -SAFE_BRANCH=$(echo "$CONFIGMAP" | jq -r '.data.LAGOON_GIT_SAFE_BRANCH') -LAGOON_GIT_SHA="00000000000000000000000000000000000000000" -OPENSHIFT_REGISTRY="docker-registry.default.svc:5000" -ROUTER_URL="" -SERVICE_NAME="none" - -PROJECT_SECRET=$(echo -n "$PROJECT-$JWTSECRET" | sha256sum | cut -d " " -f 1) - -# If restic backups are supported by this cluster we 
create the schedule definition
-if oc get customresourcedefinition schedules.backup.appuio.ch > /dev/null; then
-
-  baas_repo_pw=$(oc -n ${OPENSHIFT_PROJECT} create secret generic baas-repo-pw --from-literal=repo-pw=$(echo -n "$PROJECT_SECRET-BAAS-REPO-PW" | sha256sum | cut -d " " -f 1) -o json --dry-run)
-
-  if ! oc -n ${OPENSHIFT_PROJECT} get secret baas-repo-pw &> /dev/null; then
-    # Create baas-repo-pw secret based on the project secret
-    echo "$baas_repo_pw" | oc -n ${OPENSHIFT_PROJECT} create -f -
-  else
-    echo "$baas_repo_pw" | oc -n ${OPENSHIFT_PROJECT} replace -f -
-  fi
-
-  TEMPLATE_PARAMETERS=()
-
-  # Run Backups every day at 2200-0200
-  BACKUP_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(22-2) * * *")
-  TEMPLATE_PARAMETERS+=(-p BACKUP_SCHEDULE="${BACKUP_SCHEDULE}")
-
-  # Run Checks on Sunday at 0300-0600
-  CHECK_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 0")
-  TEMPLATE_PARAMETERS+=(-p CHECK_SCHEDULE="${CHECK_SCHEDULE}")
-
-  # Run Prune on Saturday at 0300-0600
-  PRUNE_SCHEDULE=$( $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "M H(3-6) * * 6")
-  TEMPLATE_PARAMETERS+=(-p PRUNE_SCHEDULE="${PRUNE_SCHEDULE}")
-
-  OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/backup/schedule.yml"
-  . $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-resources.sh
-
-  oc apply -n ${OPENSHIFT_PROJECT} -f /tmp/k8up-initiate.yml
-  rm /tmp/k8up-initiate.yml
-else
-  echo "k8up is not supported by this cluster"
-  exit 1
-fi
-
-# Disable backup of solr PVCs
-if solr=$(oc -n ${OPENSHIFT_PROJECT} get pvc solr -o json 2> /dev/null) && [[ $(echo "$solr" | jq -r '.metadata.annotations."appuio.ch/backup"') != "false" ]]; then
-  oc -n ${OPENSHIFT_PROJECT} annotate --overwrite pvc solr appuio.ch/backup="false";
-fi
-
-# Enable backup of nginx PVCs
-if nginx=$(oc -n ${OPENSHIFT_PROJECT} get pvc nginx -o json 2> /dev/null) && [[ $(echo "$nginx" | jq -r '.metadata.annotations."appuio.ch/backup"') != "true" ]]; then
-  oc -n ${OPENSHIFT_PROJECT} annotate --overwrite pvc nginx appuio.ch/backup="true";
-fi
-
-# Remove any backupcommand from nginx pods if they exist
-if oc -n ${OPENSHIFT_PROJECT} get deploymentconfig nginx -o json 2> /dev/null | jq -r -e '.spec.template.metadata.annotations."appuio.ch/backupcommand"' &> /dev/null; then
-  oc -n ${OPENSHIFT_PROJECT} patch deploymentconfig nginx --type json -p='[{"op": "remove", "path": "/spec/template/metadata/annotations/appuio.ch~1backupcommand"}]'
-fi
-
-# Add backupcommand to cli pods to back up mariadb
-if oc -n ${OPENSHIFT_PROJECT} get deploymentconfig cli &> /dev/null; then
-  oc -n ${OPENSHIFT_PROJECT} patch deploymentconfig cli -p '{"spec":{"template":{"metadata":{"annotations":{"appuio.ch/backupcommand":"/bin/sh -c \"if [[ $MARIADB_HOST ]]; then dump=$(mktemp) && mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -h $MARIADB_HOST -u $MARIADB_USERNAME -p$MARIADB_PASSWORD $MARIADB_DATABASE > $dump && cat $dump && rm $dump; fi\"", "backup.appuio.ch/file-extension": ".mysql.sql"}}}}}' || true
-fi
-
-echo "${OPENSHIFT_PROJECT}: done =================================================================="
diff --git a/helpers/k8up-remove-prune.sh b/helpers/k8up-remove-prune.sh
deleted file mode 100755 index 8403870c45..0000000000 --- a/helpers/k8up-remove-prune.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -if [ -z "$OPENSHIFT_PROJECT" ]; then - echo "OPENSHIFT_PROJECT not set" - exit 1 -fi - -set -e -o pipefail - -echo "${OPENSHIFT_PROJECT}: starting ==================================================================" - -if oc -n "${OPENSHIFT_PROJECT}" patch schedule backup-schedule --type=json -p="[{\"op\": \"remove\", \"path\": \"/spec/prune\"}]" 2>/dev/null; then - echo "${OPENSHIFT_PROJECT}: patched backup-schedule" -else - echo "${OPENSHIFT_PROJECT}: backup-schedule already patched" -fi - -echo "${OPENSHIFT_PROJECT}: done ==================================================================" diff --git a/helpers/label-namespaces.sh b/helpers/label-namespaces.sh deleted file mode 100755 index e9f1f93c8e..0000000000 --- a/helpers/label-namespaces.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -## -# Label all namespaces with lagoon info -# -# Old environments weren't labelled the way that Lagoon expects. This script -# can be run against a cluster to add the missing labels. - -set -euo pipefail -#set -x - -# Loop through all oc projects. -while read -r project ; do - - # Check if lagoon-env configmap exists. - if oc get configmap -n "$project" lagoon-env >/dev/null 2>&1; then - - echo "################################################" - echo "Annotating project: $project..." - echo "################################################" - - LAGOON_PROJECT=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_PROJECT:/ { print $2 }') - LAGOON_ENVIRONMENT_TYPE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_ENVIRONMENT_TYPE:/ { print $2 }') - LAGOON_GIT_SAFE_BRANCH=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/LAGOON_GIT_SAFE_BRANCH:/ { print $2 }') - MARIADB_DATABASE=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_DATABASE:/ { print $2 }') - MARIADB_USERNAME=$(oc get configmaps -n "$project" lagoon-env -o yaml | awk '/MARIADB_USERNAME:/ { print $2 }') - - oc label namespace "$project" "lagoon.sh/project=$LAGOON_PROJECT" --overwrite - oc label namespace "$project" "lagoon.sh/environmentType=$LAGOON_ENVIRONMENT_TYPE" --overwrite - oc label namespace "$project" "lagoon.sh/environment=$LAGOON_GIT_SAFE_BRANCH" --overwrite - oc label namespace "$project" "lagoon.sh/mariadb-schema=$MARIADB_DATABASE" --overwrite - oc label namespace "$project" "lagoon.sh/mariadb-username=$MARIADB_USERNAME" --overwrite - else - - echo "No lagoon-env configmap found for $project" - - fi - -done < <(oc get ns -l '!lagoon.sh/project' | sed '1d' | awk '{print $1}') diff --git a/helpers/lagoon-sync.sh b/helpers/lagoon-sync.sh deleted file mode 100644 index 38316b9097..0000000000 --- a/helpers/lagoon-sync.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash - -# to create serviceaccounts: -# oc -n $namespace create serviceaccount lagoon-sync -# oc -n $namespace adm policy add-role-to-user edit -z lagoon-sync -# oc -n $namespace serviceaccounts get-token lagoon-sync - -set -eu -o pipefail - -#SOURCE_CONSOLE="" -#SOURCE_NAMESPACE="" -#SOURCE_SERVICEACCOUNT_TOKEN="" - -#DESTINATION_CONSOLE="" -#DESTINATION_NAMESPACE="" -#DESTINATION_SERVICEACCOUNT_TOKEN="" - -if [ -z "$SOURCE_CONSOLE" ]; then - echo "SOURCE_CONSOLE not set" - exit 1 -fi - -if [ -z "$DESTINATION_CONSOLE" ]; then - echo "DESTINATION_CONSOLE not set" - exit 1 -fi - -if [ -z "$SOURCE_SERVICEACCOUNT_TOKEN" ]; then - echo "SOURCE_SERVICEACCOUNT_TOKEN not set" - exit 
1 -fi - -if [ -z "$DESTINATION_SERVICEACCOUNT_TOKEN" ]; then - echo "DESTINATION_SERVICEACCOUNT_TOKEN not set" - exit 1 -fi - -if [ -z "$SOURCE_NAMESPACE" ]; then - echo "SOURCE_NAMESPACE not set" - exit 1 -fi - -if [ -z "$DESTINATION_NAMESPACE" ]; then - echo "DESTINATION_NAMESPACE not set" - exit 1 -fi - -echo "SOURCE_CONSOLE: $SOURCE_CONSOLE" -echo "SOURCE_NAMESPACE: $SOURCE_NAMESPACE" -echo "DESTINATION_CONSOLE: $DESTINATION_CONSOLE" -echo "DESTINATION_NAMESPACE: $DESTINATION_NAMESPACE" - -set -v - -mkdir -p /tmp/lagoon-sync/backup - -oc login $SOURCE_CONSOLE --token=$SOURCE_SERVICEACCOUNT_TOKEN -source_context=$(oc config current-context) - -oc login $DESTINATION_CONSOLE --token=$DESTINATION_SERVICEACCOUNT_TOKEN -destination_context=$(oc config current-context) - -source_api_db_pod=$(oc --context=$source_context -n $SOURCE_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=api-db) -oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_api_db_pod -- /lagoon/mysql-backup.sh 127.0.0.1 || true -source_api_db_backup=$(oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_api_db_pod -- sh -c "find . -name \"*.sql.gz\" -print0 | xargs -r -0 ls -1 -t | head -1") -oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_api_db_pod -- cat $source_api_db_backup > /tmp/lagoon-sync/$source_api_db_backup - - -destination_api_db_pod=$(oc --context=$destination_context -n $DESTINATION_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=api-db) -oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- sh -c "mkdir -p backup" -oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- sh -c "cat > $source_api_db_backup" < /tmp/lagoon-sync/$source_api_db_backup -oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- sh -c "zcat $source_api_db_backup | mysql infrastructure" - - -source_keycloak_db_pod=$(oc --context=$source_context -n $SOURCE_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=keycloak-db) -oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_keycloak_db_pod -- /lagoon/mysql-backup.sh 127.0.0.1 -source_keycloak_db_backup=$(oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_keycloak_db_pod -- sh -c "find . 
-name \"*.sql.gz\" -print0 | xargs -r -0 ls -1 -t | head -1") -oc --context=$source_context -n $SOURCE_NAMESPACE exec $source_keycloak_db_pod -- cat $source_keycloak_db_backup > /tmp/lagoon-sync/$source_keycloak_db_backup - -destination_keycloak_db_pod=$(oc --context=$destination_context -n $DESTINATION_NAMESPACE get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=keycloak-db) -oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_keycloak_db_pod -- sh -c "mkdir -p backup" -oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_keycloak_db_pod -- sh -c "cat > $source_keycloak_db_backup" < /tmp/lagoon-sync/$source_keycloak_db_backup -oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_keycloak_db_pod -- sh -c "zcat $source_keycloak_db_backup | mysql keycloak" - - -oc --context=$destination_context -n $DESTINATION_NAMESPACE rollout latest dc/keycloak -oc --context=$destination_context -n $DESTINATION_NAMESPACE rollout latest dc/api - -oc --context=$destination_context -n $DESTINATION_NAMESPACE exec -i $destination_api_db_pod -- /rerun_initdb.sh - - diff --git a/helpers/mariadb-galera2shared.sh b/helpers/mariadb-galera2shared.sh deleted file mode 100755 index fffab3c708..0000000000 --- a/helpers/mariadb-galera2shared.sh +++ /dev/null @@ -1,114 +0,0 @@ -#!/bin/bash - - - -if [ ! "$1" ]; then - echo "please define openshift project as first argument" - exit 1; -fi - -set -uo pipefail - -which shyaml > /dev/null -if [ $? -gt 0 ]; then - echo "please install shyaml (pip3 install shyaml)" - exit 1 -fi - -which jq > /dev/null -if [ $? -gt 0 ]; then - echo "please install jq" - exit 1 -fi - -which svcat > /dev/null -if [ $? -gt 0 ]; then - echo "please install svcat" - exit 1 -fi - -set -e - -PROJECT_NAME=$1 - -echo "*** Starting mariadb-galera --> mariadb-shared migration in ${PROJECT_NAME}" - -SERVICE_NAME=mariadb -SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:]) -SERVICE_TYPE=mariadb-shared - -ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') - -OLD_POD="mariadb-galera-0" - -if [[ "$OLD_POD" ]]; then - echo "found $SERVICE_NAME pod $OLD_POD" -else - echo "no running pod found for service '${SERVICE_NAME}'', is it running?" - exit 1 -fi - -echo "*** Pausing nginx and cli" -NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas') -CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas') -oc -n $1 scale dc/nginx --replicas=0 -oc -n $1 scale dc/cli --replicas=0 - - -# create service broker -## taken from build-deploy-docker-compose.sh - -OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml" -SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb" -SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}" -OPENSHIFT_PROJECT=$1 -. 
$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh - -# ServiceBrokers take a bit, wait until the credentials secret is available -until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -do - echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs" - sleep 10 -done - -# Load credentials out of secret -SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml -oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS - -DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) -DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) -DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) -DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) -DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) - -echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" -# transfer database between from old to new -oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db drupal || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" - -CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" -echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" -oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK - -echo "*** updating configmap to point to ${DB_HOST}." -# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker -oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \ - -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" - - -echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later" -oc -n $1 delete service mariadb -oc -n $1 scale dc/mariadb-maxscale --replicas=0 -oc -n $1 scale statefulset/mariadb-galera --replicas=0 - - -# transfer complete, clean up -rm -f $SECRETS - -oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS -oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS - -oc -n $1 rollout latest dc/nginx -oc -n $1 rollout latest dc/cli -oc -n $1 rollout status dc/nginx -oc -n $1 rollout status dc/cli -echo "*** done." diff --git a/helpers/mariadb-single2shared-no-nginx.sh b/helpers/mariadb-single2shared-no-nginx.sh deleted file mode 100755 index 8b59b98a62..0000000000 --- a/helpers/mariadb-single2shared-no-nginx.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/bin/bash - - - -if [ ! "$1" ]; then - echo "please define openshift project as first argument" - exit 1; -fi - -set -uo pipefail - -which shyaml > /dev/null -if [ $? -gt 0 ]; then - echo "please install shyaml (pip3 install shyaml)" - exit 1 -fi - -which jq > /dev/null -if [ $? -gt 0 ]; then - echo "please install jq" - exit 1 -fi - -which svcat > /dev/null -if [ $? 
-gt 0 ]; then - echo "please install svcat" - exit 1 -fi - -set -e - -PROJECT_NAME=$1 - -echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}" - -SERVICE_NAME=mariadb -SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:]) -SERVICE_TYPE=mariadb-shared - -ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') - -MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas') - -if [ "$MARIADB_REPLICAS" == "0" ]; then - oc -n $1 scale dc/mariadb --replicas=1 - oc -n $1 rollout status dc/mariadb -fi - -# export old mariadb pod name -OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME) - -if [[ "$OLD_POD" ]]; then - echo "found $SERVICE_NAME pod $OLD_POD" -else - echo "no running pod found for service '${SERVICE_NAME}'', is it running?" - exit 1 -fi - -# create service broker -## taken from build-deploy-docker-compose.sh - -OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml" -SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb" -SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}" -OPENSHIFT_PROJECT=$1 -. $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh - -# ServiceBrokers take a bit, wait until the credentials secret is available -until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -do - echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs" - sleep 10 -done - -# Load credentials out of secret -SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml -oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS - -DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) -DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) -DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) -DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) -DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) - -echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" -# transfer database between from old to new -oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db \$MARIADB_DATABASE || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" - -CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" -echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" -oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK - -echo "*** updating configmap to point to ${DB_HOST}." -# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker -oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \ - -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" - - -echo "*** Deleting mariadb service. 
Scaling old mariadb to 0; you can clean up the DC and pv later" -oc -n $1 delete service mariadb -oc -n $1 scale dc/mariadb --replicas=0 - -# transfer complete, clean up -rm -f $SECRETS - -echo "*** done." diff --git a/helpers/mariadb-single2shared-wordpress.sh b/helpers/mariadb-single2shared-wordpress.sh deleted file mode 100755 index 13461cfbf5..0000000000 --- a/helpers/mariadb-single2shared-wordpress.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash - - - -if [ ! "$1" ]; then - echo "please define openshift project as first argument" - exit 1; -fi - -set -uo pipefail - -which shyaml > /dev/null -if [ $? -gt 0 ]; then - echo "please install shyaml (pip3 install shyaml)" - exit 1 -fi - -which jq > /dev/null -if [ $? -gt 0 ]; then - echo "please install jq" - exit 1 -fi - -which svcat > /dev/null -if [ $? -gt 0 ]; then - echo "please install svcat" - exit 1 -fi - -set -e - -PROJECT_NAME=$1 - -echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}" - -SERVICE_NAME=mariadb -SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:]) -SERVICE_TYPE=mariadb-shared - -ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') - -MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas') - -if [ "$MARIADB_REPLICAS" == "0" ]; then - oc -n $1 scale dc/mariadb --replicas=1 - oc -n $1 rollout status dc/mariadb -fi - -# export old mariadb pod name -OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME) - -if [[ "$OLD_POD" ]]; then - echo "found $SERVICE_NAME pod $OLD_POD" -else - echo "no running pod found for service '${SERVICE_NAME}'', is it running?" - exit 1 -fi - -echo "*** Pausing nginx and cli" -NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas') -CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas') -oc -n $1 scale dc/nginx --replicas=0 -oc -n $1 scale dc/cli --replicas=0 - - -# create service broker -## taken from build-deploy-docker-compose.sh - -OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml" -SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb" -SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}" -OPENSHIFT_PROJECT=$1 -. 
$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh - -# ServiceBrokers take a bit, wait until the credentials secret is available -until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -do - echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs" - sleep 10 -done - -# Load credentials out of secret -SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml -oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS - -DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) -DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) -DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) -DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) -DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) - -echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" -# transfer database between from old to new -oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db lagoon || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" - -CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" -echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" -oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK - -echo "*** updating configmap to point to ${DB_HOST}." -# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker -oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \ - -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" - - -echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later" -oc -n $1 delete service mariadb -oc -n $1 scale dc/mariadb --replicas=0 - -# transfer complete, clean up -rm -f $SECRETS - -oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS -oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS - -oc -n $1 rollout latest dc/nginx -oc -n $1 rollout latest dc/cli -oc -n $1 rollout status dc/nginx -oc -n $1 rollout status dc/cli -echo "*** done." diff --git a/helpers/mariadb-single2shared.sh b/helpers/mariadb-single2shared.sh deleted file mode 100755 index 9a281271bd..0000000000 --- a/helpers/mariadb-single2shared.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash - - - -if [ ! "$1" ]; then - echo "please define openshift project as first argument" - exit 1; -fi - -set -uo pipefail - -which shyaml > /dev/null -if [ $? -gt 0 ]; then - echo "please install shyaml (pip3 install shyaml)" - exit 1 -fi - -which jq > /dev/null -if [ $? -gt 0 ]; then - echo "please install jq" - exit 1 -fi - -which svcat > /dev/null -if [ $? 
-gt 0 ]; then - echo "please install svcat" - exit 1 -fi - -set -e - -PROJECT_NAME=$1 - -echo "*** Starting mariadb-single --> mariadb-shared migration in ${PROJECT_NAME}" - -SERVICE_NAME=mariadb -SERVICE_NAME_UPPERCASE=$(echo $SERVICE_NAME | tr [:lower:] [:upper:]) -SERVICE_TYPE=mariadb-shared - -ENVIRONMENT_TYPE=$(oc -n $1 get configmap lagoon-env -o json | jq -r '.data.LAGOON_ENVIRONMENT_TYPE') - -MARIADB_REPLICAS=$(oc -n $1 get dc/mariadb -o json | jq -r '.spec.replicas') - -if [ "$MARIADB_REPLICAS" == "0" ]; then - oc -n $1 scale dc/mariadb --replicas=1 - oc -n $1 rollout status dc/mariadb -fi - -# export old mariadb pod name -OLD_POD=$(oc -n $1 get pod -o custom-columns=NAME:.metadata.name --no-headers -l service=$SERVICE_NAME) - -if [[ "$OLD_POD" ]]; then - echo "found $SERVICE_NAME pod $OLD_POD" -else - echo "no running pod found for service '${SERVICE_NAME}'', is it running?" - exit 1 -fi - -echo "*** Pausing nginx and cli" -NGINX_REPLICAS=$(oc -n $1 get dc/nginx -o json | jq -r '.spec.replicas') -CLI_REPLICAS=$(oc -n $1 get dc/cli -o json | jq -r '.spec.replicas') -oc -n $1 scale dc/nginx --replicas=0 -oc -n $1 scale dc/cli --replicas=0 - - -# create service broker -## taken from build-deploy-docker-compose.sh - -OPENSHIFT_TEMPLATE="$(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/openshift-templates/${SERVICE_TYPE}/servicebroker.yml" -SERVICEBROKER_CLASS="lagoon-dbaas-mariadb-apb" -SERVICEBROKER_PLAN="${ENVIRONMENT_TYPE}" -OPENSHIFT_PROJECT=$1 -. $(git rev-parse --show-toplevel)/images/oc-build-deploy-dind/scripts/exec-openshift-create-servicebroker.sh - -# ServiceBrokers take a bit, wait until the credentials secret is available -until oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -do - echo "Secret ${SERVICE_NAME}-servicebroker-credentials not available yet, waiting for 10 secs" - sleep 10 -done - -# Load credentials out of secret -SECRETS=/tmp/${PROJECT_NAME}-${OLD_POD}-migration.yaml -oc -n $1 get --insecure-skip-tls-verify secret ${SERVICE_NAME}-servicebroker-credentials -o yaml > $SECRETS - -DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) -DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) -DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) -DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) -DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) - -echo "*** Transfering 'drupal' database from $OLD_POD to $DB_HOST" -# transfer database between from old to new -oc -n $1 exec $OLD_POD -- bash -eo pipefail -c "{ mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db drupal || mysqldump --max-allowed-packet=500M --events --routines --quick --add-locks --no-autocommit --single-transaction --no-create-db -S /tmp/mysql.sock -u \$MYSQL_USER -p\$MYSQL_PASSWORD \$MYSQL_DATABASE; } | sed -e 's/DEFINER[ ]*=[ ]*[^*]*\*/\*/' | mysql -h $DB_HOST -u $DB_USER -p${DB_PASSWORD} -P $DB_PORT $DB_NAME" - -CONFIG_BAK="/tmp/${PROJECT_NAME}-$(date +%F-%T)-lagoon-env.yaml" -echo "*** Backing up configmap in case we need to revert: ${CONFIG_BAK}" -oc -n $1 get configmap lagoon-env -o yaml > $CONFIG_BAK - -echo "*** updating configmap to point to ${DB_HOST}." 
-# Add credentials to our configmap, prefixed with the name of the servicename of this servicebroker -oc -n $1 patch --insecure-skip-tls-verify configmap lagoon-env \ - -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" - - -echo "*** Deleting mariadb service. Scaling old mariadb to 0; you can clean up the DC and pv later" -oc -n $1 delete service mariadb -oc -n $1 scale dc/mariadb --replicas=0 - -# transfer complete, clean up -rm -f $SECRETS - -oc -n $1 scale dc/nginx --replicas=$NGINX_REPLICAS -oc -n $1 scale dc/cli --replicas=$CLI_REPLICAS - -oc -n $1 rollout latest dc/nginx -oc -n $1 rollout latest dc/cli -oc -n $1 rollout status dc/nginx -oc -n $1 rollout status dc/cli -echo "*** done." diff --git a/helpers/migrate-resize-pv-nginx.sh b/helpers/migrate-resize-pv-nginx.sh deleted file mode 100755 index d3976d1d25..0000000000 --- a/helpers/migrate-resize-pv-nginx.sh +++ /dev/null @@ -1,222 +0,0 @@ -#!/bin/bash - -set -e -o pipefail - -# use oc -OC=oc - -usage() { - echo "Usage: ./migrate-resize-pv-nginx.sh -p solr -s 20Gi -d nginx,cli -n solr-namespace -c gp2 -m gluster" - echo "WARNING: Specify the storageclass(-m) for the migrator pvc to be created in, must be multi-az mountable" - echo " otherwise loss of data can occur" - echo "Options:" - echo " -m #required, should be a storageclass that is multi-az mountable, eg gluster,efs,etc.." - echo " -p #required" - echo " -s #optional, set to the size you want to resize it to, defaults to original requested claim" - echo " -d #required, separate with commas to define multiple deploymentconfigs" - echo " -n #required" - echo " -c #optional, change the storage class of the new migrated/resized pv" - exit 1 -} - -if [[ ! $@ =~ ^\-.+ ]] -then - usage -fi - -while getopts ":p:d:s:n:c:m:h:" opt; do - case ${opt} in - p ) # process option p - PVC=$OPTARG;; - d ) # process option d - DC=$OPTARG;; - s ) # process option s - PVSIZE=$OPTARG;; - n ) # process option n - NS=$OPTARG;; - c ) # process option c - SC=$OPTARG;; - m ) # process option m - MIGRATOR_SC=$OPTARG;; - h ) - usage;; - *) - usage;; - esac -done - -# need these, make sure we have them -if [[ -z "$PVC" || -z "$DC" || -z "$NS" || -z "$MIGRATOR_SC" ]]; then - usage -fi - -# convert given DC into an array -IFS=',' read -r -a DC_ARRAY <<< "$DC" - -# check if the storage class exists if a request to change is made -if [ ! -z "$SC" ]; then - SC_EXIST=$(${OC} -n ${NS} get sc ${SC} -o name --no-headers) - if [ "$SC_EXIST" = "" ]; then - exit 1 - fi -fi -# check if the migrator storage class exists too -if [ ! -z "$MIGRATOR_SC" ]; then - MIGRATOR_SC_EXIST=$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o name --no-headers) - if [ "$MIGRATOR_SC_EXIST" = "" ]; then - exit 1 - fi -fi -if [ "$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o json | jq -r .provisioner)" == "kubernetes.io/aws-ebs" ]; then - echo "You are using ${MIGRATOR_SC} which uses aws-ebs. This may result in loss of data if the pvc is created in a different az to the migrator pod." - read -p "Are you sure? 
" -n 1 -r - echo - if [[ $REPLY =~ ^[Yy]$ ]] - then - echo "Proceeding" - else - exit 1 - fi -fi - -PVC_EXIST=$(${OC} -n ${NS} get pvc ${PVC} -o name --no-headers) -if [ "$PVC_EXIST" = "" ]; then - exit 1 -else - # get the existing size of the PV - OLDSIZE=$(${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq -r '.spec.resources.requests.storage') - if [ -z "$PVSIZE" ]; then - echo "using existing PV size when migrating - $OLDSIZE" - #if a new size is not defined, use the original size when creating the new pv - PVSIZE=$OLDSIZE - else - if [ "$PVSIZE" != "$OLDSIZE" ]; then - echo "migrated PV will be created with the new size $PVSIZE" - fi - fi - - # cleanup objects in case they already exist. - ${OC} -n ${NS} adm policy remove-scc-from-user privileged -z pvc-migrator || true - ${OC} -n ${NS} delete serviceaccount pvc-migrator || true - ${OC} -n ${NS} delete deploymentconfig/pv-migrator || true - #${OC} -n ${NS} delete pvc/${PVC}-migrator --wait || true - -# create the migrator pvc early and fail if it can't be created -cat << EOF | ${OC} -n ${NS} apply -f - - apiVersion: v1 - items: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ${PVC}-migrator - spec: - storageClassName: ${MIGRATOR_SC} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: $OLDSIZE - kind: List - metadata: {} -EOF - MIGRATOR_PVC_EXIST=$(${OC} -n ${NS} get pvc ${PVC}-migrator -o name --no-headers) - if [ "$PVC_EXIST" = "" ]; then - exit 1 - fi - - - - # create a svc account - ${OC} -n ${NS} create serviceaccount pvc-migrator - ${OC} -n ${NS} adm policy add-scc-to-user privileged -z pvc-migrator - - # run alpine base - ${OC} -n ${NS} run --image alpine pv-migrator -- sh -c "apk add --no-cache rsync; trap : TERM INT; (while true; do sleep 3600; done) & wait" - # pause the rollout to allow making multiple changes on the deploymentconfig - ${OC} -n ${NS} rollout pause deploymentconfig/pv-migrator - # change serviceaccount name so i can run as privileged - ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "pvc-migrator"}}}}' - # now run as root - ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}' - echo "adding ${PVC} to pv-migrator." - ${OC} -n ${NS} set volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage - # add migration pvc to migrator - ${OC} -n ${NS} set volume deploymentconfig/pv-migrator --add --name=${PVC}-migrator --type=persistentVolumeClaim --claim-name=${PVC}-migrator --mount-path=/migrator - ${OC} -n ${NS} rollout resume deploymentconfig/pv-migrator - ${OC} -n ${NS} rollout status deploymentconfig/pv-migrator --watch - - # check if the migrator pod is actually running - MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty') - if [[ ! $MIGRATOR ]]; then - echo "No running pod found for migrator" - exit 1 - fi - - echo "copy ${PVC} to ${PVC}-migrator" - ${OC} -n ${NS} exec $MIGRATOR -- rsync -av -W --inplace --delete --exclude='/css/' --exclude='/js/' --exclude='/advagg_css/' --exclude='/advagg_js/' --exclude='/styles/' --exclude='/php/' --info=progress2 /storage/. 
/migrator - - # update actual production pods with migrator PVC (this allows them to keep running while we migrate a second time) - for DC in "${DC_ARRAY[@]}" - do - ${OC} -n ${NS} set volume deploymentconfig/${DC} --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC}-migrator --overwrite - done - for DC in "${DC_ARRAY[@]}" - do - ${OC} -n ${NS} rollout status deploymentconfig/${DC} --watch - done - - TMP=$(mktemp temp.${PVC}.json.XXXX) - - echo "dumping pvc ${PVC} to ${TMP}." - ## we can change the storage class instead of using the default - if [ ! -z "$SC" ]; then - ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' | jq --arg SC "${SC}" '.spec.storageClassName=$SC' > $TMP - else - ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' > $TMP - fi - - # scale down migrator to change the volumes on it - ${OC} -n ${NS} scale --replicas=0 deploymentconfig/pv-migrator - # remove the original PVC from the migrator - - # remove the original PVC now that we have migrated everything to the PVC-migrator, we call `--wait` to make sure the PVC really has been deleted - ${OC} -n ${NS} delete pvc/${PVC} --wait - - # recreate the PVC based on what we dumped before - ${OC} -n ${NS} create -f $TMP && rm $TMP - - # check if deploymenconfig has at least 1 ready pod, if not, scale and check again in 3 secounds. - while [[ $(${OC} -n ${NS} get deploymentconfig/pv-migrator -o go-template --template='{{.status.readyReplicas}}') = "" ]] || [[ $(${OC} -n ${NS} get deploymentconfig/pv-migrator -o go-template --template='{{.status.readyReplicas}}') = "0" ]] - do - # Sending the scaling command while it already scaling is no problem for the Kubernetes API - ${OC} -n ${NS} scale --replicas=1 deploymentconfig/pv-migrator - sleep 3 - done - - MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty') - if [[ ! $MIGRATOR ]]; then - echo "No running pod found for migrator" - exit 1 - fi - - # copy data from the pvc-migrator to the newly created pvc - ${OC} -n ${NS} exec $MIGRATOR -- rsync -av -W --inplace --delete --exclude='/css/' --exclude='/js/' --exclude='/advagg_css/' --exclude='/advagg_js/' --info=progress2 --exclude='/styles/' --exclude='/php/' /migrator/. 
/storage - - # updating the production pods with the copied storage again - for DC in "${DC_ARRAY[@]}" - do - ${OC} -n ${NS} set volume deploymentconfig/${DC} --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --overwrite - done - for DC in "${DC_ARRAY[@]}" - do - ${OC} -n ${NS} rollout status deploymentconfig/${DC} --watch - done - - # delete the migrator DC and PVC - ${OC} -n ${NS} delete deploymentconfig/pv-migrator - ${OC} -n ${NS} delete pvc/${PVC}-migrator - - # cleanup serviceaccounts - ${OC} -n ${NS} adm policy remove-scc-from-user privileged -z pvc-migrator - ${OC} -n ${NS} delete serviceaccount pvc-migrator -fi diff --git a/helpers/migrate-resize-pv.sh b/helpers/migrate-resize-pv.sh deleted file mode 100755 index 40ff2fc429..0000000000 --- a/helpers/migrate-resize-pv.sh +++ /dev/null @@ -1,192 +0,0 @@ -#!/bin/bash - -# use oc -OC=oc - -usage() { - echo "Usage: ./migrate-resize-pv.sh -p solr -s 20Gi -d solr -n solr-namespace -c gp2 -m gluster" - echo "WARNING: Specify the storageclass(-m) for the migrator pvc to be created in, must be multi-az mountable" - echo " otherwise loss of data can occur" - echo "Options:" - echo " -m #required, should be a storageclass that is multi-az mountable, eg gluster,efs,etc.." - echo " -p #required" - echo " -s #optional, set to the size you want to resize it to, defaults to original requested claim" - echo " -d #required" - echo " -n #required" - echo " -c #optional, change the storage class of the new migrated/resized pv" - exit 1 -} - -if [[ ! $@ =~ ^\-.+ ]] -then - usage -fi - -while getopts ":p:d:s:n:c:m:h:" opt; do - case ${opt} in - p ) # process option p - PVC=$OPTARG;; - d ) # process option d - DC=$OPTARG;; - s ) # process option s - PVSIZE=$OPTARG;; - n ) # process option n - NS=$OPTARG;; - c ) # process option c - SC=$OPTARG;; - m ) # process option m - MIGRATOR_SC=$OPTARG;; - h ) - usage;; - *) - usage;; - esac -done - -# echo "Select which storage class is multi-az mountable, or exit:" -# COLUMNS=1 -# resourcelist=$(${OC} get sc --no-headers | awk '{print $1}') -# select opt in $(echo ${resourcelist} | tr -s " " "\n") "Q) exit" -# do -# if [[ "$opt" == "Q) exit-mach" || $REPLY == [Qq] ]]; then -# echo "Exiting" -# exit 1 -# fi -# MIGRATOR_SC=$opt -# break -# done - -# need these, make sure we have them -if [[ -z "$PVC" || -z "$DC" || -z "$NS" || -z "$MIGRATOR_SC" ]]; then - usage -fi - -# check if the storage class exists if a request to change is made -if [ ! -z "$SC" ]; then - SC_EXIST=$(${OC} -n ${NS} get sc ${SC} -o name --no-headers) - if [ "$SC_EXIST" = "" ]; then - exit 1 - fi -fi -# check if the migrator storage class exists too -if [ ! -z "$MIGRATOR_SC" ]; then - MIGRATOR_SC_EXIST=$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o name --no-headers) - if [ "$MIGRATOR_SC_EXIST" = "" ]; then - exit 1 - fi -fi -if [ "$(${OC} -n ${NS} get sc ${MIGRATOR_SC} -o json | jq -r .provisioner)" == "kubernetes.io/aws-ebs" ]; then - echo "You are using ${MIGRATOR_SC} which uses aws-ebs. This may result in loss of data if the pvc is created in a different az to the migrator pod." - read -p "Are you sure? 
" -n 1 -r - echo - if [[ $REPLY =~ ^[Yy]$ ]] - then - echo "Proceeding" - else - exit 1 - fi -fi - -PVC_EXIST=$(${OC} -n ${NS} get pvc ${PVC} -o name --no-headers) -if [ "$PVC_EXIST" = "" ]; then - exit 1 -else - # get the existing size of the PV - OLDSIZE=$(${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq -r '.spec.resources.requests.storage') - if [ -z "$PVSIZE" ]; then - echo "using existing PV size when migrating - $OLDSIZE" - #if a new size is not defined, use the original size when creating the new pv - PVSIZE=$OLDSIZE - else - if [ "$PVSIZE" != "$OLDSIZE" ]; then - echo "migrated PV will be created with the new size $PVSIZE" - fi - fi - -# create the migrator pvc early and fail if it can't be created -cat << EOF | ${OC} -n ${NS} apply -f - - apiVersion: v1 - items: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: migrator - spec: - storageClassName: ${MIGRATOR_SC} - accessModes: - - ReadWriteOnce - resources: - requests: - storage: $OLDSIZE - kind: List - metadata: {} -EOF - MIGRATOR_PVC_EXIST=$(${OC} -n ${NS} get pvc migrator -o name --no-headers) - if [ "$PVC_EXIST" = "" ]; then - exit 1 - fi - - # create a svc account - ${OC} -n ${NS} create serviceaccount pvcreclaim - ${OC} -n ${NS} adm policy add-scc-to-user privileged -z pvcreclaim - # scale the DC to 0 - ${OC} -n ${NS} scale --replicas=0 dc/${DC} - # run alpine base - ${OC} -n ${NS} run --image alpine pv-migrator -- sh -c "while sleep 3600; do :; done" - # change serviceaccount name so i can run as privileged - ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "pvcreclaim"}}}}' - # now run as root - ${OC} -n ${NS} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}' - # pause the rollout - ${OC} -n ${NS} rollout pause deploymentconfig/pv-migrator - echo "adding ${PVC} to pv-migrator." - ${OC} -n ${NS} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC} - - - - ${OC} -n ${NS} volume deploymentconfig/pv-migrator --add --name=migrator --type=persistentVolumeClaim --claim-name=migrator --mount-path=/migrator - ${OC} -n ${NS} rollout resume deploymentconfig/pv-migrator - ${OC} -n ${NS} rollout status deploymentconfig/pv-migrator --watch - - # - MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty') - if [[ ! $MIGRATOR ]]; then - echo "No running pod found for migrator" - exit 1 - fi - - echo "copy ${PVC} to storage" - ${OC} -n ${NS} exec $MIGRATOR -- cp -Rpav /storage/${PVC} /migrator/ - - TMP=$(mktemp temp.${PVC}.json.XXXX) - - echo "dumping pvc ${PVC} to ${TMP}." - ## we can change the storage class instead of using the default - if [ ! 
-z "$SC" ]; then - ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' | jq --arg SC "${SC}" '.spec.storageClassName=$SC' > $TMP - else - ${OC} -n ${NS} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' | jq --arg PVSIZE "${PVSIZE}" '.spec.resources.requests.storage=$PVSIZE' > $TMP - fi - - ${OC} -n ${NS} rollout pause deploymentconfig/pv-migrator - - ${OC} -n ${NS} volume deploymentconfig/pv-migrator --remove --name=${PVC} - ${OC} -n ${NS} delete pvc/${PVC} - ${OC} -n ${NS} create -f $TMP && rm $TMP - ${OC} -n ${NS} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC} - - ${OC} -n ${NS} rollout resume deploymentconfig/pv-migrator - ${OC} -n ${NS} rollout status deploymentconfig/pv-migrator --watch - - MIGRATOR=$(${OC} -n ${NS} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty') - - ${OC} -n ${NS} exec $MIGRATOR -- cp -Rpav /migrator/${PVC} /storage/ - ${OC} -n ${NS} exec $MIGRATOR -- ls -la /storage/${PVC} - - ${OC} -n ${NS} delete deploymentconfig/pv-migrator - ${OC} -n ${NS} delete pvc/migrator - ${OC} -n ${NS} scale --replicas=1 dc/${DC} - - ${OC} -n ${NS} adm policy remove-scc-from-user privileged -z pvcreclaim - ${OC} -n ${NS} delete serviceaccount pvcreclaim -fi diff --git a/helpers/nginx-healthchecks.sh b/helpers/nginx-healthchecks.sh deleted file mode 100755 index 9dde57a5ab..0000000000 --- a/helpers/nginx-healthchecks.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -if [ -z "$OPENSHIFT_PROJECT" ]; then - echo "OPENSHIFT_PROJECT not set" - exit 1 -fi - -set -eu -o pipefail - -OC="oc" - -echo "${OPENSHIFT_PROJECT}: starting ==================================================================" - -# Remove any backupcommand from nginx pods if they exit -if oc -n ${OPENSHIFT_PROJECT} get deploymentconfig nginx -o yaml --ignore-not-found | grep -q php &> /dev/null; then - oc -n ${OPENSHIFT_PROJECT} patch dc/nginx --patch '{"spec":{"template":{"spec":{"containers":[{"name":"php","livenessProbe":{"$patch":"replace","tcpSocket":{"port":9000},"initialDelaySeconds":60,"periodSeconds":10},"readinessProbe":{"$patch":"replace","tcpSocket":{"port":9000},"initialDelaySeconds":2,"periodSeconds":10}}]}}}}' || true - oc -n ${OPENSHIFT_PROJECT} rollout status --watch dc/nginx -fi - - -echo "${OPENSHIFT_PROJECT}: done ==================================================================" diff --git a/helpers/reclaim-pv.sh b/helpers/reclaim-pv.sh deleted file mode 100755 index 53ad5bfba2..0000000000 --- a/helpers/reclaim-pv.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash - -# written for openshift 3.7; small changes may be required for other versions. -# -# usage ./reclaim-pv.sh -# -# using the current openshift server and namepsace this script will: -# 1. scale all deployments to zero pods -# 2. create a pod and attach all temporary pvc. -# 3. attach all other pvcs in the namepace current claims to this pod. -# 4. for each pvc, -# copy the contents to temporary pvc, recreate the claim. -# this allows for the prefered pv to be used -# attach the newly created pvc, copy contents back to it -# 6. 
clean up - -OC=oc -PVCS=($(${OC} get pvc -o name | sed 's/persistentvolumeclaims\///')) - -if [[ $# -gt 0 ]]; then - unset PVCS - PVCS=("${BASH_ARGV[@]}") -fi - -if [[ ! ${#PVCS[@]} -gt 0 ]]; then - echo "no PVCs found." - -else - ${OC} create serviceaccount pvcreclaim - ${OC} adm policy add-scc-to-user privileged -z pvcreclaim - - ${OC} get dc -o name --no-headers | xargs -P3 -n1 ${OC} scale --replicas=0 - - ${OC} run --image alpine pv-migrator -- sh -c "while sleep 3600; do :; done" - - # change serviceaccount name so i can run as privileged - ${OC} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "pvcreclaim"}}}}' - # now run as root - ${OC} patch deploymentconfig/pv-migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}' - - ${OC} rollout pause deploymentconfig/pv-migrator - - for PVC in "${PVCS[@]}" - do - echo "adding ${PVC} to pv-migrator." - ${OC} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC} - done - -cat << EOF | ${OC} apply -f - - apiVersion: v1 - items: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: migrator - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - kind: List - metadata: {} -EOF - - - ${OC} volume deploymentconfig/pv-migrator --add --name=migrator --type=persistentVolumeClaim --claim-name=migrator --mount-path=/migrator - - ${OC} rollout resume deploymentconfig/pv-migrator - ${OC} rollout status deploymentconfig/pv-migrator --watch - - # - MIGRATOR=$(${OC} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty') - #MIGRATOR=$(${OC} get pod -o custom-columns=NAME:.metadata.name --no-headers -l run=pv-migrator) - if [[ ! $MIGRATOR ]]; then - echo "No running pod found for migrator" - exit 1 - fi - - for PVC in "${PVCS[@]}" - do - echo "copy ${PVC} to storage" - ${OC} exec $MIGRATOR -- cp -Rpav /storage/${PVC} /migrator/ - - TMP=$(mktemp temp.${PVC}.json.XXXX) - - echo "dumping pvc ${PVC} to ${TMP}." 
- ${OC} get -o json pvc/${PVC} --export=true | jq 'del(.metadata.annotations, .metadata.selfLink, .spec.volumeName, .spec.storageClassName, .status)' > $TMP - - - ${OC} rollout pause deploymentconfig/pv-migrator - - ${OC} volume deploymentconfig/pv-migrator --remove --name=${PVC} - ${OC} delete pvc/${PVC} - ${OC} create -f $TMP && rm $TMP - ${OC} volume deploymentconfig/pv-migrator --add --name=${PVC} --type=persistentVolumeClaim --claim-name=${PVC} --mount-path=/storage/${PVC} - - ${OC} rollout resume deploymentconfig/pv-migrator - ${OC} rollout status deploymentconfig/pv-migrator --watch - - - MIGRATOR=$(${OC} get pods -l run=pv-migrator -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty') - - ${OC} exec $MIGRATOR -- cp -Rpav /migrator/${PVC} /storage/ - ${OC} exec $MIGRATOR -- ls -la /storage/${PVC} - - - - - done - - ${OC} delete deploymentconfig/pv-migrator - ${OC} delete pvc/migrator - ${OC} get dc -o name --no-headers | xargs -P3 -n1 ${OC} scale --replicas=1 - - ${OC} adm policy remove-scc-from-user privileged -z pvcreclaim - ${OC} delete serviceaccount pvcreclaim - -fi diff --git a/helpers/run-in-all-lagoon-projects.sh b/helpers/run-in-all-lagoon-projects.sh deleted file mode 100755 index dbb82b969b..0000000000 --- a/helpers/run-in-all-lagoon-projects.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -e -o pipefail - -oc get configmaps --all-namespaces --no-headers | grep lagoon-env | awk '{ print $1 }' | while read OPENSHIFT_PROJECT; do - REGEX=${REGEX:-.*} - if [[ $OPENSHIFT_PROJECT =~ $REGEX ]]; then - . "$1" - fi -done diff --git a/helpers/shared-cleanup.sh b/helpers/shared-cleanup.sh deleted file mode 100755 index 6dedc3e62d..0000000000 --- a/helpers/shared-cleanup.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env bash - -# this script assumes you're logged into an openshift cluster locally, -# and that you can connect directly to the database servers listed in DB_HOST -# on port 3306 with a .my.cnf that allows you to run -# non-interactive mysql commands. - -# use oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials to get the root credentials. -# if the database is not directly connectable, an ssh tunnel can be used: -# ~/.my.cnf-mysql-development-cluster.cluster-xxx.rds.amazonaws.com -# [client] -# host=127.0.0.1 -# port=33007 -# user=root -# password=af105380aa4a2f034a083daeb9ed27b7a8395a44 - -# ssh -L 33007:mysql-development-cluster.cluster-xxx.rds.amazonaws.com:3306 infra1.cluster1.amazee.io - -# after running this script, the user will be presented with a list of -# databases that are probably ok to remove. - -set -euo pipefail - -for util in oc jq mysql; do - if ! command -v ${util} > /dev/null; then - echo "please install ${util}" - exit 1 - fi -done - -# Colours. -shw_grey () { - echo $(tput bold)$(tput setaf 0) $@ $(tput sgr 0) -} -shw_norm () { - echo $(tput bold)$(tput setaf 9) $@ $(tput sgr 0) -} -shw_info () { - echo $(tput bold)$(tput setaf 4) $@ $(tput sgr 0) -} -shw_warn () { - echo $(tput bold)$(tput setaf 2) $@ $(tput sgr 0) -} -shw_err () { - echo $(tput bold)$(tput setaf 1) $@ $(tput sgr 0) -} - -# Services with a port are not servicebrokers. -shw_grey "Getting a list of services for cluster $(oc whoami --show-server)." -oc get service --all-namespaces -o=jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.externalName}{"\n"}{end}' \ - | awk '$2 ~ /^mariadb-/ {print}' > /tmp/mariadb-services -# Remove read replica services.
-sed -i.bak '/mariadb-readreplica-/d' /tmp/mariadb-services -# Remove random database pods. -sed -i.bak '/mariadb-d7[[:space:]]*$/d' /tmp/mariadb-services - -# Get a list of database clusters: -# - Ignore the dedicated clusters. -# - Ignore the read replicas. -SERVERS=$(awk '{print $3}' /tmp/mariadb-services | sort -u | grep -v "^dedicated" | grep -v ".cluster-ro-") - -# Ensure you can connect to all database clusters; once you can, list every -# database that belongs to the Ansible Service Broker. -for SERVER in $SERVERS; do - CONFFILE="${HOME}/.my.cnf-${SERVER}" - if [ -f "$CONFFILE" ]; then - shw_info "Getting current database list for cluster ${SERVER}..." - # The ASB will never create a database name shorter than 5 characters. - mysql --defaults-file="$CONFFILE" -se 'show databases;' | grep -Ev "mysql$|_schema$" | grep -E '^.{5,}$' > "/tmp/${SERVER}-databases" - else - shw_err "ERROR: please create $CONFFILE so I can know how to connect to $SERVER" - exit 2 - fi -done - -# For every active project, find out its database name, and remove it from the -# database cluster file (to indicate it has been found). -ERRORS=() -for PROJECT in $(awk '$3 ~ /^dedicated/ {next} {print $1}' /tmp/mariadb-services); do - shw_info "Checking namespace '${PROJECT}'." - - # In the case that there are multiple ASB configs for the one project, this will - # return an array with each database in it. - DATABASES=($(oc -n "${PROJECT}" get configmap lagoon-env -o json | jq -r '.data | with_entries(select(.key|match("_DATABASE";"i")))[]' || :)) - - if [ ${#DATABASES[@]} -eq 0 ]; then - shw_err " > Some problem with ${PROJECT}" - ERRORS+=("${PROJECT}") - else - # Iterate over the potentially many database names. - for (( i=0; i<${#DATABASES[@]}; i++ )) ; do - # @TODO it would be technically possible to have the 2 databases spread - # across multiple database clusters; this code assumes a single project - # uses a single database cluster. - DBHOST=$(grep --max-count=1 "^${PROJECT}[[:space:]]" /tmp/mariadb-services | awk '{print $3}') - shw_warn " > Found database '${DATABASES[$i]}' on host '${DBHOST}'." - sed -i.bak -e "/${DATABASES[$i]}/d" "/tmp/${DBHOST}-databases" - done - fi -done - -echo; echo -if [ ${#ERRORS[@]} -gt 0 ]; then - shw_info "These projects could not be adequately checked:" - printf "%s\\n" "${ERRORS[@]}" - echo -fi - -for SERVER in $SERVERS; do - CONFFILE="${HOME}/.my.cnf-${SERVER}" - echo - shw_info "Orphaned databases for '${SERVER}'" - - # List server uptime.
- shw_grey "MySQL uptime (last_update can only ever be this old)" - mysql --defaults-file="${CONFFILE}" -e "SELECT TIME_FORMAT(SEC_TO_TIME(VARIABLE_VALUE ),'%Hh %im') as Uptime from performance_schema.global_status where VARIABLE_NAME='Uptime';" - - rm -f /tmp/${SERVER}-databases-drop - while IFS= read -r line || [[ -n "$line" ]]; do - shw_info " $line" - echo -n " - Last updated: " - mysql --defaults-file="${CONFFILE}" -se "SELECT from_unixtime(UNIX_TIMESTAMP(MAX(UPDATE_TIME))) as last_update FROM information_schema.tables WHERE TABLE_SCHEMA IN ('$line');" - echo -n " - Table count: " - mysql --defaults-file="${CONFFILE}" -se "SELECT COUNT(1) AS TableCount FROM information_schema.tables WHERE table_schema = '$line';" - echo "DROP DATABASE \`$line\`;" >> /tmp/${SERVER}-databases-drop - done < "/tmp/${SERVER}-databases" - - if [ -f "/tmp/${SERVER}-databases-drop" ]; then - shw_grey "To remove these databases:" - cat /tmp/${SERVER}-databases-drop - fi -done diff --git a/helpers/shared-to-shared-migrate.sh b/helpers/shared-to-shared-migrate.sh deleted file mode 100755 index f734e3d663..0000000000 --- a/helpers/shared-to-shared-migrate.sh +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env bash - -# -# What this script is for -# ======================= -# This script will migrate a database user, access, database and contents from -# an existing cluster to a destination cluster. -# -# At the moment, this is geared towards the Ansible Service Broker, but likely -# can be modified in the future to work with the DBaaS operator. -# -# It has been used successfully to migrate databases between RDS clusters. -# -# There are a whole bunch of checks after the migration to check to ensure the -# migration was a success. Likely you should do additional testing as well. -# -# Requirements -# ============ -# * You are logged into OpenShift CLI and have access to the NAMESPACE you want -# to migrate. -# * You have a `.my.cnf` file for the destination database cluster. -# * If your destination database cluster is not directly accessible, then you -# have created SSH tunnels to expose them on a local port. 
-# -# How to get your existing ASB root credentials -# ============================================= -# oc -n openshift-ansible-service-broker get secret/lagoon-dbaas-db-credentials -o json | jq '.data | map_values(@base64d)' -# -# How to create a `.my.cnf` file -# ============================== -# ~/.my.cnf-shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com -# [client] -# host=127.0.0.1 -# port=33007 -# user=root -# password=banana2 -# -# How to create an SSH tunnel through a jump box to your database cluster -# ======================================================================= -# ssh -L 33007:shared-cluster.cluster-banana.ap-southeast-2.rds.amazonaws.com:3306 jumpbox.aws.amazee.io -# -# Example command 1 -# ================= -# ./helpers/shared-to-shared-migrate.sh \ -# --destination shared-cluster.cluster-apple.ap-southeast-2.rds.amazonaws.com \ -# --replica shared-cluster.cluster-r0-apple.ap-southeast-2.rds.amazonaws.com \ -# --namespace NAMESPACE \ -# --dry-run -# -# Example command 2 -# ================= -# namespaces=" -# foo-example-com-production -# bar-example-com-production -# baz-example-com-production -# quux-example-com-production -# " -# for namespace in $namespaces; do -# ./helpers/shared-to-shared-migrate.sh \ -# --dry-run \ -# --namespace "$namespace" \ -# --destination shared-mysql-production-1-cluster.cluster-plum.ap-southeast-2.rds.amazonaws.com \ -# --replica shared-mysql-production-1-cluster.cluster-ro-plum.ap-southeast-2.rds.amazonaws.com -# done -# -set -euo pipefail - -# Initialize our own variables: -DESTINATION_CLUSTER="" -REPLICA_CLUSTER="" -NAMESPACE="" -DRY_RUN="" -TIMESTAMP=$(date +%s) - -# Colours. -shw_grey () { - tput bold - tput setaf 0 - echo "$@" - tput sgr0 -} -shw_norm () { - tput bold - tput setaf 9 - echo "$@" - tput sgr0 -} -shw_info () { - tput bold - tput setaf 4 - echo "$@" - tput sgr0 -} -shw_warn () { - tput bold - tput setaf 2 - echo "$@" - tput sgr0 -} -shw_err () { - tput bold - tput setaf 1 - echo "$@" - tput sgr0 -} - -# Parse input arguments. -while [[ $# -gt 0 ]] ; do - case $1 in - -d|--destination) - DESTINATION_CLUSTER="$2" - shift # past argument - shift # past value - ;; - -r|--replica) - REPLICA_CLUSTER="$2" - shift # past argument - shift # past value - ;; - -n|--namespace) - NAMESPACE="$2" - shift # past argument - shift # past value - ;; - --dry-run) - DRY_RUN="TRUE" - shift # past argument - ;; - *) - echo "Invalid Argument: $1" - exit 3 - ;; - esac -done - -shw_grey "================================================" -shw_grey " START_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'" -shw_grey "================================================" -shw_grey " DESTINATION_CLUSTER=$DESTINATION_CLUSTER" -shw_grey " REPLICA_CLUSTER=$REPLICA_CLUSTER" -shw_grey " NAMESPACE=$NAMESPACE" -shw_grey "================================================" - -for util in oc jq mysql; do - if ! command -v ${util} > /dev/null; then - shw_err "Please install ${util}" - exit 1 - fi -done - -CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER} -if [ ! -f "$CONF_FILE" ]; then - shw_err "ERROR: please create $CONF_FILE so I can know how to connect to ${DESTINATION_CLUSTER}" - exit 2 -fi - -if [ "$DRY_RUN" ] ; then - shw_warn "Dry run is enabled, so no network service changes will take place." 
-fi - -# Load the DBaaS credentials for the project -SECRETS=$(oc -n "$NAMESPACE" get secret mariadb-servicebroker-credentials -o json) - -DB_NETWORK_SERVICE=$(echo "$SECRETS" | jq -er '.data.DB_HOST | @base64d') -if echo "$SECRETS" | grep -q DB_READREPLICA_HOSTS ; then - DB_READREPLICA_HOSTS=$(echo "$SECRETS" | jq -er '.data.DB_READREPLICA_HOSTS | @base64d') -else - DB_READREPLICA_HOSTS="" -fi -DB_USER=$(echo "$SECRETS" | jq -er '.data.DB_USER | @base64d') -DB_PASSWORD=$(echo "$SECRETS" | jq -er '.data.DB_PASSWORD | @base64d') -DB_NAME=$(echo "$SECRETS" | jq -er '.data.DB_NAME | @base64d') -DB_PORT=$(echo "$SECRETS" | jq -er '.data.DB_PORT | @base64d') - -shw_grey "================================================" -shw_grey " DB_NETWORK_SERVICE=$DB_NETWORK_SERVICE" -shw_grey " DB_READREPLICA_HOSTS=$DB_READREPLICA_HOSTS" -shw_grey " DB_USER=$DB_USER" -shw_grey " DB_PASSWORD=$DB_PASSWORD" -shw_grey " DB_NAME=$DB_NAME" -shw_grey " DB_PORT=$DB_PORT" -shw_grey "================================================" - -# Ensure there is a database in the destination. -shw_info "> Preparing Database, User, and permissions on destination" -shw_info "================================================" -CONF_FILE=${HOME}/.my.cnf-${DESTINATION_CLUSTER} -mysql --defaults-file="$CONF_FILE" -se "CREATE DATABASE IF NOT EXISTS \`${DB_NAME}\`;" -mysql --defaults-file="$CONF_FILE" -se "CREATE USER IF NOT EXISTS \`${DB_USER}\`@'%' IDENTIFIED BY '${DB_PASSWORD}';" -mysql --defaults-file="$CONF_FILE" -se "GRANT ALL ON \`${DB_NAME}\`.* TO \`${DB_USER}\`@'%';" -mysql --defaults-file="$CONF_FILE" -se "FLUSH PRIVILEGES;" - -# Verify access. -shw_info "> Verify MySQL access for the new user" -shw_info "================================================" -mysql --defaults-file="$CONF_FILE" -e "SELECT * FROM mysql.db WHERE Db = '${DB_NAME}'\G;" - -# Dump the database inside the CLI pod. -POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -r '.items[0].metadata.name // empty') -if [ -z "$POD" ]; then - shw_warn "No running cli pod in namespace $NAMESPACE" - shw_warn "Scaling up 1 cli DeploymentConfig pod" - oc -n "$NAMESPACE" scale dc cli --replicas=1 --timeout=2m - sleep 32 # hope for timely scheduling - POD=$(oc -n "$NAMESPACE" get pods -o json --field-selector=status.phase=Running -l service=cli | jq -er '.items[0].metadata.name') -fi -shw_info "> Dumping database $DB_NAME on pod $POD on host $DB_NETWORK_SERVICE" -shw_info "================================================" -oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysqldump -h '$DB_NETWORK_SERVICE' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' > /tmp/migration.sql" -oc -n "$NAMESPACE" exec "$POD" -- ls -lh /tmp/migration.sql -oc -n "$NAMESPACE" exec "$POD" -- head -n 5 /tmp/migration.sql -oc -n "$NAMESPACE" exec "$POD" -- tail -n 5 /tmp/migration.sql -shw_norm "> Dump is done" -shw_norm "================================================" - -# Import to new database. -shw_info "> Importing the dump into ${DESTINATION_CLUSTER}" -shw_info "================================================" -oc -n "$NAMESPACE" exec "$POD" -- bash -c "time mysql -h '$DESTINATION_CLUSTER' -u '$DB_USER' -p'$DB_PASSWORD' '$DB_NAME' < /tmp/migration.sql" -oc -n "$NAMESPACE" exec "$POD" -- rm /tmp/migration.sql - -shw_norm "> Import is done" -shw_norm "================================================" - -# Alter the network service(s). 
-shw_info "> Altering the Network Service $DB_NETWORK_SERVICE to point at $DESTINATION_CLUSTER" -shw_info "================================================" -ORIGINAL_DB_HOST=$(oc -n "$NAMESPACE" get "svc/$DB_NETWORK_SERVICE" -o json --export | tee "/tmp/$NAMESPACE-svc.json" | jq -er '.spec.externalName') -if [ "$DRY_RUN" ] ; then - echo "**DRY RUN**" -else - oc -n "$NAMESPACE" patch "svc/$DB_NETWORK_SERVICE" -p "{\"spec\":{\"externalName\": \"$DESTINATION_CLUSTER\"}}" -fi -if [ "$DB_READREPLICA_HOSTS" ]; then - shw_info "> Altering the Network Service $DB_READREPLICA_HOSTS to point at $REPLICA_CLUSTER" - shw_info "================================================" - ORIGINAL_DB_READREPLICA_HOSTS=$(oc -n "$NAMESPACE" get "svc/$DB_READREPLICA_HOSTS" -o json --export | tee "/tmp/$NAMESPACE-svc-replica.json" | jq -er '.spec.externalName') - if [ "$DRY_RUN" ] ; then - echo "**DRY RUN**" - else - oc -n "$NAMESPACE" patch "svc/$DB_READREPLICA_HOSTS" -p "{\"spec\":{\"externalName\": \"$REPLICA_CLUSTER\"}}" - fi -fi - -# Unsure what if any delay there is in this to take effect, but 1 second sounds -# completely reasonable. -sleep 1 - -# Verify the correct RDS cluster. -shw_info "> Output the RDS cluster that Drush is connecting to" -shw_info "================================================" -oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush sqlq 'SELECT @@aurora_server_id;'" - -# Drush status. -shw_info "> Drush status" -shw_info "================================================" -oc -n "$NAMESPACE" exec "$POD" -- bash -c "drush status" - -# Get routes, and ensure a cache bust works. -ROUTE=$(oc -n "$NAMESPACE" get routes -o json | jq -er '.items[0].spec.host') -shw_info "> Testing the route https://${ROUTE}/?${TIMESTAMP}" -shw_info "================================================" -curl -skLIXGET "https://${ROUTE}/?${TIMESTAMP}" \ - -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36" \ - --cookie "NO_CACHE=1" | grep -E "HTTP|Cache|Location|LAGOON" || true - -shw_grey "================================================" -shw_grey "" -shw_grey "In order to rollback this change, edit the Network Service(s) like so:" -shw_grey "" -shw_grey "oc -n $NAMESPACE patch svc/$DB_NETWORK_SERVICE -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_HOST\"}}'" -if [ "$DB_READREPLICA_HOSTS" ]; then - shw_grey "oc -n $NAMESPACE patch svc/$DB_READREPLICA_HOSTS -p '{\"spec\":{\"externalName\": \"$ORIGINAL_DB_READREPLICA_HOSTS\"}}'" -fi - -echo "" -shw_grey "================================================" -shw_grey " END_TIMESTAMP='$(date +%Y-%m-%dT%H:%M:%S%z)'" -shw_grey "================================================" -shw_norm "Done in $SECONDS seconds" -exit 0 diff --git a/helpers/sharedmigrate.sh b/helpers/sharedmigrate.sh deleted file mode 100755 index 973ea90645..0000000000 --- a/helpers/sharedmigrate.sh +++ /dev/null @@ -1,200 +0,0 @@ -#!/bin/sh - -for util in oc svcat jq; do -which ${util} > /dev/null -if [ $? -gt 0 ]; then - echo "please install ${util}" - exit 1 -fi -done; - -usage() { - cat << EOF - ${0}: migrate a mariadb servicebroker to another mariadb servicebroker - This script is useful when needing to change either the class or the plan - of and existing service broker. - By default, it will use: - 'lagoon-dbaas-mariadb-apb' as the class, - 'production' as the plan, - current openshift context as the namespace, and - first servicebroker in the namespace. 
- - when completed, run with -x to delete migration pvc, dc and serviceaccount. - - e.g: $0 -n mysite-devel -c lagoon-dbaas-mariadb-apb -p development -i mariadb - $0 -n mysite-devel -x -EOF -} - -# n- namespace -# c- class ( lagoon-dbaas-mariadb-apb ) -# p- plan ( production / stage ) - -args=`getopt n:c:p:i:xh $*` -if [[ $# -eq 0 ]]; then - usage - exit -fi - -# set some defaults -NAMESPACE=$(oc project -q) -PLAN=production -CLASS=lagoon-dbaas-mariadb-apb - -set -- $args -for i -do - case "$i" in - -n) - NAMESPACE="$2"; shift; - shift;; - -c) - CLASS="$2"; shift; - shift;; - -p) - PLAN="$2"; shift; - shift;; - -i) - INSTANCE="$2"; shift; - shift;; - -h) - usage - exit 0 - shift;; - - -x) - echo "cleaning up " - oc -n ${NAMESPACE} delete dc/migrator - oc -n ${NAMESPACE} delete pvc/migrator - oc -n ${NAMESPACE} adm policy remove-scc-from-user privileged -z migrator - oc -n ${NAMESPACE} delete serviceaccount migrator - exit 0 - shift;; - - --) - shift; break;; - esac -done - -# set a default instance, if not specified. -if [ -z ${INSTANCE+x} ]; then - INSTANCE=$(svcat -n ${NAMESPACE} get instance -o json |jq -r '.items[0].metadata.name') - echo "instance not specified, using $INSTANCE" -fi - -# verify instance exists -svcat -n ${NAMESPACE} get instance $INSTANCE -if [ $? -gt 0 ] ;then - echo "no instance found" - exit 2 -fi - -echo "Verifying secret ${INSTANCE}-servicebroker-credentials " -oc -n ${NAMESPACE} get --insecure-skip-tls-verify secret ${INSTANCE}-servicebroker-credentials || svcat bind $INSTANCE --name ${INSTANCE}-servicebroker-credentials - -# validate $broker - -oc -n ${NAMESPACE} create serviceaccount migrator -oc -n ${NAMESPACE} adm policy add-scc-to-user privileged -z migrator - -oc -n ${NAMESPACE} run --image mariadb --env="MYSQL_RANDOM_ROOT_PASSWORD=yes" migrator - -# pause and make some changes -oc -n ${NAMESPACE} rollout pause deploymentconfig/migrator - -# We don't care about the database in /var/lib/mysql; just privilege it and let it do its thing. -oc -n ${NAMESPACE} patch deploymentconfig/migrator -p '{"spec":{"template":{"spec":{"serviceAccountName": "migrator"}}}}' -oc -n ${NAMESPACE} patch deploymentconfig/migrator -p '{"spec":{"template":{"spec":{"securityContext":{ "privileged": "true", "runAsUser": 0 }}}}}' -oc -n ${NAMESPACE} patch deploymentconfig/migrator -p '{"spec":{"strategy":{"type":"Recreate"}}}' - - -# create a volume to store the dump. 
-cat << EOF | oc -n ${NAMESPACE} apply -f - - apiVersion: v1 - items: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: migrator - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi - kind: List - metadata: {} -EOF - -oc -n ${NAMESPACE} volume deploymentconfig/migrator --add --name=migrator --type=persistentVolumeClaim --claim-name=migrator --mount-path=/migrator - - -# look up the secret from the instance and add it to the new container -SECRET=$(svcat -n ${NAMESPACE} get binding -o json |jq -r ".items[] | select (.spec.instanceRef.name == \"$INSTANCE\") | .spec.secretName") -echo secret: $SECRET -oc -n ${NAMESPACE} set env --from=secret/${SECRET} --prefix=OLD_ dc/migrator - -oc -n ${NAMESPACE} rollout resume deploymentconfig/migrator -oc -n ${NAMESPACE} rollout latest deploymentconfig/migrator -oc -n ${NAMESPACE} rollout status deploymentconfig/migrator --watch - -sleep 20; -# Do the dump: -POD=$(oc -n ${NAMESPACE} get pods -o json --show-all=false -l run=migrator | jq -r '.items[].metadata.name') - -oc -n ${NAMESPACE} exec $POD -- bash -c 'time mysqldump -h $OLD_DB_HOST -u $OLD_DB_USER -p${OLD_DB_PASSWORD} $OLD_DB_NAME > /migrator/migration.sql' - -echo "DUMP IS DONE;" -oc -n ${NAMESPACE} exec $POD -- ls -al /migrator/migration.sql || exit 1 -oc -n ${NAMESPACE} exec $POD -- head /migrator/migration.sql -oc -n ${NAMESPACE} exec $POD -- tail /migrator/migration.sql || exit 1 - - -printf "\n\n\nLAST CHANCE TO CANCEL BEFORE I DELETE THE OLD SERVICEBROKER.\n\n" -echo "sleeping 30 seconds..." -sleep 30 - -# delete the old servicebroker -time svcat -n ${NAMESPACE} unbind $INSTANCE -time svcat -n ${NAMESPACE} deprovision $INSTANCE --wait --interval 2s --timeout=1h -echo "===== old instance deprovisioned, waiting 30 seconds." -sleep 30; - -echo "===== provisioning new $CLASS of plan $PLAN" -time svcat -n ${NAMESPACE} provision $INSTANCE --class $CLASS --plan $PLAN --wait -echo " and binding" -time svcat -n ${NAMESPACE} bind $INSTANCE --name ${INSTANCE}-servicebroker-credentials --wait - -until oc get -n ${NAMESPACE} secret ${INSTANCE}-servicebroker-credentials -do - echo "Secret ${INSTANCE}-servicebroker-credentials not available yet, waiting for 5 secs" - sleep 5 -done - - -echo "rolling out migrator again so the secrets get propagated."
-oc -n ${NAMESPACE} rollout latest deploymentconfig/migrator -oc -n ${NAMESPACE} rollout status deploymentconfig/migrator --watch - -sleep 10; - -# Restore the dump into the new servicebroker database: -POD=$(oc -n ${NAMESPACE} get pods -o json --show-all=false -l run=migrator | jq -r '.items[].metadata.name') - -oc -n ${NAMESPACE} exec $POD -- bash -c 'cat /migrator/migration.sql |sed -e "s/DEFINER[ ]*=[ ]*[^*]*\*/\*/" | mysql -h $OLD_DB_HOST -u $OLD_DB_USER -p${OLD_DB_PASSWORD} $OLD_DB_NAME' - - -# Load credentials out of secret -SECRETS=$(mktemp).yaml -echo "Exporting ${INSTANCE}-servicebroker-credentials into $SECRETS " -oc -n ${NAMESPACE} get --insecure-skip-tls-verify secret ${INSTANCE}-servicebroker-credentials -o yaml > $SECRETS - -DB_HOST=$(cat $SECRETS | shyaml get-value data.DB_HOST | base64 -D) -DB_USER=$(cat $SECRETS | shyaml get-value data.DB_USER | base64 -D) -DB_PASSWORD=$(cat $SECRETS | shyaml get-value data.DB_PASSWORD | base64 -D) -DB_NAME=$(cat $SECRETS | shyaml get-value data.DB_NAME | base64 -D) -DB_PORT=$(cat $SECRETS | shyaml get-value data.DB_PORT | base64 -D) - -SERVICE_NAME_UPPERCASE=$(echo $INSTANCE | tr [:lower:] [:upper:]) -oc -n $NAMESPACE patch configmap lagoon-env \ - -p "{\"data\":{\"${SERVICE_NAME_UPPERCASE}_HOST\":\"${DB_HOST}\", \"${SERVICE_NAME_UPPERCASE}_USERNAME\":\"${DB_USER}\", \"${SERVICE_NAME_UPPERCASE}_PASSWORD\":\"${DB_PASSWORD}\", \"${SERVICE_NAME_UPPERCASE}_DATABASE\":\"${DB_NAME}\", \"${SERVICE_NAME_UPPERCASE}_PORT\":\"${DB_PORT}\"}}" diff --git a/helpers/update-versions.yml b/helpers/update-versions.yml deleted file mode 100644 index db508c68fa..0000000000 --- a/helpers/update-versions.yml +++ /dev/null @@ -1,58 +0,0 @@ -# Lagoon Version Update Helper -# -# Helper to update Version inside Dockerfiles -# Update versions below in `vars` and execute locally -# -# ansible-playbook helpers/update-versions.yml -- name: update versions - hosts: 127.0.0.1 - connection: local - vars: - # Newrelic - https://docs.newrelic.com/docs/release-notes/agent-release-notes/php-release-notes/ - NEWRELIC_VERSION: '9.12.0.268' - # Composer - https://getcomposer.org/download/ - COMPOSER_VERSION: '1.10.9' - COMPOSER_HASH_SHA256: '70d6b9c3e0774b398a372dcb7f89dfe22fc25884e6e09ebf277286dd64cfaf35' - # Drupal Console Launcher - https://github.com/hechoendrupal/drupal-console-launcher/releases - DRUPAL_CONSOLE_LAUNCHER_VERSION: 1.9.4 - DRUPAL_CONSOLE_LAUNCHER_SHA: b7759279668caf915b8e9f3352e88f18e4f20659 - # Drush - https://github.com/drush-ops/drush/releases - DRUSH_VERSION: 8.3.5 - # Drush Launcher Version - https://github.com/drush-ops/drush-launcher/releases - DRUSH_LAUNCHER_VERSION: 0.6.0 - tasks: - - name: update NEWRELIC_VERSION - lineinfile: - path: "{{ lookup('env', 'PWD') }}/images/php/fpm/Dockerfile" - regexp: 'ENV NEWRELIC_VERSION=' - line: 'ENV NEWRELIC_VERSION={{ NEWRELIC_VERSION }}' - - name: update COMPOSER_VERSION - lineinfile: - path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile" - regexp: 'ENV COMPOSER_VERSION=' - line: 'ENV COMPOSER_VERSION={{ COMPOSER_VERSION }} \' - - name: update COMPOSER_HASH_SHA256 - lineinfile: - path: "{{ lookup('env', 'PWD') }}/images/php/cli/Dockerfile" - regexp: 'COMPOSER_HASH_SHA256=' - line: ' COMPOSER_HASH_SHA256={{ COMPOSER_HASH_SHA256 }}' - - name: update DRUPAL_CONSOLE_LAUNCHER_VERSION - lineinfile: - path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" - regexp: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION=' - line: 'ENV DRUPAL_CONSOLE_LAUNCHER_VERSION={{ DRUPAL_CONSOLE_LAUNCHER_VERSION }} \' - - name: update DRUPAL_CONSOLE_LAUNCHER_SHA -
lineinfile: - path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" - regexp: 'DRUPAL_CONSOLE_LAUNCHER_SHA=' - line: ' DRUPAL_CONSOLE_LAUNCHER_SHA={{ DRUPAL_CONSOLE_LAUNCHER_SHA }} \' - - name: update DRUSH_VERSION - lineinfile: - path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" - regexp: 'DRUSH_VERSION=' - line: ' DRUSH_VERSION={{ DRUSH_VERSION }} \' - - name: update DRUSH_LAUNCHER_VERSION - lineinfile: - path: "{{ lookup('env', 'PWD') }}/images/php/cli-drupal/Dockerfile" - regexp: 'DRUSH_LAUNCHER_VERSION=' - line: ' DRUSH_LAUNCHER_VERSION={{ DRUSH_LAUNCHER_VERSION }} \'
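As a rough sanity check after running the playbook above, a grep along these lines can confirm that the managed version lines were actually rewritten in the Dockerfiles the tasks point at (this is only a sketch: the Dockerfile paths are taken from the tasks, and the surrounding Dockerfile contents are assumed rather than shown in this diff):

# list the version lines the playbook manages, run from the repository root
grep -nE 'NEWRELIC_VERSION|COMPOSER_VERSION|COMPOSER_HASH_SHA256|DRUPAL_CONSOLE_LAUNCHER_(VERSION|SHA)|DRUSH(_LAUNCHER)?_VERSION' \
  images/php/fpm/Dockerfile images/php/cli/Dockerfile images/php/cli-drupal/Dockerfile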