From fda1acd93f68838b49a4548e16c1f3c9d58dd758 Mon Sep 17 00:00:00 2001
From: Ben Jackson
Date: Thu, 21 Oct 2021 09:42:47 +1100
Subject: [PATCH 1/3] refactor: deprecate the `lagoon/service` label selector
 in favour of the Lagoon 2 `lagoon.sh` labels; check the k8s label selector
 first and fall back to openshift. Adjust the check interval to 5s instead of
 1s, but retain the same total duration

---
 services/ssh/home/rsh.sh | 156 ++++++++++-----------------------------
 1 file changed, 41 insertions(+), 115 deletions(-)

diff --git a/services/ssh/home/rsh.sh b/services/ssh/home/rsh.sh
index 6badac6835..e7b9d92633 100755
--- a/services/ssh/home/rsh.sh
+++ b/services/ssh/home/rsh.sh
@@ -19,8 +19,8 @@ shift 3
 # get the value from an envvar override (can be added to the ssh deployment)
 # default to false so we don't hold up the ssh for a long time
 WAIT_TO_UNIDLE_SERVICES=${WAIT_TO_UNIDLE_SERVICES:-false}
-# set a timeout of 600 for waiting for a pod to start (the waits are 1 second interval, so 10 minutes timeout)
-SSH_CHECK_TIMEOUT=${SSH_CHECK_TIMEOUT:-600}
+# set a timeout of 120 for waiting for a pod to start (checks run at 5 second intervals, so the same 10 minute timeout)
+SSH_CHECK_TIMEOUT=${SSH_CHECK_TIMEOUT:-120}
 
 # generate a random uuid for this request to help track in logs
 # also the uuid will be given to users in any errors so they can provide it to help with tracking too if required
@@ -80,9 +80,9 @@ ADMIN_GRAPHQL="query getEnvironmentByOpenshiftProjectName {
 ADMIN_QUERY=$(echo $ADMIN_GRAPHQL | sed 's/"/\\"/g' | sed 's/\\n/\\\\n/g' | awk -F'\n' '{if(NR == 1) {printf $0} else {printf "\\n"$0}}')
 ADMIN_ENVIRONMENT=$(curl -s -XPOST -H 'Content-Type: application/json' -H "$ADMIN_BEARER" "${GRAPHQL_ENDPOINT:-api:3000/graphql}" -d "{\"query\": \"$ADMIN_QUERY\"}")
 
-OPENSHIFT_CONSOLE=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.consoleUrl')
-OPENSHIFT_TOKEN=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.token')
-OPENSHIFT_NAME=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.name')
+CLUSTER_CONSOLE=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.consoleUrl')
+CLUSTER_TOKEN=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.token')
+CLUSTER_NAME=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.name')
 
 ##
 ## Check if we have a service and container given, if yes use them.
@@ -100,42 +100,28 @@ else
   SERVICE=cli
 fi
 
-echo "${UUID}: Incoming Remote Shell Connection: project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}' container='${CONTAINER}' command='$*'" >> /proc/1/fd/1
+echo "${UUID}: Incoming Remote Shell Connection: project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}' container='${CONTAINER}' command='$*'" >> /proc/1/fd/1
 
 # This only happens on local development with minishift.
 # Login as developer:developer and get the token
-if [[ $OPENSHIFT_TOKEN == "null" ]]; then
-  KUBECONFIG="/tmp/kube" /usr/bin/oc --insecure-skip-tls-verify login -p developer -u developer ${OPENSHIFT_CONSOLE} > /dev/null
-  OPENSHIFT_TOKEN=$(KUBECONFIG="/tmp/kube" oc --insecure-skip-tls-verify whoami -t)
+if [[ $CLUSTER_TOKEN == "null" ]]; then
+  KUBECONFIG="/tmp/kube" /usr/bin/oc --insecure-skip-tls-verify login -p developer -u developer ${CLUSTER_CONSOLE} > /dev/null
+  CLUSTER_TOKEN=$(KUBECONFIG="/tmp/kube" oc --insecure-skip-tls-verify whoami -t)
 fi
 
-OC="/usr/bin/oc --insecure-skip-tls-verify -n ${PROJECT} --token=${OPENSHIFT_TOKEN} --server=${OPENSHIFT_CONSOLE} "
+OC="/usr/bin/oc --insecure-skip-tls-verify -n ${PROJECT} --token=${CLUSTER_TOKEN} --server=${CLUSTER_CONSOLE} "
 
-# If there is a deploymentconfig for the given service
-if [[ $($OC get deploymentconfigs -l service=${SERVICE} 2> /dev/null) ]]; then
-  DEPLOYMENTCONFIG=$($OC get deploymentconfigs -l service=${SERVICE} -o name)
-  # If the deploymentconfig is scaled to 0, scale to 1
-  if [[ $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.replicas}}') == "0" ]]; then
-    echo "${UUID}: Attempting to scale deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-    $OC scale --replicas=1 ${DEPLOYMENTCONFIG} >/dev/null 2>&1
-
-    # Wait until the scaling is done
-    while [[ ! $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.readyReplicas}}') == "1" ]]
-    do
-      sleep 1
-    done
-  fi
-  echo "${UUID}: Deployment is running deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-fi
+IS_KUBERNETES=false
 
 # If there is a deployment for the given service searching for lagoon.sh labels
 if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; then
+  IS_KUBERNETES=true
   # get any other deployments that may have been idled by the idler and unidle them if required
   # this only needs to be done for kubernetes
   # we do this first to give the services a bit of time to unidle before starting the one that was requested
   DEPLOYMENTS=$($OC get deployments -l "idling.amazee.io/watch=true" -o name)
   if [ ! -z "${DEPLOYMENTS}" ]; then
-    echo "${UUID}: Environment is idled attempting to scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+    echo "${UUID}: Environment is idled attempting to scale up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
     # loop over the deployments and unidle them
     for DEP in ${DEPLOYMENTS}
    do
@@ -146,7 +132,7 @@ if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; t
       if [ ! -z "$REPLICAS" ]; then
         REPLICAS=1
       fi
-      echo "${UUID}: Attempting to scale deployment='${DEP}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+      echo "${UUID}: Attempting to scale deployment='${DEP}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
       $OC scale --replicas=${REPLICAS} ${DEP} >/dev/null 2>&1
     fi
   done
@@ -158,19 +144,19 @@ if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; t
       # WAIT_TO_UNIDLE_SERVICES will default to false so that it just scales the deployments
       # and won't wait for them to be ready, but if set to true, it will wait for `readyReplicas` to be 1
       if [[ "$WAIT_TO_UNIDLE_SERVICES" =~ [Tt][Rr][Uu][Ee] ]]; then
-        echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+        echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
         SSH_CHECK_COUNTER=0
         until [[ $($OC get ${DEP} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
         do
           if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
             let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
-            sleep 1
+            sleep 5
           else
             echo "${UUID}: Deployment '${DEP}' took too long to start pods"
             exit 1
           fi
         done
-        echo "${UUID}: Environment scaled up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+        echo "${UUID}: Environment scaled up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
       fi
     done
   fi
@@ -181,7 +167,7 @@ if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; t
   # If the deployment is scaled to 0, scale to 1
   # .status.replicas doesn't exist on a scaled to 0 deployment in k8s so assume it is 0 if nothing is returned
   if [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.replicas // 0') == "0" ]]; then
-    echo "${UUID}: Attempting to scale deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+    echo "${UUID}: Attempting to scale deployment='${DEPLOYMENT}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
     $OC scale --replicas=1 ${DEPLOYMENT} >/dev/null 2>&1
   fi
   # Wait until the scaling is done
@@ -190,100 +176,40 @@ if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; t
   do
     if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
       let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
-      sleep 1
+      sleep 5
     else
       echo "${UUID}: Pod for ${SERVICE} took too long to start"
       exit 1
     fi
   done
-  echo "${UUID}: Deployment is running deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+  echo "${UUID}: Deployment is running deployment='${DEPLOYMENT}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
 fi
 
-# If there is a deployment for the given service search for lagoon labels
-# @DEPRECATED: Remove with Lagoon 2.0.0
-if [[ $($OC get deployment -l lagoon/service=${SERVICE} 2> /dev/null) ]]; then
-  # get any other deployments that may have been idled by the idler and unidle them if required
-  # this only needs to be done for kubernetes
-  # we do this first to give the services a bit of time to unidle before starting the one that was requested
-  DEPLOYMENTS=$($OC get deployments -l "idling.amazee.io/watch=true" -o name)
-  if [ ! -z "${DEPLOYMENTS}" ]; then
-    echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-    # loop over the deployments and unidle them
-    for DEP in ${DEPLOYMENTS}
-    do
-      # if the deployment is idled, unidle it :)
-      DEP_JSON=$($OC get ${DEP} -o json)
-      if [ $(echo "$DEP_JSON" | jq -r '.status.replicas // 0') == "0" ]; then
-        REPLICAS=$(echo "$DEP_JSON" | jq -r '.metadata.annotations."idling.amazee.io/unidle-replicas" // 1')
-        if [ ! -z "$REPLICAS" ]; then
-          REPLICAS=1
-        fi
-        echo "${UUID}: Attempting to scale deployment='${DEP}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-        $OC scale --replicas=${REPLICAS} ${DEP} >/dev/null 2>&1
-      fi
-    done
-    # then if we have to wait for them to start, do that here
-    for DEP in ${DEPLOYMENTS}
-    do
-      # for unidling an entire environment and waiting for the number of `readyReplicas`
-      # to be 1 for each deployment, could add considerable delays for the ssh connection to establish.
-      # WAIT_TO_UNIDLE_SERVICES will default to false so that it just scales the deployments
-      # and won't wait for them to be ready, but if set to true, it will wait for `readyReplicas` to be 1
-      if [[ "$WAIT_TO_UNIDLE_SERVICES" =~ [Tt][Rr][Uu][Ee] ]]; then
-        echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-        SSH_CHECK_COUNTER=0
-        until [[ $($OC get ${DEP} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
-        do
-          if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
-            let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
-            sleep 1
-          else
-            echo "${UUID}: Deployment '${DEP}' took too long to start pods"
-            exit 1
-          fi
-        done
-        echo "${UUID}: Environment scaled up for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-      fi
-    done
-  fi
-  # then actually unidle the service that was requested and wait for it to be ready if it wasn't already captured above
-  # doing this means if the service hasn't been idled with the `idling.amazee.io/watch=true` label
-  # we can still establish a connection
-  DEPLOYMENT=$($OC get deployment -l lagoon/service=${SERVICE} -o name)
-  # If the deployment is scaled to 0, scale to 1
-  # .status.replicas doesn't exist on a scaled to 0 deployment in k8s so assume it is 0 if nothing is returned
-  if [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.replicas // 0') == "0" ]]; then
-    echo "${UUID}: Attempting to scale up deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-    $OC scale --replicas=1 ${DEPLOYMENT} >/dev/null 2>&1
-  fi
-  # Wait until the scaling is done
-  SSH_CHECK_COUNTER=0
-  until [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
-  do
-    if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
-      let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
-      sleep 1
-    else
-      echo "${UUID}: Pod for ${SERVICE} took too long to start"
-      exit 1
+# If there is a deploymentconfig for the given service, then it isn't kubernetes, it is openshift.
+if [[ "${IS_KUBERNETES}" == "false" ]]; then
+  if [[ $($OC get deploymentconfigs -l service=${SERVICE} 2> /dev/null) ]]; then
+    DEPLOYMENTCONFIG=$($OC get deploymentconfigs -l service=${SERVICE} -o name)
+    # If the deploymentconfig is scaled to 0, scale to 1
+    if [[ $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.replicas}}') == "0" ]]; then
+      echo "${UUID}: Attempting to scale deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+      $OC scale --replicas=1 ${DEPLOYMENTCONFIG} >/dev/null 2>&1
+
+      # Wait until the scaling is done
+      while [[ ! $($OC get ${DEPLOYMENTCONFIG} -o go-template --template='{{.status.readyReplicas}}') == "1" ]]
+      do
+        sleep 1
+      done
     fi
-  done
-  echo "${UUID}: Deployment is running deployment='${DEPLOYMENT}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+    echo "${UUID}: Deployment is running deploymentconfig='${DEPLOYMENTCONFIG}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+  fi
 fi
-
-echo "${UUID}: Getting pod name for exec for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-POD=$($OC get pods -l service=${SERVICE} -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
-
 # Check for newer Helm chart "lagoon.sh" labels
-if [[ ! $POD ]]; then
+echo "${UUID}: Getting pod name for exec for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+if [[ "${IS_KUBERNETES}" == "true" ]]; then
   POD=$($OC get pods -l "lagoon.sh/service=${SERVICE}" -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
-fi
-
-# Check for deprecated Helm chart "lagoon" labels
-# @DEPRECATED: Remove with Lagoon 2.0.0
-if [[ ! $POD ]]; then
-  POD=$($OC get pods -l lagoon/service=${SERVICE} -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
+else
+  POD=$($OC get pods -l service=${SERVICE} -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
 fi
 
 if [[ ! $POD ]]; then
@@ -302,7 +228,7 @@ else
   TTY_PARAMETER=""
 fi
 
-echo "${UUID}: Exec to pod='${POD}' for project='${PROJECT}' openshift='${OPENSHIFT_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
+echo "${UUID}: Exec to pod='${POD}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
 if [[ -z "$*" ]]; then
   exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh
 else

From 3d5f7e3f5cc1abdc7c1e4ce46394f96e9a337f9d Mon Sep 17 00:00:00 2001
From: Ben Jackson
Date: Thu, 21 Oct 2021 09:54:45 +1100
Subject: [PATCH 2/3] refactor: include backwards-compatible check for the
 target cluster

---
 services/ssh/home/rsh.sh | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/services/ssh/home/rsh.sh b/services/ssh/home/rsh.sh
index e7b9d92633..6ab3f781c4 100755
--- a/services/ssh/home/rsh.sh
+++ b/services/ssh/home/rsh.sh
@@ -69,6 +69,13 @@ fi
 ADMIN_BEARER="Authorization: bearer $API_ADMIN_TOKEN"
 ADMIN_GRAPHQL="query getEnvironmentByOpenshiftProjectName {
   environmentByOpenshiftProjectName(openshiftProjectName: \"$PROJECT\") {
+    project {
+      openshift {
+        consoleUrl
+        token
+        name
+      }
+    }
     openshift {
       consoleUrl
       token
@@ -83,6 +90,12 @@ ADMIN_ENVIRONMENT=$(curl -s -XPOST -H 'Content-Type: application/json' -H "$ADMI
 CLUSTER_CONSOLE=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.consoleUrl')
 CLUSTER_TOKEN=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.token')
 CLUSTER_NAME=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.name')
+# environment-level clusters were introduced in lagoon2.1; if the environment has none (jq prints empty or the literal "null"), fall back to the cluster at the project level
+if [[ -z ${CLUSTER_NAME} ]] || [[ ${CLUSTER_NAME} == "null" ]]; then
+  CLUSTER_CONSOLE=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.project.openshift.consoleUrl')
+  CLUSTER_TOKEN=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.project.openshift.token')
+  CLUSTER_NAME=$(echo $ADMIN_ENVIRONMENT | jq --raw-output '.data.environmentByOpenshiftProjectName.project.openshift.name')
+fi
 
 ##
 ## Check if we have a service and container given, if yes use them.
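A note on the fallback PATCH 2/3 adds: with `jq --raw-output`, a missing or null field is printed as the literal string "null", not as an empty string (the script's existing `$CLUSTER_TOKEN == "null"` check relies on the same behaviour, which is why the fallback condition tests for both). The sketch below walks the resolution order against two hypothetical API payloads; the payloads and cluster names are placeholders for illustration, not real responses.

    #!/bin/bash
    # New-style response: the environment carries its own cluster (lagoon2.1+).
    NEW_STYLE='{"data":{"environmentByOpenshiftProjectName":{"openshift":{"name":"kube-cluster-1"},"project":{"openshift":{"name":"kube-cluster-1"}}}}}'
    # Old-style response: no environment-level cluster, only the project-level one.
    OLD_STYLE='{"data":{"environmentByOpenshiftProjectName":{"openshift":null,"project":{"openshift":{"name":"openshift-legacy"}}}}}'

    for RESPONSE in "$NEW_STYLE" "$OLD_STYLE"; do
      # Prefer the environment-level cluster...
      CLUSTER_NAME=$(echo "$RESPONSE" | jq --raw-output '.data.environmentByOpenshiftProjectName.openshift.name')
      # ...and fall back to the project level when it is absent (empty or "null").
      if [[ -z ${CLUSTER_NAME} ]] || [[ ${CLUSTER_NAME} == "null" ]]; then
        CLUSTER_NAME=$(echo "$RESPONSE" | jq --raw-output '.data.environmentByOpenshiftProjectName.project.openshift.name')
      fi
      echo "resolved cluster: ${CLUSTER_NAME}"
    done

Running it prints "kube-cluster-1" and then "openshift-legacy": environments without their own cluster still resolve through the project, which is the backwards compatibility the commit is after.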
From 1c2ff881e345d2c8bc8bc575cc71a91252f5de35 Mon Sep 17 00:00:00 2001
From: Ben Jackson
Date: Thu, 21 Oct 2021 10:18:57 +1100
Subject: [PATCH 3/3] refactor: add kubectl support, but retain oc for
 openshift

---
 services/ssh/Dockerfile  |  7 ++++++-
 services/ssh/home/rsh.sh | 41 +++++++++++++++++++++++++++-------------
 2 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/services/ssh/Dockerfile b/services/ssh/Dockerfile
index 1ef2882838..f960c657f0 100644
--- a/services/ssh/Dockerfile
+++ b/services/ssh/Dockerfile
@@ -27,7 +27,8 @@ ENV TMPDIR=/tmp \
 ENV LAGOON=ssh \
     OC_VERSION=v3.11.0 \
     OC_HASH=0cbc58b \
-    OC_SHA256=4b0f07428ba854174c58d2e38287e5402964c9a9355f6c359d1242efd0990da3
+    OC_SHA256=4b0f07428ba854174c58d2e38287e5402964c9a9355f6c359d1242efd0990da3 \
+    KUBECTL_VERSION=v1.20.0
 
 COPY services/ssh/libnss-mysql-1.5.tar.gz /tmp/libnss-mysql-1.5.tar.gz
 
@@ -51,6 +52,10 @@ RUN mkdir -p /openshift-origin-client-tools && \
     tar -xzf /tmp/openshift-origin-client-tools.tar -C /tmp/openshift-origin-client-tools --strip-components=1 && \
     install /tmp/openshift-origin-client-tools/oc /usr/bin/oc && rm -rf /tmp/openshift-origin-client-tools && rm -rf /tmp/openshift-origin-client-tools.tar
 
+RUN curl -Lo kubectl "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl" && \
+    chmod +x kubectl && \
+    mv kubectl /usr/bin/kubectl
+
 RUN curl -L https://github.com/krallin/tini/releases/download/v0.18.0/tini -o /sbin/tini && chmod a+x /sbin/tini
 
 # Reproduce behavior of Alpine: Run Bash as sh
diff --git a/services/ssh/home/rsh.sh b/services/ssh/home/rsh.sh
index 6ab3f781c4..e618cc0e00 100755
--- a/services/ssh/home/rsh.sh
+++ b/services/ssh/home/rsh.sh
@@ -123,23 +123,24 @@ if [[ $CLUSTER_TOKEN == "null" ]]; then
 fi
 
 OC="/usr/bin/oc --insecure-skip-tls-verify -n ${PROJECT} --token=${CLUSTER_TOKEN} --server=${CLUSTER_CONSOLE} "
+KUBECTL="/usr/bin/kubectl --insecure-skip-tls-verify -n ${PROJECT} --token=${CLUSTER_TOKEN} --server=${CLUSTER_CONSOLE} "
 
 IS_KUBERNETES=false
 
 # If there is a deployment for the given service searching for lagoon.sh labels
-if [[ $($OC get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; then
+if [[ $($KUBECTL get deployment -l "lagoon.sh/service=${SERVICE}" 2> /dev/null) ]]; then
   IS_KUBERNETES=true
   # get any other deployments that may have been idled by the idler and unidle them if required
   # this only needs to be done for kubernetes
   # we do this first to give the services a bit of time to unidle before starting the one that was requested
-  DEPLOYMENTS=$($OC get deployments -l "idling.amazee.io/watch=true" -o name)
+  DEPLOYMENTS=$($KUBECTL get deployments -l "idling.amazee.io/watch=true" -o name)
   if [ ! -z "${DEPLOYMENTS}" ]; then
     echo "${UUID}: Environment is idled attempting to scale up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
     # loop over the deployments and unidle them
     for DEP in ${DEPLOYMENTS}
     do
       # if the deployment is idled, unidle it :)
-      DEP_JSON=$($OC get ${DEP} -o json)
+      DEP_JSON=$($KUBECTL get ${DEP} -o json)
       if [ $(echo "$DEP_JSON" | jq -r '.status.replicas // 0') == "0" ]; then
         REPLICAS=$(echo "$DEP_JSON" | jq -r '.metadata.annotations."idling.amazee.io/unidle-replicas" // 1')
         if [ ! -z "$REPLICAS" ]; then
@@ -159,7 +160,7 @@
       if [[ "$WAIT_TO_UNIDLE_SERVICES" =~ [Tt][Rr][Uu][Ee] ]]; then
         echo "${UUID}: Environment is idled waiting for scale up for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
         SSH_CHECK_COUNTER=0
-        until [[ $($OC get ${DEP} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
+        until [[ $($KUBECTL get ${DEP} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
         do
           if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
             let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
@@ -176,16 +177,16 @@
   # then actually unidle the service that was requested and wait for it to be ready if it wasn't already captured above
   # doing this means if the service hasn't been idled with the `idling.amazee.io/watch=true` label
   # we can still establish a connection
-  DEPLOYMENT=$($OC get deployment -l "lagoon.sh/service=${SERVICE}" -o name)
+  DEPLOYMENT=$($KUBECTL get deployment -l "lagoon.sh/service=${SERVICE}" -o name)
   # If the deployment is scaled to 0, scale to 1
   # .status.replicas doesn't exist on a scaled to 0 deployment in k8s so assume it is 0 if nothing is returned
-  if [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.replicas // 0') == "0" ]]; then
+  if [[ $($KUBECTL get ${DEPLOYMENT} -o json | jq -r '.status.replicas // 0') == "0" ]]; then
     echo "${UUID}: Attempting to scale deployment='${DEPLOYMENT}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
     $OC scale --replicas=1 ${DEPLOYMENT} >/dev/null 2>&1
   fi
   # Wait until the scaling is done
   SSH_CHECK_COUNTER=0
-  until [[ $($OC get ${DEPLOYMENT} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
+  until [[ $($KUBECTL get ${DEPLOYMENT} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
   do
     if [ $SSH_CHECK_COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
       let SSH_CHECK_COUNTER=SSH_CHECK_COUNTER+1
@@ -220,7 +221,7 @@ fi
 # Check for newer Helm chart "lagoon.sh" labels
 echo "${UUID}: Getting pod name for exec for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
 if [[ "${IS_KUBERNETES}" == "true" ]]; then
-  POD=$($OC get pods -l "lagoon.sh/service=${SERVICE}" -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
+  POD=$($KUBECTL get pods -l "lagoon.sh/service=${SERVICE}" -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
 else
   POD=$($OC get pods -l service=${SERVICE} -o json | jq -r '[.items[] | select(.metadata.deletionTimestamp == null) | select(.status.phase == "Running")] | first | .metadata.name // empty')
 fi
@@ -232,7 +233,11 @@ fi
 
 # If no container defined, load the name of the first container
 if [[ -z ${CONTAINER} ]]; then
-  CONTAINER=$($OC get pod ${POD} -o json | jq --raw-output '.spec.containers[0].name')
+  if [[ "${IS_KUBERNETES}" == "true" ]]; then
+    CONTAINER=$($KUBECTL get pod ${POD} -o json | jq --raw-output '.spec.containers[0].name')
+  else
+    CONTAINER=$($OC get pod ${POD} -o json | jq --raw-output '.spec.containers[0].name')
+  fi
 fi
 
 if [ -t 1 ]; then
@@ -242,8 +247,18 @@ else
 fi
 
 echo "${UUID}: Exec to pod='${POD}' for project='${PROJECT}' cluster='${CLUSTER_NAME}' service='${SERVICE}'" >> /proc/1/fd/1
-if [[ -z "$*" ]]; then
-  exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh
+
+if [[ "${IS_KUBERNETES}" == "true" ]]; then
+  if [[ -z "$*" ]]; then
+    exec $KUBECTL exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh
+  else
+    exec $KUBECTL exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh -c "$*"
+  fi
 else
-  exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh -c "$*"
-fi
+  if [[ -z "$*" ]]; then
+    exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh
+  else
+    exec $OC exec ${POD} -c ${CONTAINER} -i ${TTY_PARAMETER} -- sh -c "$*"
+  fi
+
+fi
\ No newline at end of file
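All three patches converge on a single scale-and-wait idiom: scale the Deployment (or DeploymentConfig) to its target replica count, then poll `.status.readyReplicas` until it is non-zero or the attempt budget runs out (120 attempts at 5-second intervals, the same 10-minute ceiling as the old 600 x 1s loop). A condensed sketch of that idiom follows; it is illustrative only, `wait_for_ready` is a hypothetical helper name rather than anything in the series, and it assumes $KUBECTL, $PROJECT, $CLUSTER_TOKEN and $CLUSTER_CONSOLE are set up the same way rsh.sh sets them.

    #!/bin/bash
    SSH_CHECK_TIMEOUT=${SSH_CHECK_TIMEOUT:-120}
    KUBECTL="/usr/bin/kubectl --insecure-skip-tls-verify -n ${PROJECT} --token=${CLUSTER_TOKEN} --server=${CLUSTER_CONSOLE}"

    wait_for_ready() {
      local DEPLOYMENT=$1
      local COUNTER=0
      # .status.readyReplicas is absent while a deployment is scaled to zero,
      # so `// 0` makes jq report 0 instead of the literal "null"
      until [[ $($KUBECTL get ${DEPLOYMENT} -o json | jq -r '.status.readyReplicas // 0') -ne "0" ]]
      do
        if [ $COUNTER -lt $SSH_CHECK_TIMEOUT ]; then
          let COUNTER=COUNTER+1
          sleep 5
        else
          echo "deployment '${DEPLOYMENT}' took too long to start pods"
          return 1
        fi
      done
    }

    # e.g. scale up, then block until a pod is ready (or give up after 10 minutes)
    $KUBECTL scale --replicas=1 deployment/cli >/dev/null 2>&1
    wait_for_ready deployment/cli || exit 1

Keeping the attempt count in SSH_CHECK_TIMEOUT while lengthening the sleep is what lets the series cut API polling five-fold without changing the externally observable timeout.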