From e4d07ba997443fb47cebdcaa1a34f9b9c3c22ac7 Mon Sep 17 00:00:00 2001
From: Niall Thomson
Date: Fri, 22 Dec 2023 21:51:19 -0700
Subject: [PATCH] feat: Improved logging for prepare-environment (#780)

---
 lab/bin/delete-all-and-wait-if-crd-exists | 2 +-
 lab/bin/delete-all-if-crd-exists | 2 +-
 lab/bin/delete-nodegroup | 4 +-
 lab/bin/reset-environment | 79 +++++++++++++------
 lab/bin/uninstall-helm-chart | 6 +-
 .../aiml/inferentia/.workshop/cleanup.sh | 10 +--
 .../controlplanes/ack/.workshop/cleanup.sh | 6 +-
 .../crossplane/.workshop/cleanup.sh | 12 +--
 .../gitops/argocd/.workshop/cleanup.sh | 2 +-
 .../gitops/flux/.workshop/cleanup.sh | 6 +-
 .../compute/karpenter/.workshop/cleanup.sh | 6 +-
 .../workloads/cpa/.workshop/cleanup.sh | 4 +-
 .../fundamentals/fargate/.workshop/cleanup.sh | 4 +-
 .../storage/ebs/.workshop/cleanup.sh | 8 +-
 .../storage/efs/.workshop/cleanup.sh | 4 +-
 .../custom-networking/.workshop/cleanup.sh | 14 ++--
 .../network-policies/.workshop/cleanup.sh | 2 +-
 .../.workshop/cleanup.sh | 6 +-
 .../vpc-lattice/.workshop/cleanup.sh | 28 +++----
 .../container-insights/.workshop/cleanup.sh | 4 +-
 .../logging/cluster/.workshop/cleanup.sh | 6 +-
 .../opensearch/.workshop/cleanup.sh | 6 +-
 .../oss-metrics/.workshop/cleanup.sh | 4 +-
 .../sealed-secrets/.workshop/cleanup.sh | 4 +-
 .../secrets-manager/.workshop/cleanup.sh | 2 +-
 25 files changed, 130 insertions(+), 101 deletions(-)

diff --git a/lab/bin/delete-all-and-wait-if-crd-exists b/lab/bin/delete-all-and-wait-if-crd-exists
index 7eccfef49..db9c587e9
--- a/lab/bin/delete-all-and-wait-if-crd-exists
+++ b/lab/bin/delete-all-and-wait-if-crd-exists
@@ -5,7 +5,7 @@ set -e
 crd=$1
 
 if [ -z "$crd" ]; then
-  echo "Error: You must provide a CRD"
+  >&2 echo "Error: You must provide a CRD"
   exit 1
 fi
 
diff --git a/lab/bin/delete-all-if-crd-exists b/lab/bin/delete-all-if-crd-exists
index 847f29f0c..454fa28c4
--- a/lab/bin/delete-all-if-crd-exists
+++ b/lab/bin/delete-all-if-crd-exists
@@ -5,7 +5,7 @@ set -e
 crd=$1
 
 if [ -z "$crd" ]; then
-  echo "Error: You must provide a CRD"
+  >&2 echo "Error: You must provide a CRD"
   exit 1
 fi
 
diff --git a/lab/bin/delete-nodegroup b/lab/bin/delete-nodegroup
index 7a9a37aa2..bf3f6d567
--- a/lab/bin/delete-nodegroup
+++ b/lab/bin/delete-nodegroup
@@ -4,14 +4,14 @@ nodegroup=$1
 is_eksctl=$2
 
 if [ -z "$nodegroup" ]; then
-  echo "You must provide a node group name"
+  >&2 echo "You must provide a node group name"
   exit 1
 fi
 
 check=$(aws eks list-nodegroups --cluster-name $EKS_CLUSTER_NAME --query "nodegroups[? @ == '$nodegroup']" --output text)
 
 if [ ! -z "$check" ]; then
-  echo "Deleting node group $nodegroup..."
+  logmessage "Deleting node group $nodegroup..."
 
   if [ ! -z "$is_eksctl" ]; then
     eksctl delete nodegroup --cluster $EKS_CLUSTER_NAME --name $nodegroup > /dev/null
diff --git a/lab/bin/reset-environment b/lab/bin/reset-environment
index 2afb81a40..2efd44941
--- a/lab/bin/reset-environment
+++ b/lab/bin/reset-environment
@@ -1,7 +1,26 @@
 #!/bin/bash
 
+mkdir -p /eks-workshop/logs
+log_file=/eks-workshop/logs/action-$(date +%s).log
+
+exec 7>&1
+
+logmessage() {
+  echo "$@" >&7
+  echo "$@" >&1
+}
+export -f logmessage
+
+if [ -z "${DEV_MODE}" ]; then
+  # Redirection for logging
+  exec >$log_file 2> >(tee >(cat >&7))
+else
+  # Log the commands in dev mode
+  set -o xtrace
+fi
+
 if [ -z "$EKS_CLUSTER_NAME" ]; then
-  echo "Error: The EKS_CLUSTER_NAME environment variable must be set. Please run 'use-cluster '"
+  logmessage "Error: The EKS_CLUSTER_NAME environment variable must be set. Please run 'use-cluster '"
   exit 1
 fi
 
@@ -12,6 +31,15 @@ manifests_path="/eks-workshop/manifests"
 base_path="$manifests_path/base-application"
 
 set -Eeuo pipefail
+trap 'catch $? $LINENO' EXIT
+
+catch() {
+  if [ "$1" != "0" ]; then
+    logmessage "An error occurred, please contact your workshop proctor or raise an issue at https://github.com/aws-samples/eks-workshop-v2/issues"
+    logmessage "The full log can be found here: $log_file"
+  fi
+  exec 7<&-
+}
 
 mkdir -p /eks-workshop
 
@@ -24,12 +52,12 @@ REPOSITORY_REF=${REPOSITORY_REF:-""}
 
 if [ ! -z "${REPOSITORY_REF}" ]; then
   rm -rf $repository_path
 
-  echo "Refreshing copy of workshop repository from GitHub..."
+  logmessage "Refreshing copy of workshop repository from GitHub..."
 
-  git clone --quiet https://github.com/$REPOSITORY_OWNER/$REPOSITORY_NAME.git $repository_path > /dev/null
-  (cd $repository_path && git checkout --quiet "${REPOSITORY_REF}" > /dev/null)
+  git clone --quiet https://github.com/$REPOSITORY_OWNER/$REPOSITORY_NAME.git $repository_path
+  (cd $repository_path && git checkout --quiet "${REPOSITORY_REF}")
 
-  echo ""
+  logmessage ""
 
   cp -R $repository_path/manifests $manifests_path
 elif [ -d "/manifests" ]; then
@@ -44,15 +72,16 @@ if [ ! -z "$module" ]; then
   fi
 fi
 
-echo "Resetting the environment, please wait"
+logmessage "Resetting the environment..."
+logmessage "Tip: Read the rest of the lab introduction while you wait!"
 
 if [ -f "/eks-workshop/hooks/cleanup.sh" ]; then
   bash /eks-workshop/hooks/cleanup.sh
 fi
 
-kubectl delete pod load-generator --ignore-not-found > /dev/null
+kubectl delete pod load-generator --ignore-not-found
 
-kubectl delete namespace other --ignore-not-found > /dev/null
+kubectl delete namespace other --ignore-not-found
 
 kubectl apply -k $base_path --prune --all \
   --prune-allowlist=autoscaling/v1/HorizontalPodAutoscaler \
@@ -64,14 +93,14 @@ kubectl apply -k $base_path --prune --all \
   --prune-allowlist=core/v1/Secret \
   --prune-allowlist=core/v1/PersistentVolumeClaim \
   --prune-allowlist=scheduling.k8s.io/v1/PriorityClass \
-  --prune-allowlist=networking.k8s.io/v1/Ingress > /dev/null
+  --prune-allowlist=networking.k8s.io/v1/Ingress
 
-echo "Waiting for application to become ready..."
+logmessage "Waiting for application to become ready..."
 
 sleep 10
 
-kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A > /dev/null
-kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A > /dev/null
+kubectl wait --for=condition=available --timeout=240s deployments -l app.kubernetes.io/created-by=eks-workshop -A
+kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A
 
 # Addons
 mkdir -p /eks-workshop/terraform
@@ -81,12 +110,12 @@ export TF_VAR_eks_cluster_id="$EKS_CLUSTER_NAME"
 
 RESOURCES_PRECREATED=${RESOURCES_PRECREATED:-""}
 
-echo "Cleaning up previous lab infrastructure..."
+logmessage "Cleaning up previous lab infrastructure..."
 
 tf_dir=$(realpath --relative-to="$PWD" '/eks-workshop/terraform')
 
-terraform -chdir="$tf_dir" init -upgrade > /tmp/terraform-destroy-init.log
-terraform -chdir="$tf_dir" destroy --auto-approve > /tmp/terraform-destroy.log
+terraform -chdir="$tf_dir" init -upgrade
+terraform -chdir="$tf_dir" destroy --auto-approve
 
 rm -rf /eks-workshop/terraform/addon*.tf
 
@@ -101,7 +130,7 @@ if [ ! -z "$module" ]; then
   fi
 
   if [ -f "$module_path/.workshop/terraform/addon.tf" ]; then
-    echo "Creating infrastructure for next lab..."
+ logmessage "Creating infrastructure for next lab..." cp -R $module_path/.workshop/terraform/* /eks-workshop/terraform @@ -109,12 +138,12 @@ if [ ! -z "$module" ]; then rm -f /eks-workshop/terraform/addon_infrastructure.tf fi - terraform -chdir="$tf_dir" init -upgrade > /tmp/terraform-apply-init.log - terraform -chdir="$tf_dir" apply -refresh=false --auto-approve > /tmp/terraform-apply.log + terraform -chdir="$tf_dir" init -upgrade + terraform -chdir="$tf_dir" apply -refresh=false --auto-approve fi if [ -d "$module_path/.workshop/manifests" ]; then - kubectl apply -k "$module_path/.workshop/manifests" > /dev/null + kubectl apply -k "$module_path/.workshop/manifests" fi fi @@ -126,10 +155,10 @@ expected_size_config="$EKS_DEFAULT_MNG_MIN $EKS_DEFAULT_MNG_MAX $EKS_DEFAULT_MNG mng_size_config=$(aws eks describe-nodegroup --cluster-name $EKS_CLUSTER_NAME --nodegroup-name $EKS_DEFAULT_MNG_NAME | jq -r '.nodegroup.scalingConfig | "\(.minSize) \(.maxSize) \(.desiredSize)"') if [[ "$mng_size_config" != "$expected_size_config" ]]; then - echo "Setting EKS Node Group back to initial sizing..." + logmessage "Setting EKS Node Group back to initial sizing..." aws eks update-nodegroup-config --cluster-name $EKS_CLUSTER_NAME --nodegroup-name $EKS_DEFAULT_MNG_NAME \ - --scaling-config desiredSize=$EKS_DEFAULT_MNG_DESIRED,minSize=$EKS_DEFAULT_MNG_MIN,maxSize=$EKS_DEFAULT_MNG_MAX > /dev/null + --scaling-config desiredSize=$EKS_DEFAULT_MNG_DESIRED,minSize=$EKS_DEFAULT_MNG_MIN,maxSize=$EKS_DEFAULT_MNG_MAX aws eks wait nodegroup-active --cluster-name $EKS_CLUSTER_NAME --nodegroup-name $EKS_DEFAULT_MNG_NAME sleep 10 @@ -138,7 +167,7 @@ fi asg_size_config=$(aws autoscaling describe-auto-scaling-groups --filters "Name=tag:eks:nodegroup-name,Values=$EKS_DEFAULT_MNG_NAME" "Name=tag:eks:cluster-name,Values=$EKS_CLUSTER_NAME" | jq -r '.AutoScalingGroups[0] | "\(.MinSize) \(.MaxSize) \(.DesiredCapacity)"') if [[ "$asg_size_config" != "$expected_size_config" ]]; then - echo "Setting ASG back to initial sizing..." + logmessage "Setting ASG back to initial sizing..." export ASG_NAME=$(aws autoscaling describe-auto-scaling-groups --filters "Name=tag:eks:nodegroup-name,Values=$EKS_DEFAULT_MNG_NAME" "Name=tag:eks:cluster-name,Values=$EKS_CLUSTER_NAME" --query "AutoScalingGroups[0].AutoScalingGroupName" --output text) aws autoscaling update-auto-scaling-group \ @@ -161,9 +190,9 @@ if [ $EXIT_CODE -ne 0 ]; then fi # Recycle workload pods in case stateful pods got restarted -kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A > /dev/null +kubectl delete pod -l app.kubernetes.io/created-by=eks-workshop -l app.kubernetes.io/component=service -A -kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A > /dev/null +kubectl wait --for=condition=Ready --timeout=240s pods -l app.kubernetes.io/created-by=eks-workshop -A # Finished -echo 'Environment is ready' +logmessage 'Environment is ready' diff --git a/lab/bin/uninstall-helm-chart b/lab/bin/uninstall-helm-chart index 94378dba7..da5d21535 100644 --- a/lab/bin/uninstall-helm-chart +++ b/lab/bin/uninstall-helm-chart @@ -6,19 +6,19 @@ release=$1 namespace=$2 if [ -z "$release" ]; then - echo "You must provide a release name" + >&2 echo "You must provide a release name" exit 1 fi if [ -z "$namespace" ]; then - echo "You must provide a namespace" + >&2 echo "You must provide a namespace" exit 1 fi check=$(helm ls --filter "$release" -n "$namespace" --no-headers) if [ ! 
-z "$check" ]; then - echo "Uninstalling helm chart $release..." + logmessage "Uninstalling helm chart $release..." helm uninstall $release -n $namespace --wait > /dev/null fi \ No newline at end of file diff --git a/manifests/modules/aiml/inferentia/.workshop/cleanup.sh b/manifests/modules/aiml/inferentia/.workshop/cleanup.sh index 8be6b836d..03e67944e 100644 --- a/manifests/modules/aiml/inferentia/.workshop/cleanup.sh +++ b/manifests/modules/aiml/inferentia/.workshop/cleanup.sh @@ -2,16 +2,16 @@ set -e -echo "Deleting AIML resources..." +logmessage "Deleting AIML resources..." -kubectl delete namespace aiml --ignore-not-found > /dev/null +kubectl delete namespace aiml --ignore-not-found -echo "Deleting Karpenter NodePool and EC2NodeClass..." +logmessage "Deleting Karpenter NodePool and EC2NodeClass..." delete-all-if-crd-exists nodepools.karpenter.sh delete-all-if-crd-exists ec2nodeclasses.karpenter.k8s.aws -echo "Waiting for Karpenter nodes to be removed..." +logmessage "Waiting for Karpenter nodes to be removed..." EXIT_CODE=0 @@ -21,5 +21,5 @@ timeout --foreground -s TERM 30 bash -c \ done' || EXIT_CODE=$? if [ $EXIT_CODE -ne 0 ]; then - echo "Warning: Karpenter nodes did not clean up" + logmessage "Warning: Karpenter nodes did not clean up" fi \ No newline at end of file diff --git a/manifests/modules/automation/controlplanes/ack/.workshop/cleanup.sh b/manifests/modules/automation/controlplanes/ack/.workshop/cleanup.sh index 5d6be25dc..7ee4629b4 100755 --- a/manifests/modules/automation/controlplanes/ack/.workshop/cleanup.sh +++ b/manifests/modules/automation/controlplanes/ack/.workshop/cleanup.sh @@ -1,6 +1,6 @@ #!/bin/bash -echo "Deleting resources created by ACK..." +logmessage "Deleting resources created by ACK..." -eksctl delete iamserviceaccount --name carts-ack --namespace carts --cluster $EKS_CLUSTER_NAME -v 0 > /dev/null -kubectl delete table items -n carts --ignore-not-found=true > /dev/null \ No newline at end of file +eksctl delete iamserviceaccount --name carts-ack --namespace carts --cluster $EKS_CLUSTER_NAME -v 0 +kubectl delete table items -n carts --ignore-not-found=true \ No newline at end of file diff --git a/manifests/modules/automation/controlplanes/crossplane/.workshop/cleanup.sh b/manifests/modules/automation/controlplanes/crossplane/.workshop/cleanup.sh index d731d9dc6..90a65222e 100755 --- a/manifests/modules/automation/controlplanes/crossplane/.workshop/cleanup.sh +++ b/manifests/modules/automation/controlplanes/crossplane/.workshop/cleanup.sh @@ -1,15 +1,15 @@ #!/bin/bash -echo "Deleting resources created by Crossplane..." +logmessage "Deleting resources created by Crossplane..." 
 
 delete-all-and-wait-if-crd-exists dynamodbtables.awsblueprints.io
 
-kubectl delete tables.dynamodb.aws.upbound.io --all --ignore-not-found=true > /dev/null
+kubectl delete tables.dynamodb.aws.upbound.io --all --ignore-not-found=true
 
-kubectl wait --for=delete tables.dynamodb.aws.upbound.io --all --timeout=600s > /dev/null
+kubectl wait --for=delete tables.dynamodb.aws.upbound.io --all --timeout=600s
 
-kubectl delete -k /eks-workshop/manifests/modules/automation/controlplanes/crossplane/compositions/composition --ignore-not-found=true > /dev/null
+kubectl delete -k /eks-workshop/manifests/modules/automation/controlplanes/crossplane/compositions/composition --ignore-not-found=true
 
-kubectl wait --for=delete composition table.dynamodb.awsblueprints.io --timeout=600s > /dev/null
+kubectl wait --for=delete composition table.dynamodb.awsblueprints.io --timeout=600s
 
-eksctl delete iamserviceaccount --name carts-crossplane --namespace carts --cluster $EKS_CLUSTER_NAME -v 0 > /dev/null
\ No newline at end of file
+eksctl delete iamserviceaccount --name carts-crossplane --namespace carts --cluster $EKS_CLUSTER_NAME -v 0
\ No newline at end of file
diff --git a/manifests/modules/automation/gitops/argocd/.workshop/cleanup.sh b/manifests/modules/automation/gitops/argocd/.workshop/cleanup.sh
index 074349a2e..e1a318ea8
--- a/manifests/modules/automation/gitops/argocd/.workshop/cleanup.sh
+++ b/manifests/modules/automation/gitops/argocd/.workshop/cleanup.sh
@@ -2,7 +2,7 @@
 
 set -e
 
-echo "Deleting ArgoCD applications..."
+logmessage "Deleting ArgoCD applications..."
 
 delete-all-and-wait-if-crd-exists applications.argoproj.io
 
diff --git a/manifests/modules/automation/gitops/flux/.workshop/cleanup.sh b/manifests/modules/automation/gitops/flux/.workshop/cleanup.sh
index 0a6cdc1c9..e9ce42e8e
--- a/manifests/modules/automation/gitops/flux/.workshop/cleanup.sh
+++ b/manifests/modules/automation/gitops/flux/.workshop/cleanup.sh
@@ -2,10 +2,10 @@
 
 set -e
 
-echo "Uninstalling flux"
+logmessage "Uninstalling flux"
 
-flux uninstall --silent > /dev/null
+flux uninstall --silent
 
-kubectl delete namespace ui > /dev/null
+kubectl delete namespace ui
 
 rm -rf ~/environment/flux
\ No newline at end of file
diff --git a/manifests/modules/autoscaling/compute/karpenter/.workshop/cleanup.sh b/manifests/modules/autoscaling/compute/karpenter/.workshop/cleanup.sh
index fcb53440a..8b2960156
--- a/manifests/modules/autoscaling/compute/karpenter/.workshop/cleanup.sh
+++ b/manifests/modules/autoscaling/compute/karpenter/.workshop/cleanup.sh
@@ -2,12 +2,12 @@
 
 set -e
 
-echo "Deleting Karpenter NodePool and EC2NodeClass..."
+logmessage "Deleting Karpenter NodePool and EC2NodeClass..."
 
 delete-all-if-crd-exists nodepools.karpenter.sh
 delete-all-if-crd-exists ec2nodeclasses.karpenter.k8s.aws
 
-echo "Waiting for Karpenter nodes to be removed..."
+logmessage "Waiting for Karpenter nodes to be removed..."
 
 EXIT_CODE=0
 
@@ -17,5 +17,5 @@ timeout --foreground -s TERM 30 bash -c \
    done' || EXIT_CODE=$?
 
 if [ $EXIT_CODE -ne 0 ]; then
-  echo "Warning: Karpenter nodes did not clean up"
+  logmessage "Warning: Karpenter nodes did not clean up"
 fi
diff --git a/manifests/modules/autoscaling/workloads/cpa/.workshop/cleanup.sh b/manifests/modules/autoscaling/workloads/cpa/.workshop/cleanup.sh
index a67b33741..5a87b236a
--- a/manifests/modules/autoscaling/workloads/cpa/.workshop/cleanup.sh
+++ b/manifests/modules/autoscaling/workloads/cpa/.workshop/cleanup.sh
@@ -2,6 +2,6 @@
 
 set -e
 
-echo "Resetting CoreDNS replicas..."
+logmessage "Resetting CoreDNS replicas..."
 
-kubectl -n kube-system scale deployment/coredns --replicas=2 > /dev/null
\ No newline at end of file
+kubectl -n kube-system scale deployment/coredns --replicas=2
\ No newline at end of file
diff --git a/manifests/modules/fundamentals/fargate/.workshop/cleanup.sh b/manifests/modules/fundamentals/fargate/.workshop/cleanup.sh
index 99a4faa71..ded017cdb
--- a/manifests/modules/fundamentals/fargate/.workshop/cleanup.sh
+++ b/manifests/modules/fundamentals/fargate/.workshop/cleanup.sh
@@ -1,7 +1,7 @@
 check=$(aws eks list-fargate-profiles --cluster-name $EKS_CLUSTER_NAME --query "fargateProfileNames[? @ == 'checkout-profile']" --output text)
 
 if [ ! -z "$check" ]; then
-  echo "Deleting Fargate profile..."
+  logmessage "Deleting Fargate profile..."
 
-  aws eks delete-fargate-profile --region $AWS_REGION --cluster-name $EKS_CLUSTER_NAME --fargate-profile-name checkout-profile > /dev/null
+  aws eks delete-fargate-profile --region $AWS_REGION --cluster-name $EKS_CLUSTER_NAME --fargate-profile-name checkout-profile
 fi
\ No newline at end of file
diff --git a/manifests/modules/fundamentals/storage/ebs/.workshop/cleanup.sh b/manifests/modules/fundamentals/storage/ebs/.workshop/cleanup.sh
index de833c1d8..7d25ad54b
--- a/manifests/modules/fundamentals/storage/ebs/.workshop/cleanup.sh
+++ b/manifests/modules/fundamentals/storage/ebs/.workshop/cleanup.sh
@@ -4,12 +4,12 @@ set -e
 
 check=$(aws eks list-addons --cluster-name $EKS_CLUSTER_NAME --query "addons[? @ == 'aws-ebs-csi-driver']" --output text)
 
-kubectl delete namespace catalog --wait --ignore-not-found > /dev/null
+kubectl delete namespace catalog --wait --ignore-not-found
 
 if [ ! -z "$check" ]; then
-  echo "Deleting EBS CSI driver addon..."
+  logmessage "Deleting EBS CSI driver addon..."
 
-  aws eks delete-addon --cluster-name $EKS_CLUSTER_NAME --addon-name aws-ebs-csi-driver > /dev/null
+  aws eks delete-addon --cluster-name $EKS_CLUSTER_NAME --addon-name aws-ebs-csi-driver
 
-  aws eks wait addon-deleted --cluster-name $EKS_CLUSTER_NAME --addon-name aws-ebs-csi-driver > /dev/null
+  aws eks wait addon-deleted --cluster-name $EKS_CLUSTER_NAME --addon-name aws-ebs-csi-driver
 fi
\ No newline at end of file
diff --git a/manifests/modules/fundamentals/storage/efs/.workshop/cleanup.sh b/manifests/modules/fundamentals/storage/efs/.workshop/cleanup.sh
index d1844db22..285b0305b
--- a/manifests/modules/fundamentals/storage/efs/.workshop/cleanup.sh
+++ b/manifests/modules/fundamentals/storage/efs/.workshop/cleanup.sh
@@ -2,6 +2,6 @@
 
 set -e
 
-echo "Deleting EFS storage class..."
+logmessage "Deleting EFS storage class..."
 
-kubectl delete storageclass efs-sc --ignore-not-found > /dev/null
+kubectl delete storageclass efs-sc --ignore-not-found
diff --git a/manifests/modules/networking/custom-networking/.workshop/cleanup.sh b/manifests/modules/networking/custom-networking/.workshop/cleanup.sh
index d1de9f17b..a045798bb
--- a/manifests/modules/networking/custom-networking/.workshop/cleanup.sh
+++ b/manifests/modules/networking/custom-networking/.workshop/cleanup.sh
@@ -2,23 +2,23 @@
 
 set -e
 
-echo "Deleting ENI configs..."
+logmessage "Deleting ENI configs..."
 
-kubectl delete ENIConfig --all -A > /dev/null
+kubectl delete ENIConfig --all -A
 
 sleep 10
 
-echo "Resetting VPC CNI configuration..."
+logmessage "Resetting VPC CNI configuration..."
 
-kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=false > /dev/null
+kubectl set env daemonset aws-node -n kube-system AWS_VPC_K8S_CNI_CUSTOM_NETWORK_CFG=false
 
 sleep 10
 
 custom_nodegroup=$(aws eks list-nodegroups --cluster-name $EKS_CLUSTER_NAME --query "nodegroups[? @ == 'custom-networking']" --output text)
 
 if [ ! -z "$custom_nodegroup" ]; then
-  echo "Deleting custom networking node group..."
+  logmessage "Deleting custom networking node group..."
 
-  aws eks delete-nodegroup --region $AWS_REGION --cluster-name $EKS_CLUSTER_NAME --nodegroup-name custom-networking > /dev/null
-  aws eks wait nodegroup-deleted --cluster-name $EKS_CLUSTER_NAME --nodegroup-name custom-networking > /dev/null
+  aws eks delete-nodegroup --region $AWS_REGION --cluster-name $EKS_CLUSTER_NAME --nodegroup-name custom-networking
+  aws eks wait nodegroup-deleted --cluster-name $EKS_CLUSTER_NAME --nodegroup-name custom-networking
 fi
diff --git a/manifests/modules/networking/network-policies/.workshop/cleanup.sh b/manifests/modules/networking/network-policies/.workshop/cleanup.sh
index cedfb4b4f..0bb7c2ac7
--- a/manifests/modules/networking/network-policies/.workshop/cleanup.sh
+++ b/manifests/modules/networking/network-policies/.workshop/cleanup.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-kubectl delete networkpolicy -A --all > /dev/null
\ No newline at end of file
+kubectl delete networkpolicy -A --all
\ No newline at end of file
diff --git a/manifests/modules/networking/securitygroups-for-pods/.workshop/cleanup.sh b/manifests/modules/networking/securitygroups-for-pods/.workshop/cleanup.sh
index 297e359e7..fa170b961
--- a/manifests/modules/networking/securitygroups-for-pods/.workshop/cleanup.sh
+++ b/manifests/modules/networking/securitygroups-for-pods/.workshop/cleanup.sh
@@ -2,11 +2,11 @@
 
 set -e
 
-echo "Deleting Security Group policies..."
+logmessage "Deleting Security Group policies..."
 
-kubectl delete SecurityGroupPolicy --all -A > /dev/null
+kubectl delete SecurityGroupPolicy --all -A
 
 sleep 5
 
 # Clear the catalog pods so the SG can be deleted
-kubectl rollout restart -n catalog deployment/catalog > /dev/null
\ No newline at end of file
+kubectl rollout restart -n catalog deployment/catalog
\ No newline at end of file
diff --git a/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh b/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh
index 9fcdb2ed9..73867acfb
--- a/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh
+++ b/manifests/modules/networking/vpc-lattice/.workshop/cleanup.sh
@@ -2,17 +2,17 @@
 
 set -e
 
-echo "Deleting VPC Lattice routes and gateway..."
+logmessage "Deleting VPC Lattice routes and gateway..."
 
-kubectl delete namespace checkoutv2 --ignore-not-found > /dev/null
+kubectl delete namespace checkoutv2 --ignore-not-found
 
 delete-all-if-crd-exists targetgrouppolicies.application-networking.k8s.aws
 
-kubectl delete -f ~/environment/eks-workshop/modules/networking/vpc-lattice/routes --ignore-not-found > /dev/null
-cat ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/eks-workshop-gw.yaml | envsubst | kubectl delete --ignore-not-found -f - > /dev/null
-kubectl delete -f ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/gatewayclass.yaml --ignore-not-found > /dev/null
+kubectl delete -f ~/environment/eks-workshop/modules/networking/vpc-lattice/routes --ignore-not-found
+cat ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/eks-workshop-gw.yaml | envsubst | kubectl delete --ignore-not-found -f -
+kubectl delete -f ~/environment/eks-workshop/modules/networking/vpc-lattice/controller/gatewayclass.yaml --ignore-not-found
 
-echo "Waiting for VPC Lattice target groups to be deleted..."
+logmessage "Waiting for VPC Lattice target groups to be deleted..."
 
 timeout -s TERM 300 bash -c \
   'while [[ ! -z "$(aws vpc-lattice list-target-groups --output text | grep 'checkout' || true)" ]];\
    do sleep 10;\
  done'
 
 helm_check=$(helm ls -A | grep 'gateway-api-controller' || true)
 
 if [ ! -z "$helm_check" ]; then
-  echo "Uninstalling Gateway API Controller helm chart..."
+  logmessage "Uninstalling Gateway API Controller helm chart..."
 
-  helm delete gateway-api-controller --namespace gateway-api-controller > /dev/null
+  helm delete gateway-api-controller --namespace gateway-api-controller
 fi
 
 CLUSTER_SG=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --output json| jq -r '.cluster.resourcesVpcConfig.clusterSecurityGroupId')
@@ -35,24 +35,24 @@ PREFIX_LIST_ID_IPV6=$(aws ec2 describe-managed-prefix-lists --query "PrefixLists[?PrefixListName=="\'com.amazonaws.$AWS_REGION.ipv6.vpc-lattice\'"].PrefixListId" | jq -r '.[]')
 
 ipv4_sg_check=$(aws ec2 describe-security-group-rules --filters Name="group-id",Values="$CLUSTER_SG" --query "SecurityGroupRules[?PrefixListId=='$PREFIX_LIST_ID'].SecurityGroupRuleId" --output text)
 
 if [ ! -z "$ipv4_sg_check" ]; then
-  aws ec2 revoke-security-group-ingress --group-id $CLUSTER_SG --ip-permissions "PrefixListIds=[{PrefixListId=${PREFIX_LIST_ID}}],IpProtocol=-1" > /dev/null
+  aws ec2 revoke-security-group-ingress --group-id $CLUSTER_SG --ip-permissions "PrefixListIds=[{PrefixListId=${PREFIX_LIST_ID}}],IpProtocol=-1"
 fi
 
 ipv6_sg_check=$(aws ec2 describe-security-group-rules --filters Name="group-id",Values="$CLUSTER_SG" --query "SecurityGroupRules[?PrefixListId=='$PREFIX_LIST_ID_IPV6'].SecurityGroupRuleId" --output text)
 
 if [ ! -z "$ipv6_sg_check" ]; then
-  aws ec2 revoke-security-group-ingress --group-id $CLUSTER_SG --ip-permissions "PrefixListIds=[{PrefixListId=${PREFIX_LIST_ID_IPV6}}],IpProtocol=-1" > /dev/null
+  aws ec2 revoke-security-group-ingress --group-id $CLUSTER_SG --ip-permissions "PrefixListIds=[{PrefixListId=${PREFIX_LIST_ID_IPV6}}],IpProtocol=-1"
 fi
 
 service_network=$(aws vpc-lattice list-service-networks --query "items[?name=="\'$EKS_CLUSTER_NAME\'"].id" | jq -r '.[]')
 
 if [ ! -z "$service_network" ]; then
   association_id=$(aws vpc-lattice list-service-network-vpc-associations --service-network-identifier $service_network --vpc-identifier $VPC_ID --query 'items[].id' | jq -r '.[]')
   if [ ! -z "$association_id" ]; then
-    echo "Deleting Lattice VPC association..."
-    aws vpc-lattice delete-service-network-vpc-association --service-network-vpc-association-identifier $association_id > /dev/null
+    logmessage "Deleting Lattice VPC association..."
+    aws vpc-lattice delete-service-network-vpc-association --service-network-vpc-association-identifier $association_id
 
     sleep 30 # Todo replace with wait
   fi
 
-  echo "Deleting Lattice service network..."
-  aws vpc-lattice delete-service-network --service-network-identifier $service_network > /dev/null
+  logmessage "Deleting Lattice service network..."
+  aws vpc-lattice delete-service-network --service-network-identifier $service_network
 fi
\ No newline at end of file
diff --git a/manifests/modules/observability/container-insights/.workshop/cleanup.sh b/manifests/modules/observability/container-insights/.workshop/cleanup.sh
index dfc7ddda3..4d1c1a89c
--- a/manifests/modules/observability/container-insights/.workshop/cleanup.sh
+++ b/manifests/modules/observability/container-insights/.workshop/cleanup.sh
@@ -2,8 +2,8 @@
 
 set -e
 
-echo "Deleting OpenTelemetry collectors..."
+logmessage "Deleting OpenTelemetry collectors..."
 
 delete-all-if-crd-exists opentelemetrycollectors.opentelemetry.io
 
-kubectl delete -n other pod load-generator --ignore-not-found > /dev/null
\ No newline at end of file
+kubectl delete -n other pod load-generator --ignore-not-found
\ No newline at end of file
diff --git a/manifests/modules/observability/logging/cluster/.workshop/cleanup.sh b/manifests/modules/observability/logging/cluster/.workshop/cleanup.sh
index 945ea0791..f19eb8139
--- a/manifests/modules/observability/logging/cluster/.workshop/cleanup.sh
+++ b/manifests/modules/observability/logging/cluster/.workshop/cleanup.sh
@@ -2,11 +2,11 @@
 
 set -e
 
-echo "Disabling EKS control plane logs..."
+logmessage "Disabling EKS control plane logs..."
 
 aws eks update-cluster-config \
     --region $AWS_REGION \
     --name $EKS_CLUSTER_NAME \
-    --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":false}]}' || true > /dev/null
+    --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":false}]}' || true
 
-aws eks wait cluster-active --name $EKS_CLUSTER_NAME > /dev/null
\ No newline at end of file
+aws eks wait cluster-active --name $EKS_CLUSTER_NAME
\ No newline at end of file
diff --git a/manifests/modules/observability/opensearch/.workshop/cleanup.sh b/manifests/modules/observability/opensearch/.workshop/cleanup.sh
index 416bac0b1..ce1e851e4
--- a/manifests/modules/observability/opensearch/.workshop/cleanup.sh
+++ b/manifests/modules/observability/opensearch/.workshop/cleanup.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
 
-echo "Deleting OpenSearch exporter and test workloads..."
+logmessage "Deleting OpenSearch exporter and test workloads..."
 
 uninstall-helm-chart events-to-opensearch opensearch-exporter
 uninstall-helm-chart fluentbit opensearch-exporter
 
-kubectl delete ns opensearch-exporter --ignore-not-found > /dev/null
-kubectl delete ns test --ignore-not-found > /dev/null
\ No newline at end of file
+kubectl delete ns opensearch-exporter --ignore-not-found
+kubectl delete ns test --ignore-not-found
\ No newline at end of file
diff --git a/manifests/modules/observability/oss-metrics/.workshop/cleanup.sh b/manifests/modules/observability/oss-metrics/.workshop/cleanup.sh
index dfc7ddda3..4d1c1a89c
--- a/manifests/modules/observability/oss-metrics/.workshop/cleanup.sh
+++ b/manifests/modules/observability/oss-metrics/.workshop/cleanup.sh
@@ -2,8 +2,8 @@
 
 set -e
 
-echo "Deleting OpenTelemetry collectors..."
+logmessage "Deleting OpenTelemetry collectors..."
 
 delete-all-if-crd-exists opentelemetrycollectors.opentelemetry.io
 
-kubectl delete -n other pod load-generator --ignore-not-found > /dev/null
\ No newline at end of file
+kubectl delete -n other pod load-generator --ignore-not-found
\ No newline at end of file
diff --git a/manifests/modules/security/sealed-secrets/.workshop/cleanup.sh b/manifests/modules/security/sealed-secrets/.workshop/cleanup.sh
index 002a01665..8aced3f79
--- a/manifests/modules/security/sealed-secrets/.workshop/cleanup.sh
+++ b/manifests/modules/security/sealed-secrets/.workshop/cleanup.sh
@@ -2,6 +2,6 @@
 
 set -e
 
-kubectl delete -f https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.18.0/controller.yaml --ignore-not-found > /dev/null
+kubectl delete -f https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.18.0/controller.yaml --ignore-not-found
 
-kubectl delete -n catalog secret catalog-sealed-db --ignore-not-found > /dev/null
\ No newline at end of file
+kubectl delete -n catalog secret catalog-sealed-db --ignore-not-found
\ No newline at end of file
diff --git a/manifests/modules/security/secrets-manager/.workshop/cleanup.sh b/manifests/modules/security/secrets-manager/.workshop/cleanup.sh
index 52cdb4175..8932d4fb4
--- a/manifests/modules/security/secrets-manager/.workshop/cleanup.sh
+++ b/manifests/modules/security/secrets-manager/.workshop/cleanup.sh
@@ -13,6 +13,6 @@ delete-all-if-crd-exists externalsecrets.external-secrets.io
 check=$(aws secretsmanager list-secrets --filters Key="name",Values="${SECRET_NAME}" --output text)
 
 if [ ! -z "$check" ]; then
-  echo "Deleting Secrets Manager data..."
+  logmessage "Deleting Secrets Manager data..."
   aws secretsmanager delete-secret --secret-id ${SECRET_NAME}
 fi
\ No newline at end of file
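
The logging mechanism this patch threads through reset-environment can be exercised on its own. The sketch below is a minimal standalone reproduction of that pattern, assuming only bash and coreutils; the script itself and the /tmp/demo-logs path are illustrative stand-ins, not paths used by the patch.

#!/bin/bash
# Minimal sketch of the reset-environment logging pattern (illustrative paths).

mkdir -p /tmp/demo-logs
log_file=/tmp/demo-logs/action-$(date +%s).log

exec 7>&1                    # fd 7 keeps a handle on the original terminal stdout

logmessage() {
  echo "$@" >&7              # user-facing copy, straight to the terminal
  echo "$@" >&1              # second copy follows stdout into the log file
}
export -f logmessage         # child bash processes (e.g. hook scripts) see it too

if [ -z "${DEV_MODE}" ]; then
  # stdout goes only to the log; stderr goes to the log and the terminal
  exec >$log_file 2> >(tee >(cat >&7))
else
  # in dev mode keep output on the terminal and trace every command
  set -o xtrace
fi

trap 'catch $? $LINENO' EXIT
catch() {
  if [ "$1" != "0" ]; then
    logmessage "An error occurred, the full log is at: $log_file"
  fi
  exec 7<&-                  # release the saved terminal descriptor on exit
}

logmessage "Resetting the environment..."        # reaches the terminal
echo "verbose tool output lands in $log_file"    # captured in the log only

Running this once normally and once with DEV_MODE=1 shows the two behaviors: without DEV_MODE only the logmessage line appears on the terminal, while everything is preserved in the timestamped log file.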
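For the per-module cleanup hooks converted above, the conventions the patch establishes are: announce progress with logmessage (available in the hook because reset-environment exports it with export -f and runs the hook via bash), report usage errors with >&2 echo, and stop appending > /dev/null so ordinary command output flows into the parent's action log instead of being discarded. A hypothetical hook in that style — the example-ns namespace is illustrative, not a resource from this patch:

#!/bin/bash
# Hypothetical .workshop/cleanup.sh following the conventions in this patch.

set -e

logmessage "Deleting example resources..."   # user-facing progress, via the exported function

# No '> /dev/null' here: kubectl's output is captured by the log-file
# redirection set up in reset-environment rather than being thrown away.
kubectl delete namespace example-ns --ignore-not-found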