Skip to content

Commit

Permalink
move restic removal to normal install codepath (#4804)
Browse files Browse the repository at this point in the history
* move restic removal to normal install codepath

* rookver

* rookver12

* registry

* cpu

* add timeouts increase cpu

* limit testing again

* ensure latest version still works

* wait for the deployment to be rolled out before continuing

* restore all tests
  • Loading branch information
laverya authored Sep 5, 2023
1 parent 93e17dc commit f9a73bb
Show file tree
Hide file tree
Showing 3 changed files with 78 additions and 27 deletions.
20 changes: 11 additions & 9 deletions addons/velero/1.11.1/install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,11 @@ function velero() {

velero_change_storageclass "$src" "$dst"

# Remove restic resources since they've been replaced by node agent
kubectl delete daemonset -n "$VELERO_NAMESPACE" restic --ignore-not-found
kubectl delete secret -n "$VELERO_NAMESPACE" velero-restic-credentials --ignore-not-found
kubectl delete crd resticrepositories.velero.io --ignore-not-found

# If we already migrated, or we are on a new install that has the disableS3 flag set, we need a PVC attached
if kubernetes_resource_exists "$VELERO_NAMESPACE" pvc velero-internal-snapshots || [ "$KOTSADM_DISABLE_S3" == "1" ]; then
velero_patch_internal_pvc_snapshots "$src" "$dst"
Expand Down Expand Up @@ -80,6 +85,8 @@ function velero() {
velero_pv_name=$(kubectl get pvc velero-internal-snapshots -n ${VELERO_NAMESPACE} -ojsonpath='{.spec.volumeName}')
kubectl patch pv "$velero_pv_name" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
fi

spinner_until 120 deployment_fully_updated velero velero
}

function velero_join() {
Expand All @@ -88,7 +95,7 @@ function velero_join() {
}

function velero_host_init() {
velero_install_nfs_utils_if_missing
velero_install_nfs_utils_if_missing
}

function velero_install_nfs_utils_if_missing() {
Expand Down Expand Up @@ -159,7 +166,7 @@ function velero_install() {
--namespace $VELERO_NAMESPACE \
--plugins velero/velero-plugin-for-aws:v1.7.1,velero/velero-plugin-for-gcp:v1.7.1,velero/velero-plugin-for-microsoft-azure:v1.7.1,replicated/local-volume-provider:v0.5.4,"$KURL_UTIL_IMAGE" \
--use-volume-snapshots=false \
--dry-run -o yaml > "$dst/velero.yaml"
--dry-run -o yaml > "$dst/velero.yaml"

rm -f velero-credentials
}
Expand All @@ -169,15 +176,15 @@ function velero_already_applied() {
local src="$DIR/addons/velero/$VELERO_VERSION"
local dst="$DIR/kustomize/velero"

# If we need to migrate, we're going to need to basically reconstruct the original install
# If we need to migrate, we're going to need to basically reconstruct the original install
# underneath the migration
if velero_should_migrate_from_object_store; then

render_yaml_file "$src/tmpl-kustomization.yaml" > "$dst/kustomization.yaml"

determine_velero_pvc_size

velero_binary
velero_binary
velero_install "$src" "$dst"
velero_patch_node_agent_privilege "$src" "$dst"
velero_patch_args "$src" "$dst"
Expand All @@ -196,11 +203,6 @@ function velero_already_applied() {
kubectl apply -k "$dst"
fi

# Remove restic resources since they've been replaced by node agent
kubectl delete daemonset -n "$VELERO_NAMESPACE" restic --ignore-not-found
kubectl delete secret -n "$VELERO_NAMESPACE" velero-restic-credentials --ignore-not-found
kubectl delete crd resticrepositories.velero.io --ignore-not-found

# Bail if the migration fails, preventing the original object store from being deleted
if velero_did_migrate_from_object_store; then
logWarn "Velero will migrate from object store to pvc"
Expand Down
12 changes: 7 additions & 5 deletions addons/velero/template/base/install.tmpl.sh
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,11 @@ function velero() {

velero_change_storageclass "$src" "$dst"

# Remove restic resources since they've been replaced by node agent
kubectl delete daemonset -n "$VELERO_NAMESPACE" restic --ignore-not-found
kubectl delete secret -n "$VELERO_NAMESPACE" velero-restic-credentials --ignore-not-found
kubectl delete crd resticrepositories.velero.io --ignore-not-found

# If we already migrated, or we are on a new install that has the disableS3 flag set, we need a PVC attached
if kubernetes_resource_exists "$VELERO_NAMESPACE" pvc velero-internal-snapshots || [ "$KOTSADM_DISABLE_S3" == "1" ]; then
velero_patch_internal_pvc_snapshots "$src" "$dst"
Expand Down Expand Up @@ -80,6 +85,8 @@ function velero() {
velero_pv_name=$(kubectl get pvc velero-internal-snapshots -n ${VELERO_NAMESPACE} -ojsonpath='{.spec.volumeName}')
kubectl patch pv "$velero_pv_name" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
fi

spinner_until 120 deployment_fully_updated velero velero
}

function velero_join() {
Expand Down Expand Up @@ -196,11 +203,6 @@ function velero_already_applied() {
kubectl apply -k "$dst"
fi

# Remove restic resources since they've been replaced by node agent
kubectl delete daemonset -n "$VELERO_NAMESPACE" restic --ignore-not-found
kubectl delete secret -n "$VELERO_NAMESPACE" velero-restic-credentials --ignore-not-found
kubectl delete crd resticrepositories.velero.io --ignore-not-found

# Bail if the migration fails, preventing the original object store from being deleted
if velero_did_migrate_from_object_store; then
logWarn "Velero will migrate from object store to pvc"
Expand Down
73 changes: 60 additions & 13 deletions addons/velero/template/testgrid/k8s-docker.yaml
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
- name: "Velero Minimal"
cpu: 6
installerSpec:
kubernetes:
version: "1.27.x"
flannel:
version: latest
rook:
version: "latest"
version: 1.12.x
containerd:
version: "latest"
kotsadm:
Expand All @@ -16,17 +17,18 @@
postInstallScript: |
source /opt/kurl-testgrid/testhelpers.sh
install_and_customize_kurl_integration_test_application
kubectl kots backup
kubectl kots backup ls
timeout 5m kubectl kots backup
timeout 5m kubectl kots backup ls
- name: "Velero Minimal Airgap"
cpu: 6
airgap: true
installerSpec:
kubernetes:
version: "latest"
flannel:
version: latest
rook:
version: "latest"
version: 1.12.x
containerd:
version: "latest"
kotsadm:
Expand All @@ -42,8 +44,8 @@
postInstallScript: |
source /opt/kurl-testgrid/testhelpers.sh
install_and_customize_kurl_integration_test_application
kubectl kots backup
kubectl kots backup ls
timeout 5m kubectl kots backup
timeout 5m kubectl kots backup ls
- name: "Velero DisableS3 - Rook"
installerSpec:
kubernetes:
Expand All @@ -63,8 +65,8 @@
postInstallScript: |
source /opt/kurl-testgrid/testhelpers.sh
install_and_customize_kurl_integration_test_application
kubectl kots backup
kubectl kots backup ls
timeout 5m kubectl kots backup
timeout 5m kubectl kots backup ls
- name: "Velero OpenEBS only"
installerSpec:
kubernetes:
Expand All @@ -90,7 +92,6 @@
flannel:
version: latest
rook:
isBlockStorageEnabled: true
version: 1.12.x
registry:
version: latest
Expand Down Expand Up @@ -126,9 +127,55 @@
postInstallScript: |
source /opt/kurl-testgrid/testhelpers.sh
install_and_customize_kurl_integration_test_application
kubectl kots backup
kubectl kots backup ls
timeout 5m kubectl kots backup
timeout 5m kubectl kots backup ls
postUpgradeScript: |
source /opt/kurl-testgrid/testhelpers.sh
timeout 5m kubectl kots backup
timeout 5m kubectl kots backup ls
- name: "Velero remove restic"
cpu: 8
installerSpec:
kubernetes:
version: "1.27.x"
flannel:
version: latest
rook:
version: 1.12.x
containerd:
version: "latest"
kotsadm:
version: latest
velero:
version: "1.9.x"
upgradeSpec:
kubernetes:
version: "1.27.x"
flannel:
version: latest
rook:
version: 1.12.x
containerd:
version: "latest"
kotsadm:
version: latest
velero:
version: "__testver__"
s3Override: "__testdist__"
postInstallScript: |
source /opt/kurl-testgrid/testhelpers.sh
install_and_customize_kurl_integration_test_application
timeout 5m kubectl kots backup
timeout 5m kubectl kots backup ls
postUpgradeScript: |
source /opt/kurl-testgrid/testhelpers.sh
kubectl kots backup
kubectl kots backup ls
kubectl get pods -n velero
timeout 5m kubectl kots backup
timeout 5m kubectl kots backup ls
# if restic pods are still running, fail the test
if kubectl get pods -n velero | grep restic; then
echo "Restic pods still running after upgrade"
exit 1
fi

0 comments on commit f9a73bb

Please sign in to comment.