diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 0179051f..c03638c3 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -85,6 +85,11 @@ jobs: exit 1 fi + if ! echo $output | grep -q 'statusInformers: null'; then + printf "default statusInformers should be null:\n\n%s\n\n" "$output" + exit 1 + fi + output=$(helm template oci://ttl.sh/automated-${{ github.run_id }}/replicated --version 0.0.0 --set integration.enabled=true) if ! echo $output | grep -q integration-enabled; then @@ -121,7 +126,7 @@ jobs: exit 1 fi - cat << EOF >> test-values.yaml + cat << EOF > test-values.yaml extraEnv: - name: TEST_EXTRA_ENV value: test-extra-env @@ -134,6 +139,17 @@ jobs: exit 1 fi + cat << EOF > test-values.yaml + statusInformers: [] + EOF + + output=$(helm template oci://ttl.sh/automated-${{ github.run_id }}/replicated --version 0.0.0 --values test-values.yaml) + + if ! echo $output | grep -q 'statusInformers: \[\]'; then + printf "user-set empty statusInformers should exist:\n\n%s\n\n" "$output" + exit 1 + fi + create-test-release: runs-on: ubuntu-22.04 needs: [ build-and-push-e2e ] @@ -296,7 +312,7 @@ jobs: helm upgrade test-chart oci://registry.replicated.com/$APP_SLUG/$CHANNEL_SLUG/test-chart --set replicated.integration.enabled=false --set replicated.versionLabel=1.0.0 --wait --timeout 2m COUNTER=1 - while [ kubectl get pods -l app.kubernetes.io/name=replicated -o jsonpath='{.items[0].metadata.name}' | grep -q $oldpodname ]; do + while kubectl get pods -l app.kubernetes.io/name=replicated -o jsonpath='{.items[0].metadata.name}' | grep -q $oldpodname; do ((COUNTER += 1)) if [ $COUNTER -gt 60 ]; then echo "Pod did not restart after upgrade" @@ -340,7 +356,7 @@ jobs: kubectl rollout status deployment replicated --timeout=2m COUNTER=1 - while [ kubectl get pods -l app.kubernetes.io/name=replicated -o jsonpath='{.items[0].metadata.name}' | grep -q $oldpodname ]; do + while kubectl get pods -l app.kubernetes.io/name=replicated -o 
jsonpath='{.items[0].metadata.name}' | grep -q $oldpodname; do ((COUNTER += 1)) if [ $COUNTER -gt 60 ]; then echo "Pod did not restart after upgrade" @@ -364,6 +380,87 @@ jobs: kubectl wait --for=delete deployment/test-chart --timeout=2m kubectl wait --for=delete deployment/replicated --timeout=2m + # validate status informers + - name: Create empty status informers for validation + run: | + cat << EOF > test-values.yaml + replicated: + statusInformers: [] + EOF + + - name: Install via Helm as subchart in production mode and pass empty status informers + run: | + helm install test-chart oci://registry.replicated.com/$APP_SLUG/$CHANNEL_SLUG/test-chart --set replicated.integration.enabled=false -f test-values.yaml --wait --timeout 2m + + COUNTER=1 + while ! kubectl logs deploy/replicated | grep -q 'Generated informers from Helm release'; do + ((COUNTER += 1)) + if [ $COUNTER -gt 60 ]; then + echo "Did not receive empty status informers" + kubectl logs deploy/replicated + exit 1 + fi + sleep 1 + done + + - name: Upgrade via Helm as subchart in production mode to use default status informers + run: | + helm upgrade test-chart oci://registry.replicated.com/$APP_SLUG/$CHANNEL_SLUG/test-chart --set replicated.integration.enabled=false --wait --timeout 2m + + COUNTER=1 + while 
kubectl logs deploy/replicated | grep -q 'Generated informers from Helm release'; do + ((COUNTER += 1)) + if [ $COUNTER -gt 60 ]; then + echo "Did not receive default status informers" + kubectl logs deploy/replicated + exit 1 + fi + sleep 1 + done + + - name: Uninstall test-chart via Helm + run: helm uninstall test-chart --wait --timeout 2m + + - name: Install via kubectl as subchart in production mode and pass empty status informers + run: | + helm template test-chart oci://registry.replicated.com/$APP_SLUG/$CHANNEL_SLUG/test-chart --set replicated.integration.enabled=false -f test-values.yaml | kubectl apply -f - + kubectl rollout status deployment test-chart --timeout=2m + kubectl rollout status deployment replicated --timeout=2m + + COUNTER=1 + while ! kubectl logs deploy/replicated | grep -q 'Generated informers from Helm release'; do + ((COUNTER += 1)) + if [ $COUNTER -gt 60 ]; then + echo "Did not receive empty status informers" + kubectl logs deploy/replicated + exit 1 + fi + sleep 1 + done + + - name: Upgrade via kubectl as subchart in production mode to use default status informers + run: | + helm template test-chart oci://registry.replicated.com/$APP_SLUG/$CHANNEL_SLUG/test-chart --set replicated.integration.enabled=false | kubectl apply -f - + kubectl rollout status deployment test-chart --timeout=2m + kubectl rollout status deployment replicated --timeout=2m + + COUNTER=1 + while 
kubectl logs deploy/replicated | grep -q 'Generated informers from Helm release'; do + ((COUNTER += 1)) + if [ $COUNTER -gt 60 ]; then + echo "Did not receive default status informers" + kubectl logs deploy/replicated + exit 1 + fi + sleep 1 + done + + - name: Uninstall test-chart via kubectl + run: | + helm template test-chart oci://registry.replicated.com/$APP_SLUG/$CHANNEL_SLUG/test-chart --set replicated.integration.enabled=false -f test-values.yaml | kubectl delete -f - + kubectl wait --for=delete deployment/test-chart --timeout=2m + kubectl wait --for=delete deployment/replicated --timeout=2m + - name: Remove Cluster uses: replicatedhq/replicated-actions/remove-cluster@v1.1.1 if: ${{ success() || cancelled() }} diff --git a/pkg/apiserver/bootstrap.go b/pkg/apiserver/bootstrap.go index 478fc2eb..039441ad 100644 --- a/pkg/apiserver/bootstrap.go +++ b/pkg/apiserver/bootstrap.go @@ -139,6 +139,7 @@ func bootstrap(params APIServerParams) error { } if helmRelease != nil { informers = appstate.GenerateStatusInformersForManifest(helmRelease.Manifest) + logger.Infof("Generated informers from Helm release: %v", informers) } }