From 06e64eb03b1c25b940d407f7380d8f746bee1ee1 Mon Sep 17 00:00:00 2001
From: Mikalai Radchuk
Date: Wed, 10 Jul 2024 17:07:21 +0200
Subject: [PATCH] fixup! fixup! Add upgrade E2E

Signed-off-by: Mikalai Radchuk
---
 test/upgrade-e2e/upgrade-e2e_suite_test.go | 20 ++++--
 test/upgrade-e2e/upgrade_test.go           | 77 ++++++++++++++++++++++
 2 files changed, 91 insertions(+), 6 deletions(-)

diff --git a/test/upgrade-e2e/upgrade-e2e_suite_test.go b/test/upgrade-e2e/upgrade-e2e_suite_test.go
index f09644196..40293fda5 100644
--- a/test/upgrade-e2e/upgrade-e2e_suite_test.go
+++ b/test/upgrade-e2e/upgrade-e2e_suite_test.go
@@ -5,8 +5,7 @@ import (
 	"os"
 	"testing"
 
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/client-go/rest"
+	"k8s.io/client-go/kubernetes"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -19,8 +18,8 @@ const (
 )
 
 var (
-	cfg *rest.Config
-	c   client.Client
+	c          client.Client
+	kclientset kubernetes.Interface
 
 	testClusterCatalogName   string
 	testClusterExtensionName string
@@ -39,11 +38,20 @@ func TestMain(m *testing.M) {
 		os.Exit(1)
 	}
 
-	cfg = ctrl.GetConfigOrDie()
+	cfg := ctrl.GetConfigOrDie()
 
 	var err error
 	c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
-	utilruntime.Must(err)
+	if err != nil {
+		fmt.Printf("failed to create client: %s\n", err)
+		os.Exit(1)
+	}
+
+	kclientset, err = kubernetes.NewForConfig(cfg)
+	if err != nil {
+		fmt.Printf("failed to create kubernetes clientset: %s\n", err)
+		os.Exit(1)
+	}
 
 	os.Exit(m.Run())
 }
diff --git a/test/upgrade-e2e/upgrade_test.go b/test/upgrade-e2e/upgrade_test.go
index 785bc55df..480136f4f 100644
--- a/test/upgrade-e2e/upgrade_test.go
+++ b/test/upgrade-e2e/upgrade_test.go
@@ -1,25 +1,37 @@
 package upgradee2e
 
 import (
+	"bufio"
 	"context"
+	"fmt"
+	"strings"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	catalogdv1alpha1 "github.com/operator-framework/catalogd/api/core/v1alpha1"
 
 	ocv1alpha1 "github.com/operator-framework/operator-controller/api/v1alpha1"
 )
 
+// TODO: Get rid of hardcoded namespace
+const olmNamespace = "olmv1-system"
+
 func TestClusterExtensionAfterOLMUpgrade(t *testing.T) {
 	t.Log("Starting checks after OLM upgrade")
 	ctx := context.Background()
 
+	managerLabelSelector := labels.Set{"control-plane": "controller-manager"}
+
 	var clusterCatalog catalogdv1alpha1.ClusterCatalog
 	var clusterExtension ocv1alpha1.ClusterExtension
 
@@ -67,4 +79,69 @@ func TestClusterExtensionAfterOLMUpgrade(t *testing.T) {
 		assert.Equal(ct, &ocv1alpha1.BundleMetadata{Name: "prometheus-operator.1.0.1", Version: "1.0.1"}, clusterExtension.Status.InstalledBundle)
 		assert.NotEqual(ct, previousVersion, clusterExtension.Status.InstalledBundle.Version)
 	}, time.Minute, time.Second)
+
+	t.Log("Checking that the controller-manager deployment is updated")
+	require.EventuallyWithT(t, func(ct *assert.CollectT) {
+		var managerDeployments appsv1.DeploymentList
+		assert.NoError(ct, c.List(ctx, &managerDeployments, client.MatchingLabelsSelector{Selector: managerLabelSelector.AsSelector()}))
+		assert.Len(ct, managerDeployments.Items, 1)
+		managerDeployment := managerDeployments.Items[0]
+
+		assert.True(ct,
+			managerDeployment.Status.UpdatedReplicas == *managerDeployment.Spec.Replicas &&
+				managerDeployment.Status.Replicas == *managerDeployment.Spec.Replicas &&
+				managerDeployment.Status.AvailableReplicas == *managerDeployment.Spec.Replicas &&
+				managerDeployment.Status.ReadyReplicas == *managerDeployment.Spec.Replicas,
+		)
+	}, time.Minute, time.Second)
+
+	var managerPod *corev1.Pod
+	t.Log("Waiting for only one controller-manager pod to remain")
+	require.EventuallyWithT(t, func(ct *assert.CollectT) {
+		managerPods, err := kclientset.CoreV1().Pods(olmNamespace).List(ctx, metav1.ListOptions{
+			LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(managerLabelSelector)),
+		})
+		assert.NoError(ct, err)
+		assert.Len(ct, managerPods.Items, 1)
+
+		managerPod = &managerPods.Items[0]
+	}, time.Minute, time.Second)
+
+	logCtx, cancel := context.WithTimeout(ctx, time.Minute)
+	defer cancel()
+
+	substring := fmt.Sprintf(`"ClusterExtension": {"name":"%s"}`, testClusterExtensionName)
+	found, err := watchPodLogsForSubstring(logCtx, managerPod, substring)
+	require.NoError(t, err)
+	require.True(t, found)
+}
+
+func watchPodLogsForSubstring(ctx context.Context, pod *corev1.Pod, substring string) (bool, error) {
+	podLogOpts := corev1.PodLogOptions{
+		Follow: true,
+		// We are only interested in manager logs
+		Container: "manager",
+	}
+
+	req := kclientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOpts)
+	podLogs, err := req.Stream(ctx)
+	if err != nil {
+		return false, err
+	}
+	defer podLogs.Close()
+
+	scanner := bufio.NewScanner(podLogs)
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if strings.Contains(line, substring) {
+			return true, nil
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return false, err
+	}
+
+	return false, nil
 }
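
Note on the log-scanning helper, not part of the patch: bufio.Scanner rejects any
single line longer than 64KiB (bufio.MaxScanTokenSize) and reports it as
bufio.ErrTooLong from scanner.Err(), so a very long JSON log line would abort the
scan before the substring is found. Below is a minimal sketch of the same loop
with an explicit buffer cap; scanForSubstring, its io.Reader parameter, and both
buffer sizes are illustrative and do not appear in the patch.

	package upgradee2e

	import (
		"bufio"
		"io"
		"strings"
	)

	// scanForSubstring mirrors the scanning loop in watchPodLogsForSubstring.
	// The explicit Buffer call raises the per-line limit from the default
	// 64KiB to 1MiB so long structured log entries do not trip ErrTooLong.
	// Buffer must be called before the first Scan.
	func scanForSubstring(r io.Reader, substring string) (bool, error) {
		scanner := bufio.NewScanner(r)
		scanner.Buffer(make([]byte, 64*1024), 1024*1024)
		for scanner.Scan() {
			if strings.Contains(scanner.Text(), substring) {
				return true, nil
			}
		}
		// Err returns the first non-EOF error encountered by the Scanner.
		if err := scanner.Err(); err != nil {
			return false, err
		}
		return false, nil
	}

If adopted, watchPodLogsForSubstring would delegate to it with the stream it
already opens: found, err := scanForSubstring(podLogs, substring).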