From ce51386d50d43a566e3fadf42ccf26c2ce38c585 Mon Sep 17 00:00:00 2001 From: pablochacin Date: Wed, 13 Sep 2023 16:10:50 +0200 Subject: [PATCH] Complete k8s integration tests (#340) * Add kubernetes version test * Add k8s integration test target * Remove superseded kubernetes e2e tests --------- Signed-off-by: Pablo Chacin --- Makefile | 8 +- e2e/kubernetes/kubernetes_e2e_test.go | 231 -------------------------- pkg/kubernetes/integration_test.go | 43 +++++ pull_request_template.md | 6 +- 4 files changed, 49 insertions(+), 239 deletions(-) delete mode 100644 e2e/kubernetes/kubernetes_e2e_test.go diff --git a/Makefile b/Makefile index ca53ca8e..fa36fef4 100644 --- a/Makefile +++ b/Makefile @@ -33,9 +33,6 @@ e2e-disruptors: agent-image e2e-setup e2e-cluster: go test -tags e2e ./e2e/cluster/... -e2e-kubernetes: - go test -tags e2e ./e2e/kubernetes/... - e2e-setup: build-e2e build/e2e-cluster setup @@ -45,7 +42,10 @@ format: integration-agent: agent-image go test -tags integration ./pkg/agent/... -integration: integration-agent +integration-kubernetes: + go test -tags integration ./pkg/kubernetes/... + +integration: integration-agent integration-kubernetes # Running with -buildvcs=false works around the issue of `go list all` failing when git, which runs as root inside # the container, refuses to operate on the disruptor source tree as it is not owned by the same user (root). 
diff --git a/e2e/kubernetes/kubernetes_e2e_test.go b/e2e/kubernetes/kubernetes_e2e_test.go deleted file mode 100644 index 497ed416..00000000 --- a/e2e/kubernetes/kubernetes_e2e_test.go +++ /dev/null @@ -1,231 +0,0 @@ -//go:build e2e -// +build e2e - -package e2e - -import ( - "context" - "testing" - "time" - - "github.com/grafana/xk6-disruptor/pkg/kubernetes" - "github.com/grafana/xk6-disruptor/pkg/kubernetes/helpers" - kindcluster "github.com/grafana/xk6-disruptor/pkg/testutils/cluster" - "github.com/grafana/xk6-disruptor/pkg/testutils/e2e/cluster" - "github.com/grafana/xk6-disruptor/pkg/testutils/e2e/deploy" - "github.com/grafana/xk6-disruptor/pkg/testutils/e2e/fixtures" - "github.com/grafana/xk6-disruptor/pkg/testutils/e2e/kubernetes/namespace" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func Test_Kubernetes(t *testing.T) { - t.Parallel() - - cluster, err := cluster.BuildE2eCluster( - cluster.DefaultE2eClusterConfig(), - ) - if err != nil { - t.Errorf("failed to create cluster: %v", err) - return - } - t.Cleanup(func() { - _ = cluster.Cleanup() - }) - - k8s, err := kubernetes.NewFromKubeconfig(cluster.Kubeconfig()) - if err != nil { - t.Errorf("error creating kubernetes client: %v", err) - return - } - - // Test Wait Pod Running - t.Run("Wait Pod is Running", func(t *testing.T) { - namespace, err := namespace.CreateTestNamespace(context.TODO(), t, k8s.Client()) - if err != nil { - t.Errorf("failed to create test namespace: %v", err) - return - } - - // Deploy nginx - nginx := fixtures.BuildNginxPod() - _, err = k8s.Client().CoreV1().Pods(namespace).Create( - context.TODO(), - &nginx, - metav1.CreateOptions{}, - ) - if err != nil { - t.Errorf("failed to create pod: %v", err) - return - } - - // wait for the service to be ready for accepting requests - running, err := k8s.PodHelper(namespace).WaitPodRunning(context.TODO(), "nginx", time.Second*20) - if err != nil { - t.Errorf("error waiting for pod %v", err) - return - } - 
if !running { - t.Errorf("timeout expired waiting for pod ready") - return - } - }) - - // Test Wait Service Ready helper - t.Run("Wait Service Ready", func(t *testing.T) { - namespace, err := namespace.CreateTestNamespace(context.TODO(), t, k8s.Client()) - if err != nil { - t.Errorf("failed to create test namespace: %v", err) - return - } - - // Deploy nginx and expose it as a service. Intentionally not using e2e fixures - // because these functions rely on WaitPodRunnin and WaitServiceReady which we - // are testing here. - nginxPod := fixtures.BuildNginxPod() - _, err = k8s.Client().CoreV1().Pods(namespace).Create( - context.TODO(), - &nginxPod, - metav1.CreateOptions{}, - ) - if err != nil { - t.Errorf("failed to create pod: %v", err) - return - } - - nginxSvc := fixtures.BuildNginxService() - _, err = k8s.Client().CoreV1().Services(namespace).Create( - context.TODO(), - &nginxSvc, - metav1.CreateOptions{}, - ) - if err != nil { - t.Errorf("failed to create nginx service: %v", err) - return - } - - // wait for the service to be ready for accepting requests - err = k8s.ServiceHelper(namespace).WaitServiceReady(context.TODO(), "nginx", time.Second*20) - if err != nil { - t.Errorf("error waiting for service nginx: %v", err) - return - } - }) - - t.Run("Exec Command", func(t *testing.T) { - namespace, err := namespace.CreateTestNamespace(context.TODO(), t, k8s.Client()) - if err != nil { - t.Errorf("failed to create test namespace: %v", err) - return - } - - err = deploy.RunPod(k8s, namespace, fixtures.BuildBusyBoxPod(), 10*time.Second) - if err != nil { - t.Errorf("error creating pod: %v", err) - return - } - - stdout, _, err := k8s.PodHelper(namespace).Exec( - context.TODO(), - "busybox", - "busybox", - []string{"echo", "-n", "hello", "world"}, - nil, - ) - if err != nil { - t.Errorf("error executing command in pod: %v", err) - return - } - - greetings := "hello world" - if string(stdout) != "hello world" { - t.Errorf("stdout does not match expected 
result:\nexpected: %s\nactual%s\n", greetings, string(stdout)) - return - } - }) - - t.Run("Attach Ephemeral Container", func(t *testing.T) { - namespace, err := namespace.CreateTestNamespace(context.TODO(), t, k8s.Client()) - if err != nil { - t.Errorf("failed to create test namespace: %v", err) - return - } - - err = deploy.RunPod(k8s, namespace, fixtures.BuildPausedPod(), 10*time.Second) - if err != nil { - t.Errorf("error running pod %v: ", err) - return - } - - ephemeral := corev1.EphemeralContainer{ - EphemeralContainerCommon: corev1.EphemeralContainerCommon{ - Name: "ephemeral", - Image: "busybox", - Command: []string{"sleep", "300"}, - TTY: true, - Stdin: true, - }, - } - - err = k8s.PodHelper(namespace).AttachEphemeralContainer( - context.TODO(), - "paused", - ephemeral, - helpers.AttachOptions{ - Timeout: 15 * time.Second, - }, - ) - - if err != nil { - t.Errorf("error attaching ephemeral container to pod: %v", err) - return - } - - stdout, _, err := k8s.PodHelper(namespace).Exec( - context.TODO(), - "paused", - "ephemeral", - []string{"echo", "-n", "hello", "world"}, - nil, - ) - if err != nil { - t.Errorf("error executing command in pod: %v", err) - return - } - - greetings := "hello world" - if string(stdout) != "hello world" { - t.Errorf("stdout does not match expected result:\nexpected: %s\nactual%s\n", greetings, string(stdout)) - return - } - }) -} - -func Test_UnsupportedKubernetesVersion(t *testing.T) { - // TODO: use e2e cluster. 
This will require an option for setting the K8s version in the e2e cluster - config, err := kindcluster.NewConfig( - "e2e-v1-22-0-cluster", - kindcluster.Options{ - Version: "v1.22.0", - Wait: time.Second * 60, - }, - ) - if err != nil { - t.Errorf("failed creating cluster configuration: %v", err) - return - } - - cluster, err := config.Create() - if err != nil { - t.Errorf("failed to create cluster: %v", err) - return - } - defer cluster.Delete() - - _, err = kubernetes.NewFromKubeconfig(cluster.Kubeconfig()) - if err == nil { - t.Errorf("should had failed creating kubernetes client") - return - } -} diff --git a/pkg/kubernetes/integration_test.go b/pkg/kubernetes/integration_test.go index 3416d273..d79e09e3 100644 --- a/pkg/kubernetes/integration_test.go +++ b/pkg/kubernetes/integration_test.go @@ -306,3 +306,46 @@ func Test_Kubernetes(t *testing.T) { } }) } + +func Test_UnsupportedKubernetesVersion(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + container, err := k3s.RunContainer(ctx, testcontainers.WithImage("docker.io/rancher/k3s:v1.22.17-k3s1")) + if err != nil { + t.Fatal(err) + } + + // wait for the api server to complete initialization. 
+ // see this issue for more details: + // https://github.com/testcontainers/testcontainers-go/issues/1547 + timeout := time.Second * 30 + err = waitForRegex(ctx, container, ".*Node controller sync successful.*", timeout) + if err != nil { + t.Fatalf("failed waiting for cluster ready: %s", err) + } + + // Clean up the container after the test is complete + t.Cleanup(func() { + if err = container.Terminate(ctx); err != nil { + t.Fatalf("failed to terminate container: %s", err) + } + }) + + kubeConfigYaml, err := container.GetKubeConfig(ctx) + if err != nil { + t.Fatalf("failed to get kube-config: %s", err) + } + + restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeConfigYaml) + if err != nil { + t.Fatalf("failed to create rest client for kubernetes: %s", err) + } + + _, err = newFromConfig(restcfg) + if err == nil { + t.Errorf("should have failed creating kubernetes client") + return + } +} diff --git a/pull_request_template.md b/pull_request_template.md index 1f99eb65..a22c6820 100644 --- a/pull_request_template.md +++ b/pull_request_template.md @@ -15,8 +15,6 @@ Fixes # (issue) - [ ] I have added tests that prove my fix is effective or that my feature works. - [ ] I have run linter locally (`make lint`) and all checks pass. - [ ] I have run tests locally (`make test`) and all tests pass. -- [ ] I have run relevant integration test locally (`make integration-xxx` for `agent` related changes) -- [ ] I have run relevant e2e test locally (`make e2e-xxx` for `disruptors`, `kubernetes` or `cluster` related changes) +- [ ] I have run relevant integration test locally (`make integration-xxx` for affected packages) +- [ ] I have run relevant e2e test locally (`make e2e-xxx` for `disruptors` or `cluster` related changes) - [ ] Any dependent changes have been merged and published in downstream modules
- -