From 252ee03cb6921eb3158d30336e6e0c4b3577a3d0 Mon Sep 17 00:00:00 2001 From: Ekaterina Kazakova Date: Tue, 12 Nov 2024 00:21:21 +0400 Subject: [PATCH] [e2e] Support providing e2e testing configuration Closes #641 --- .github/workflows/build_test.yml | 20 +- Makefile | 6 +- docs/dev.md | 10 +- test/e2e/config/config.go | 135 +++++++++ test/e2e/e2e_suite_test.go | 97 +----- test/e2e/kubeclient/kubeclient.go | 14 +- test/e2e/logs/logs.go | 140 +++++++++ test/e2e/managedcluster/constants.go | 10 +- test/e2e/managedcluster/managedcluster.go | 62 ++-- test/e2e/managedcluster/providervalidator.go | 19 +- .../resources/aws-hosted-cp.yaml.tpl | 2 +- .../resources/aws-standalone-cp.yaml.tpl | 2 +- .../resources/azure-hosted-cp.yaml.tpl | 2 +- .../resources/azure-standalone-cp.yaml.tpl | 2 +- .../resources/vsphere-hosted-cp.yaml.tpl | 2 +- .../resources/vsphere-standalone-cp.yaml.tpl | 2 +- test/e2e/provider_aws_test.go | 276 ++++++++++-------- test/e2e/provider_azure_test.go | 259 +++++++++------- test/e2e/provider_vsphere_test.go | 98 ++++--- test/e2e/templates/templates.go | 26 ++ 20 files changed, 757 insertions(+), 427 deletions(-) create mode 100644 test/e2e/config/config.go create mode 100644 test/e2e/logs/logs.go create mode 100644 test/e2e/templates/templates.go diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index fe2fa81cd..6e158b607 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -29,7 +29,7 @@ jobs: runs-on: ubuntu-latest outputs: version: ${{ steps.vars.outputs.version }} - clustername: ${{ steps.vars.outputs.clustername }} + clusterprefix: ${{ steps.vars.outputs.clusterprefix }} pr: ${{ steps.pr.outputs.result }} steps: - name: Get PR ref @@ -72,7 +72,7 @@ jobs: run: | GIT_VERSION=$(git describe --tags --always) echo "version=${GIT_VERSION:1}" >> $GITHUB_OUTPUT - echo "clustername=ci-$(date +%s | cut -b6-10)" >> $GITHUB_OUTPUT + echo "clusterprefix=ci-$(date +%s | cut -b6-10)" >> $GITHUB_OUTPUT - name: Build and push HMC controller image uses: docker/build-push-action@v6 with: @@ -98,7 +98,7 @@ jobs: group: controller-${{ github.head_ref || github.run_id }} cancel-in-progress: true outputs: - clustername: ${{ needs.build.outputs.clustername }} + clusterprefix: ${{ needs.build.outputs.clusterprefix }} version: ${{ needs.build.outputs.version }} pr: ${{ needs.build.outputs.pr }} steps: @@ -112,7 +112,7 @@ - name: Run E2E tests env: GINKGO_LABEL_FILTER: 'controller' - MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} + MANAGED_CLUSTER_PREFIX: ${{ needs.build.outputs.clusterprefix }} IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' VERSION: ${{ needs.build.outputs.version }} run: | @@ -134,7 +134,7 @@ group: cloud-${{ github.head_ref || github.run_id }} cancel-in-progress: true outputs: - clustername: ${{ needs.build.outputs.clustername }} + clusterprefix: ${{ needs.build.outputs.clusterprefix }} version: ${{ needs.build.outputs.version }} pr: ${{ needs.build.outputs.pr }} env: @@ -162,7 +162,7 @@ - name: Run E2E tests env: GINKGO_LABEL_FILTER: 'provider:cloud' - MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} + MANAGED_CLUSTER_PREFIX: ${{ needs.build.outputs.clusterprefix }} IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' VERSION: ${{ needs.build.outputs.version }} run: | @@ -184,7 +184,7 @@ group: onprem-${{ github.head_ref || github.run_id }} cancel-in-progress: true outputs: - clustername: ${{
needs.build.outputs.clustername }} + clusterprefix: ${{ needs.build.outputs.clusterprefix }} version: ${{ needs.build.outputs.version }} pr: ${{ needs.build.outputs.pr }} env: @@ -215,7 +215,7 @@ jobs: - name: Run E2E tests env: GINKGO_LABEL_FILTER: 'provider:onprem' - MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} + MANAGED_CLUSTER_PREFIX: ${{ needs.build.outputs.clusterprefix }} IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' VERSION: ${{ needs.build.outputs.version }} run: | @@ -237,7 +237,7 @@ if: ${{ always() && !contains(needs.provider-cloud-e2etest.result, 'skipped') && contains(needs.build.result, 'success') }} timeout-minutes: 15 outputs: - clustername: ${{ needs.build.outputs.clustername }} + clusterprefix: ${{ needs.build.outputs.clusterprefix }} version: ${{ needs.build.outputs.version }} pr: ${{ needs.build.outputs.pr }} steps: @@ -260,7 +260,7 @@ AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} - CLUSTER_NAME: '${{ needs.build.outputs.clustername }}' + CLUSTER_NAME: '${{ needs.build.outputs.clusterprefix }}' run: | make dev-aws-nuke make dev-azure-nuke diff --git a/Makefile b/Makefile index 561d85d38..b09e75e60 100644 --- a/Makefile +++ b/Makefile @@ -108,6 +108,9 @@ tidy: test: generate-all fmt vet envtest tidy external-crd ## Run tests. KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out +# E2E_CONFIG_B64 contains the base64-encoded configuration for e2e testing. +E2E_CONFIG_B64 ?= "" + # Utilize Kind or modify the e2e tests to load the image locally, enabling # compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests using a Kind k8s instance as the management cluster. @@ -115,7 +118,8 @@ test-e2e: cli-install @if [ "$$GINKGO_LABEL_FILTER" ]; then \ ginkgo_label_flag="-ginkgo.label-filter=$$GINKGO_LABEL_FILTER"; \ fi; \ - KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -ginkgo.timeout=3h -timeout=3h $$ginkgo_label_flag + KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) E2E_CONFIG_B64=$(E2E_CONFIG_B64) \ + go test ./test/e2e/ -v -ginkgo.v -ginkgo.timeout=3h -timeout=3h $$ginkgo_label_flag .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint diff --git a/docs/dev.md b/docs/dev.md index 1484eebfc..e019a39cf 100644 --- a/docs/dev.md +++ b/docs/dev.md @@ -140,13 +140,13 @@ IMG="ghcr.io/mirantis/hmc/controller-ci:v0.0.1-179-ga5bdf29" \ Optionally, the `NO_CLEANUP=1` env var can be used to disable `After` nodes from running within some specs, this will allow users to debug tests by re-running them without the need to wait a while for an infrastructure deployment to occur. -For subsequent runs the `MANAGED_CLUSTER_NAME=` env var should be -passed to tell the test what cluster name to use so that it does not try to -generate a new name and deploy a new cluster. +For subsequent runs the `MANAGED_CLUSTER_PREFIX=` env var should be +passed to tell the test what cluster prefix to use so that it does not try to +generate a new cluster name and deploy a new cluster. -Tests that run locally use autogenerated names like `12345678-e2e-test` while +Tests that run locally use autogenerated name prefixes like `e2e-test-12345678` while tests that run in CI use names such as `ci-1234567890-e2e-test`.
You can always -pass `MANAGED_CLUSTER_NAME=` from the get-go to customize the name used by the +pass `MANAGED_CLUSTER_PREFIX=` from the get-go to customize the prefix used by the test. ### Filtering test runs diff --git a/test/e2e/config/config.go b/test/e2e/config/config.go new file mode 100644 index 000000000..ff8c2d18c --- /dev/null +++ b/test/e2e/config/config.go @@ -0,0 +1,135 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "encoding/base64" + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" +) + +type TestingProvider string + +const ( + envVarE2EConfig = "E2E_CONFIG_B64" + + TestingProviderAWS TestingProvider = "aws" + TestingProviderAzure TestingProvider = "azure" + TestingProviderVsphere TestingProvider = "vsphere" +) + +var ( + Config TestingConfig + + defaultConfig = map[TestingProvider][]ProviderTestingConfig{ + TestingProviderAWS: {}, + TestingProviderAzure: {}, + TestingProviderVsphere: {}, + } + + defaultStandaloneTemplates = map[TestingProvider]string{ + TestingProviderAWS: "aws-standalone-cp-0-0-3", + TestingProviderAzure: "azure-standalone-cp-0-0-3", + TestingProviderVsphere: "vsphere-standalone-cp-0-0-3", + } + + defaultHostedTemplates = map[TestingProvider]string{ + TestingProviderAWS: "aws-hosted-cp-0-0-3", + TestingProviderAzure: "azure-hosted-cp-0-0-3", + TestingProviderVsphere: "vsphere-hosted-cp-0-0-3", + } ) + +type TestingConfig = map[TestingProvider][]ProviderTestingConfig + +type ProviderTestingConfig struct { + // Standalone contains the testing configuration for the standalone cluster deployment. + Standalone *ClusterTestingConfig `yaml:"standalone,omitempty"` + // Hosted contains the testing configuration for the hosted cluster deployment. + Hosted *ClusterTestingConfig `yaml:"hosted,omitempty"` +} + +type ClusterTestingConfig struct { + // Upgrade is a boolean parameter that specifies whether the managed cluster upgrade should be tested. + Upgrade bool `yaml:"upgrade,omitempty"` + // Template is the name of the template to use when deploying a managed cluster. + // If unset: + // * The latest available template will be chosen + // * If upgrade is triggered, the latest available template with available upgrades will be chosen. + Template string `yaml:"template,omitempty"` + // UpgradeTemplate specifies the name of the template to upgrade to. Ignored if upgrade is set to false. + // If unset, the latest template available for the upgrade will be chosen.
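+ // Example (illustrative value only): upgradeTemplate: aws-standalone-cp-0-0-4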
+ UpgradeTemplate string `yaml:"upgradeTemplate,omitempty"` +} + +func Parse() error { + decodedConfig, err := base64.StdEncoding.DecodeString(os.Getenv(envVarE2EConfig)) + if err != nil { + return err + } + + err = yaml.Unmarshal(decodedConfig, &Config) + if err != nil { + return err + } + + setDefaults() + _, _ = fmt.Fprintf(GinkgoWriter, "E2e testing configuration:\n%s\n", Show()) + return nil +} + +func setDefaults() { + if len(Config) == 0 { + Config = defaultConfig + } + for provider, configs := range Config { + if len(configs) == 0 { + Config[provider] = []ProviderTestingConfig{ + { + Standalone: &ClusterTestingConfig{}, + Hosted: &ClusterTestingConfig{}, + }, + } + } + for i := range Config[provider] { + config := Config[provider][i] + if config.Standalone != nil && config.Standalone.Template == "" { + config.Standalone.Template = defaultStandaloneTemplates[provider] + } + if config.Hosted != nil && config.Hosted.Template == "" { + config.Hosted.Template = defaultHostedTemplates[provider] + } + Config[provider][i] = config + } + } +} + +func Show() string { + prettyConfig, err := yaml.Marshal(Config) + Expect(err).NotTo(HaveOccurred()) + + return string(prettyConfig) +} + +func (c *ProviderTestingConfig) String() string { + prettyConfig, err := yaml.Marshal(c) + Expect(err).NotTo(HaveOccurred()) + + return string(prettyConfig) +} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index e76a4c245..b3fbc2b57 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -15,26 +15,24 @@ package e2e import ( - "bufio" "context" "fmt" - "net/url" "os" "os/exec" - "path/filepath" "strings" "testing" "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/config" "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/logs" "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/e2e/templates" "github.com/Mirantis/hmc/test/utils" ) @@ -46,12 +44,15 @@ func TestE2E(t *testing.T) { } var _ = BeforeSuite(func() { - GinkgoT().Setenv(managedcluster.EnvVarNamespace, internalutils.DefaultSystemNamespace) + err := config.Parse() + Expect(err).NotTo(HaveOccurred()) + GinkgoT().Setenv(managedcluster.EnvVarNamespace, internalutils.DefaultSystemNamespace) By("building and deploying the controller-manager") cmd := exec.Command("make", "kind-deploy") - _, err := utils.Run(cmd) + _, err = utils.Run(cmd) Expect(err).NotTo(HaveOccurred()) + cmd = exec.Command("make", "test-apply") _, err = utils.Run(cmd) Expect(err).NotTo(HaveOccurred()) @@ -72,7 +73,7 @@ var _ = AfterSuite(func() { if !noCleanup() { By("collecting logs from local controllers") kc := kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) - collectLogArtifacts(kc, "") + logs.Collector{Client: kc}.CollectProvidersLogs() By("removing the controller-manager") cmd := exec.Command("make", "dev-destroy") @@ -144,89 +145,11 @@ func validateController(kc *kubeclient.KubeClient, labelSelector, name string) e // templateBy wraps a Ginkgo By with a block describing the template being // tested. 
-func templateBy(t managedcluster.Template, description string) { +func templateBy(t templates.Type, description string) { GinkgoHelper() By(fmt.Sprintf("[%s] %s", t, description)) } -// collectLogArtifacts collects log output from each the HMC controller, -// CAPI controller and the provider controller(s) as well as output from clusterctl -// and stores them in the test/e2e directory as artifacts. clusterName can be -// optionally provided, passing an empty string will prevent clusterctl output -// from being fetched. If collectLogArtifacts fails it produces a warning -// message to the GinkgoWriter, but does not fail the test. -func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, providerTypes ...managedcluster.ProviderType) { - GinkgoHelper() - - filterLabels := []string{utils.HMCControllerLabel} - - var host string - hostURL, err := url.Parse(kc.Config.Host) - if err != nil { - utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err)) - } else { - host = strings.ReplaceAll(hostURL.Host, ":", "_") - } - - if providerTypes == nil { - filterLabels = managedcluster.FilterAllProviders() - } else { - for _, providerType := range providerTypes { - filterLabels = append(filterLabels, managedcluster.GetProviderLabel(providerType)) - } - } - - for _, label := range filterLabels { - pods, _ := kc.Client.CoreV1().Pods(kc.Namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: label, - }) - - for _, pod := range pods.Items { - req := kc.Client.CoreV1().Pods(kc.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ - TailLines: ptr.To(int64(1000)), - }) - podLogs, err := req.Stream(context.Background()) - if err != nil { - utils.WarnError(fmt.Errorf("failed to get log stream for pod %s: %w", pod.Name, err)) - continue - } - - output, err := os.Create(fmt.Sprintf("./test/e2e/%s.log", host+"-"+pod.Name)) - if err != nil { - utils.WarnError(fmt.Errorf("failed to create log file for pod %s: %w", pod.Name, err)) - continue - } - - r := bufio.NewReader(podLogs) - _, err = r.WriteTo(output) - if err != nil { - utils.WarnError(fmt.Errorf("failed to write log file for pod %s: %w", pod.Name, err)) - } - - if err = podLogs.Close(); err != nil { - utils.WarnError(fmt.Errorf("failed to close log stream for pod %s: %w", pod.Name, err)) - } - if err = output.Close(); err != nil { - utils.WarnError(fmt.Errorf("failed to close log file for pod %s: %w", pod.Name, err)) - } - } - } - - if clusterName != "" { - cmd := exec.Command("./bin/clusterctl", - "describe", "cluster", clusterName, "--namespace", internalutils.DefaultSystemNamespace, "--show-conditions=all") - output, err := utils.Run(cmd) - if err != nil { - utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err)) - return - } - err = os.WriteFile(filepath.Join("test/e2e", host+"-"+"clusterctl.log"), output, 0o644) - if err != nil { - utils.WarnError(fmt.Errorf("failed to write clusterctl log: %w", err)) - } - } -} - func noCleanup() bool { noCleanup := os.Getenv(managedcluster.EnvVarNoCleanup) if noCleanup != "" { diff --git a/test/e2e/kubeclient/kubeclient.go b/test/e2e/kubeclient/kubeclient.go index e3801e4e0..003c3017a 100644 --- a/test/e2e/kubeclient/kubeclient.go +++ b/test/e2e/kubeclient/kubeclient.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "path/filepath" + "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -32,6 +33,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + crclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/Mirantis/hmc/internal/utils/status" ) @@ -200,11 +202,15 @@ func (kc *KubeClient) CreateManagedCluster( } return func() error { - err := client.Delete(ctx, managedcluster.GetName(), metav1.DeleteOptions{}) - if apierrors.IsNotFound(err) { - return nil + name := managedcluster.GetName() + if err := client.Delete(ctx, name, metav1.DeleteOptions{}); crclient.IgnoreNotFound(err) != nil { + return err } - return err + Eventually(func() bool { + _, err := client.Get(ctx, name, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }, 30*time.Minute, 1*time.Minute).Should(BeTrue()) + return nil } } diff --git a/test/e2e/logs/logs.go b/test/e2e/logs/logs.go new file mode 100644 index 000000000..f7e4355d5 --- /dev/null +++ b/test/e2e/logs/logs.go @@ -0,0 +1,140 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logs + +import ( + "bufio" + "context" + "errors" + "fmt" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + . "github.com/onsi/ginkgo/v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/utils" +) + +type Collector struct { + Client *kubeclient.KubeClient + ProviderTypes []managedcluster.ProviderType + ClusterNames []string +} + +func (c Collector) CollectAll() { + if c.Client == nil { + utils.WarnError(errors.New("failed to collect logs: client is nil")) + return + } + c.CollectProvidersLogs() + c.CollectClustersInfo() +} + +// CollectProvidersLogs collects log output from each the HMC controller, +// CAPI controller and the provider controller(s) and stores them in the +// test/e2e directory as artifacts. If CollectLogs fails it produces a warning +// message to the GinkgoWriter, but does not fail the test. 
+func (c Collector) CollectProvidersLogs() { + GinkgoHelper() + + filterLabels := []string{utils.HMCControllerLabel} + + var host string + hostURL, err := url.Parse(c.Client.Config.Host) + if err != nil { + utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err)) + } else { + host = strings.ReplaceAll(hostURL.Host, ":", "_") + } + + if c.ProviderTypes == nil { + filterLabels = managedcluster.FilterAllProviders() + } else { + for _, providerType := range c.ProviderTypes { + filterLabels = append(filterLabels, managedcluster.GetProviderLabel(providerType)) + } + } + + client := c.Client + for _, label := range filterLabels { + pods, _ := client.Client.CoreV1().Pods(client.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: label, + }) + + for _, pod := range pods.Items { + req := client.Client.CoreV1().Pods(client.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ + TailLines: ptr.To(int64(1000)), + }) + podLogs, err := req.Stream(context.Background()) + if err != nil { + utils.WarnError(fmt.Errorf("failed to get log stream for pod %s: %w", pod.Name, err)) + continue + } + + output, err := os.Create(fmt.Sprintf("./test/e2e/%s.log", host+"-"+pod.Name)) + if err != nil { + utils.WarnError(fmt.Errorf("failed to create log file for pod %s: %w", pod.Name, err)) + continue + } + + r := bufio.NewReader(podLogs) + _, err = r.WriteTo(output) + if err != nil { + utils.WarnError(fmt.Errorf("failed to write log file for pod %s: %w", pod.Name, err)) + } + + if err = podLogs.Close(); err != nil { + utils.WarnError(fmt.Errorf("failed to close log stream for pod %s: %w", pod.Name, err)) + } + if err = output.Close(); err != nil { + utils.WarnError(fmt.Errorf("failed to close log file for pod %s: %w", pod.Name, err)) + } + } + } +} + +func (c Collector) CollectClustersInfo() { + for _, clusterName := range c.ClusterNames { + cmd := exec.Command("./bin/clusterctl", + "describe", "cluster", clusterName, "--namespace", internalutils.DefaultSystemNamespace, "--show-conditions=all") + output, err := utils.Run(cmd) + if err != nil { + utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err)) + return + } + err = os.WriteFile(filepath.Join("test/e2e", c.getKubeconfigHost()+"-"+clusterName+"-"+"clusterctl.log"), output, 0o644) + if err != nil { + utils.WarnError(fmt.Errorf("failed to write clusterctl log: %w", err)) + } + } +} + +func (c Collector) getKubeconfigHost() string { + hostURL, err := url.Parse(c.Client.Config.Host) + if err == nil { + return strings.ReplaceAll(hostURL.Host, ":", "_") + } + utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err)) + return "" +} diff --git a/test/e2e/managedcluster/constants.go b/test/e2e/managedcluster/constants.go index 4f18a7832..6182dc0dc 100644 --- a/test/e2e/managedcluster/constants.go +++ b/test/e2e/managedcluster/constants.go @@ -16,10 +16,12 @@ package managedcluster const ( // Common - EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" - EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" - EnvVarWorkerNumber = "WORKER_NUMBER" - EnvVarNamespace = "NAMESPACE" + EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" + EnvVarManagedClusterPrefix = "MANAGED_CLUSTER_PREFIX" + EnvVarManagedClusterTemplate = "MANAGED_CLUSTER_TEMPLATE" + EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" + EnvVarWorkerNumber = "WORKER_NUMBER" + EnvVarNamespace = "NAMESPACE" // EnvVarNoCleanup disables After* cleanup in provider specs to allow for // debugging of test failures. 
EnvVarNoCleanup = "NO_CLEANUP" diff --git a/test/e2e/managedcluster/managedcluster.go b/test/e2e/managedcluster/managedcluster.go index 72efa96f8..35e1cde27 100644 --- a/test/e2e/managedcluster/managedcluster.go +++ b/test/e2e/managedcluster/managedcluster.go @@ -18,7 +18,6 @@ import ( _ "embed" "fmt" "os" - "strings" "github.com/a8m/envsubst" "github.com/google/uuid" @@ -27,6 +26,7 @@ import ( "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/Mirantis/hmc/test/e2e/templates" "github.com/Mirantis/hmc/test/utils" ) @@ -41,17 +41,6 @@ const ( providerLabel = "cluster.x-k8s.io/provider" ) -type Template string - -const ( - TemplateAWSStandaloneCP Template = "aws-standalone-cp" - TemplateAWSHostedCP Template = "aws-hosted-cp" - TemplateAzureHostedCP Template = "azure-hosted-cp" - TemplateAzureStandaloneCP Template = "azure-standalone-cp" - TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp" - TemplateVSphereHostedCP Template = "vsphere-hosted-cp" -) - //go:embed resources/aws-standalone-cp.yaml.tpl var awsStandaloneCPManagedClusterTemplateBytes []byte @@ -84,38 +73,39 @@ func GetProviderLabel(provider ProviderType) string { return fmt.Sprintf("%s=%s", providerLabel, provider) } -func setClusterName(templateName Template) { - var generatedName string - - mcName := os.Getenv(EnvVarManagedClusterName) - if mcName == "" { - mcName = "e2e-test-" + uuid.New().String()[:8] +func GenerateClusterName(postfix string) string { + mcPrefix := os.Getenv(EnvVarManagedClusterPrefix) + if mcPrefix == "" { + mcPrefix = "e2e-test-" + uuid.New().String()[:8] } - providerName := strings.Split(string(templateName), "-")[0] - - // Append the provider name to the cluster name to ensure uniqueness between - // different deployed ManagedClusters. - generatedName = fmt.Sprintf("%s-%s", mcName, providerName) - if strings.Contains(string(templateName), "hosted") { - generatedName = fmt.Sprintf("%s-%s", mcName, "hosted") + if postfix != "" { + return fmt.Sprintf("%s-%s", mcPrefix, postfix) } + return mcPrefix +} + +func setClusterName(name string) { + GinkgoT().Setenv(EnvVarManagedClusterName, name) +} - GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) +func setTemplate(templateName string) { + GinkgoT().Setenv(EnvVarManagedClusterTemplate, templateName) } // GetUnstructured returns an unstructured ManagedCluster object based on the // provider and template. -func GetUnstructured(templateName Template) *unstructured.Unstructured { +func GetUnstructured(templateType templates.Type, clusterName, template string) *unstructured.Unstructured { GinkgoHelper() - setClusterName(templateName) + setClusterName(clusterName) + setTemplate(template) var managedClusterTemplateBytes []byte - switch templateName { - case TemplateAWSStandaloneCP: + switch templateType { + case templates.TemplateAWSStandaloneCP: managedClusterTemplateBytes = awsStandaloneCPManagedClusterTemplateBytes - case TemplateAWSHostedCP: + case templates.TemplateAWSHostedCP: // Validate environment vars that do not have defaults are populated. // We perform this validation here instead of within a Before block // since we populate the vars from standalone prior to this step. 
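Taken together, the provider specs generate a cluster name, render the ManagedCluster for the chosen template, and register cleanup. A minimal sketch of that flow under the APIs introduced above (variable names are illustrative; the calls mirror the AWS spec later in this patch):

```go
// Illustrative sketch, not part of the patch itself.
// GenerateClusterName appends the postfix to MANAGED_CLUSTER_PREFIX
// (or to an autogenerated "e2e-test-<uuid8>" prefix).
sdName := managedcluster.GenerateClusterName(fmt.Sprintf("aws-%d", i)) // e.g. "e2e-test-12345678-aws-0"
// GetUnstructured exports the name and template via env vars and renders the tpl.
sd := managedcluster.GetUnstructured(templates.TemplateAWSStandaloneCP, sdName, providerConfig.Standalone.Template)
// CreateManagedCluster returns a delete function that is collected for teardown.
deleteFunc := kc.CreateManagedCluster(context.Background(), sd)
standaloneDeleteFuncs = append(standaloneDeleteFuncs, deleteFunc)
```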
@@ -126,16 +116,16 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { EnvVarAWSSecurityGroupID, }) managedClusterTemplateBytes = awsHostedCPManagedClusterTemplateBytes - case TemplateVSphereStandaloneCP: + case templates.TemplateVSphereStandaloneCP: managedClusterTemplateBytes = vsphereStandaloneCPManagedClusterTemplateBytes - case TemplateVSphereHostedCP: + case templates.TemplateVSphereHostedCP: managedClusterTemplateBytes = vsphereHostedCPManagedClusterTemplateBytes - case TemplateAzureHostedCP: + case templates.TemplateAzureHostedCP: managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes - case TemplateAzureStandaloneCP: + case templates.TemplateAzureStandaloneCP: managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes default: - Fail(fmt.Sprintf("Unsupported template: %s", templateName)) + Fail(fmt.Sprintf("Unsupported template type: %s", templateType)) } managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) diff --git a/test/e2e/managedcluster/providervalidator.go b/test/e2e/managedcluster/providervalidator.go index 4df0fa84b..add6676ab 100644 --- a/test/e2e/managedcluster/providervalidator.go +++ b/test/e2e/managedcluster/providervalidator.go @@ -21,14 +21,15 @@ import ( . "github.com/onsi/ginkgo/v2" "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/templates" ) // ProviderValidator is a struct that contains the necessary information to // validate a provider's resources. Some providers do not support all of the // resources that can potentially be validated. type ProviderValidator struct { - // Template is the name of the template being validated. - template Template + // Template is the type of the template being validated. + templateType templates.Type // ClusterName is the name of the cluster to validate. 
clusterName string // ResourcesToValidate is a map of resource names to their validation @@ -46,7 +47,7 @@ const ( ValidationActionDelete ValidationAction = "delete" ) -func NewProviderValidator(template Template, clusterName string, action ValidationAction) *ProviderValidator { +func NewProviderValidator(templateType templates.Type, clusterName string, action ValidationAction) *ProviderValidator { var ( resourcesToValidate map[string]resourceValidationFunc resourceOrder []string @@ -61,11 +62,11 @@ func NewProviderValidator(template Template, clusterName string, action Validati } resourceOrder = []string{"clusters", "machines", "control-planes", "csi-driver"} - switch template { - case TemplateAWSStandaloneCP, TemplateAWSHostedCP: + switch templateType { + case templates.TemplateAWSStandaloneCP, templates.TemplateAWSHostedCP: resourcesToValidate["ccm"] = validateCCM resourceOrder = append(resourceOrder, "ccm") - case TemplateAzureStandaloneCP, TemplateVSphereStandaloneCP: + case templates.TemplateAzureStandaloneCP, templates.TemplateVSphereStandaloneCP: delete(resourcesToValidate, "csi-driver") } } else { @@ -78,7 +79,7 @@ func NewProviderValidator(template Template, clusterName string, action Validati } return &ProviderValidator{ - template: template, + templateType: templateType, clusterName: clusterName, resourcesToValidate: resourcesToValidate, resourceOrder: resourceOrder, @@ -104,11 +105,11 @@ func (p *ProviderValidator) Validate(ctx context.Context, kc *kubeclient.KubeCli } if err := validator(ctx, kc, p.clusterName); err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "[%s/%s] validation error: %v\n", p.template, name, err) + _, _ = fmt.Fprintf(GinkgoWriter, "[%s/%s] validation error: %v\n", p.templateType, name, err) return err } - _, _ = fmt.Fprintf(GinkgoWriter, "[%s/%s] validation succeeded\n", p.template, name) + _, _ = fmt.Fprintf(GinkgoWriter, "[%s/%s] validation succeeded\n", p.templateType, name) delete(p.resourcesToValidate, name) } diff --git a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl index 8a2700c63..b8108cce9 100644 --- a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-hosted-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: diff --git a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl index 24c449bc0..ed09f8080 100644 --- a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-standalone-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: diff --git a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl index 76da17cbb..5474ea82b 100644 --- a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: - template: azure-hosted-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AZURE_CLUSTER_IDENTITY}-cred config: location: 
"${AZURE_REGION}" diff --git a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl index 3894b7b39..a68891ce0 100644 --- a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: - template: azure-standalone-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AZURE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: 1 diff --git a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl index c0475f3f4..42b5efc7c 100644 --- a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-hosted-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} diff --git a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl index cc5fa87b3..95a77c9e1 100644 --- a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-standalone-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} diff --git a/test/e2e/provider_aws_test.go b/test/e2e/provider_aws_test.go index 6614698b4..2925f59d4 100644 --- a/test/e2e/provider_aws_test.go +++ b/test/e2e/provider_aws_test.go @@ -25,24 +25,37 @@ import ( . 
"github.com/onsi/gomega" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/config" "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/logs" "github.com/Mirantis/hmc/test/e2e/managedcluster" "github.com/Mirantis/hmc/test/e2e/managedcluster/aws" "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" + "github.com/Mirantis/hmc/test/e2e/templates" "github.com/Mirantis/hmc/test/utils" ) var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Ordered, func() { var ( - kc *kubeclient.KubeClient - standaloneClient *kubeclient.KubeClient - standaloneDeleteFunc func() error - hostedDeleteFunc func() error - kubecfgDeleteFunc func() error - clusterName string + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + hostedDeleteFuncs []func() error + standaloneDeleteFuncs []func() error + kubeconfigDeleteFuncs []func() error + standaloneClusterNames []string + hostedClusterNames []string + + providerConfigs []config.ProviderTestingConfig ) BeforeAll(func() { + By("get testing configuration") + providerConfigs = config.Config[config.TestingProviderAWS] + + if len(providerConfigs) == 0 { + Skip("AWS ManagedCluster testing is skipped") + } + By("providing cluster identity") kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) ci := clusteridentity.New(kc, managedcluster.ProviderAWS) @@ -53,19 +66,28 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order // If we failed collect logs from each of the affiliated controllers // as well as the output of clusterctl to store as artifacts. if CurrentSpecReport().Failed() && !noCleanup() { + if kc != nil { + By("collecting failure logs from the management controllers") + logs.Collector{ + Client: kc, + ProviderTypes: []managedcluster.ProviderType{managedcluster.ProviderAWS, managedcluster.ProviderCAPI}, + ClusterNames: standaloneClusterNames, + }.CollectAll() + } if standaloneClient != nil { By("collecting failure logs from hosted controllers") - collectLogArtifacts(standaloneClient, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) + logs.Collector{ + Client: standaloneClient, + ProviderTypes: []managedcluster.ProviderType{managedcluster.ProviderAWS, managedcluster.ProviderCAPI}, + ClusterNames: hostedClusterNames, + }.CollectAll() } } - By("deleting resources") - for _, deleteFunc := range []func() error{ - kubecfgDeleteFunc, - hostedDeleteFunc, - standaloneDeleteFunc, - } { - if deleteFunc != nil { + if !noCleanup() { + By("deleting resources") + deleteFuncs := append(hostedDeleteFuncs, append(standaloneDeleteFuncs, kubeconfigDeleteFuncs...)...) + for _, deleteFunc := range deleteFuncs { err := deleteFunc() Expect(err).NotTo(HaveOccurred()) } @@ -73,117 +95,131 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order }) It("should work with an AWS provider", func() { - // Deploy a standalone cluster and verify it is running/ready. - // Deploy standalone with an xlarge instance since it will also be - // hosting the hosted cluster. 
- GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") - - templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) - clusterName = sd.GetName() - - standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) - - templateBy(managedcluster.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, - clusterName, - managedcluster.ValidationActionDeploy, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - templateBy(managedcluster.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") - - // Download the KUBECONFIG for the standalone cluster and load it - // so we can call Make targets against this cluster. - // TODO(#472): Ideally we shouldn't use Make here and should just - // convert these Make targets into Go code, but this will require a - // helmclient. - var kubeCfgPath string - kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), clusterName) - - GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) - cmd := exec.Command("make", "test-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) - - templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") - standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, clusterName) - Eventually(func() error { - err := verifyControllersUp(standaloneClient) - if err != nil { - _, _ = fmt.Fprintf( - GinkgoWriter, "[%s] controller validation failed: %v\n", - string(managedcluster.TemplateAWSHostedCP), err) - return err + for i, providerConfig := range providerConfigs { + _, _ = fmt.Fprintf(GinkgoWriter, "Testing configuration:\n%s\n", providerConfig.String()) + // Deploy a standalone cluster and verify it is running/ready. + // Deploy standalone with an xlarge instance since it will also be + // hosting the hosted cluster. + GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") + + sdName := managedcluster.GenerateClusterName(fmt.Sprintf("aws-%d", i)) + sdTemplate := providerConfig.Standalone.Template + templateBy(templates.TemplateAWSStandaloneCP, fmt.Sprintf("creating a ManagedCluster %s with template %s", sdName, sdTemplate)) + + sd := managedcluster.GetUnstructured(templates.TemplateAWSStandaloneCP, sdName, sdTemplate) + + standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd) + standaloneDeleteFuncs = append(standaloneDeleteFuncs, standaloneDeleteFunc) + standaloneClusterNames = append(standaloneClusterNames, sd.GetName()) + + templateBy(templates.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") + deploymentValidator := managedcluster.NewProviderValidator( + templates.TemplateAWSStandaloneCP, + sdName, + managedcluster.ValidationActionDeploy, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + if providerConfig.Hosted == nil { + continue } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Ensure AWS credentials are set in the standalone cluster. 
- clusteridentity.New(standaloneClient, managedcluster.ProviderAWS) - - // Populate the environment variables required for the hosted - // cluster. - aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) - - templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") - hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) - hdName := hd.GetName() - - // Deploy the hosted cluster on top of the standalone cluster. - hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) - - templateBy(managedcluster.TemplateAWSHostedCP, "Patching AWSCluster to ready") - managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAWS, hdName) - - // Verify the hosted cluster is running/ready. - templateBy(managedcluster.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully") - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAWSHostedCP, - hdName, - managedcluster.ValidationActionDeploy, - ) - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Delete the hosted ManagedCluster and verify it is removed. - templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster") - err = hostedDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deletionValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSHostedCP, - hdName, - managedcluster.ValidationActionDelete, - ) - Eventually(func() error { - return deletionValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - // Now delete the standalone ManagedCluster and verify it is - // removed, it is deleted last since it is the basis for the hosted - // cluster. - /* - FIXME(#339): This is currently disabled as the deletion of the - standalone cluster is failing due to outstanding issues. - templateBy(managedcluster.TemplateAWSStandaloneCP, "deleting the ManagedCluster") - err = standaloneDeleteFunc() + + templateBy(templates.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") + + // Download the KUBECONFIG for the standalone cluster and load it + // so we can call Make targets against this cluster. + // TODO(#472): Ideally we shouldn't use Make here and should just + // convert these Make targets into Go code, but this will require a + // helmclient. + kubeCfgPath, kubecfgDeleteFunc := kc.WriteKubeconfig(context.Background(), sdName) + kubeconfigDeleteFuncs = append(kubeconfigDeleteFuncs, kubecfgDeleteFunc) + + GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) + cmd := exec.Command("make", "test-apply") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) + + templateBy(templates.TemplateAWSHostedCP, "validating that the controller is ready") + standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, sdName) + Eventually(func() error { + err := verifyControllersUp(standaloneClient) + if err != nil { + _, _ = fmt.Fprintf( + GinkgoWriter, "[%s] controller validation failed: %v\n", + templates.TemplateAWSHostedCP, err) + return err + } + return nil + }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // Ensure AWS credentials are set in the standalone cluster. 
+ clusteridentity.New(standaloneClient, managedcluster.ProviderAWS) + + // Populate the environment variables required for the hosted + // cluster. + aws.PopulateHostedTemplateVars(context.Background(), kc, sdName) + + hdName := managedcluster.GenerateClusterName(fmt.Sprintf("aws-hosted-%d", i)) + hdTemplate := providerConfig.Hosted.Template + templateBy(templates.TemplateAWSHostedCP, fmt.Sprintf("creating a hosted ManagedCluster %s with template %s", hdName, hdTemplate)) + hd := managedcluster.GetUnstructured(templates.TemplateAWSHostedCP, hdName, hdTemplate) + + // Deploy the hosted cluster on top of the standalone cluster. + hostedDeleteFunc := standaloneClient.CreateManagedCluster(context.Background(), hd) + hostedDeleteFuncs = append(hostedDeleteFuncs, hostedDeleteFunc) + hostedClusterNames = append(hostedClusterNames, hd.GetName()) + + templateBy(templates.TemplateAWSHostedCP, "Patching AWSCluster to ready") + managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAWS, hdName) + + // Verify the hosted cluster is running/ready. + templateBy(templates.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully") + deploymentValidator = managedcluster.NewProviderValidator( + templates.TemplateAWSHostedCP, + hdName, + managedcluster.ValidationActionDeploy, + ) + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // Delete the hosted ManagedCluster and verify it is removed. + templateBy(templates.TemplateAWSHostedCP, "deleting the ManagedCluster") + err = hostedDeleteFunc() Expect(err).NotTo(HaveOccurred()) - deletionValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, - clusterName, + deletionValidator := managedcluster.NewProviderValidator( + templates.TemplateAWSHostedCP, + hdName, managedcluster.ValidationActionDelete, ) Eventually(func() error { - return deletionValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * - time.Second).Should(Succeed()) - */ + return deletionValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + // Now delete the standalone ManagedCluster and verify it is + // removed, it is deleted last since it is the basis for the hosted + // cluster. + /* + FIXME(#339): This is currently disabled as the deletion of the + standalone cluster is failing due to outstanding issues. + templateBy(managedcluster.TemplateAWSStandaloneCP, "deleting the ManagedCluster") + err = standaloneDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deletionValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAWSStandaloneCP, + clusterName, + managedcluster.ValidationActionDelete, + ) + Eventually(func() error { + return deletionValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * + time.Second).Should(Succeed()) + */ + } }) }) diff --git a/test/e2e/provider_azure_test.go b/test/e2e/provider_azure_test.go index fcdbe27c7..15683988d 100644 --- a/test/e2e/provider_azure_test.go +++ b/test/e2e/provider_azure_test.go @@ -25,52 +25,69 @@ import ( . 
"github.com/onsi/gomega" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/config" "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/logs" "github.com/Mirantis/hmc/test/e2e/managedcluster" "github.com/Mirantis/hmc/test/e2e/managedcluster/azure" "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" + "github.com/Mirantis/hmc/test/e2e/templates" "github.com/Mirantis/hmc/test/utils" ) var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() { var ( - kc *kubeclient.KubeClient - standaloneClient *kubeclient.KubeClient - standaloneDeleteFunc func() error - hostedDeleteFunc func() error - kubecfgDeleteFunc func() error - hostedKubecfgDeleteFunc func() error - sdName string + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + hostedDeleteFuncs []func() error + standaloneDeleteFuncs []func() error + kubeconfigDeleteFuncs []func() error + standaloneClusterNames []string + hostedClusterNames []string + + providerConfigs []config.ProviderTestingConfig ) BeforeAll(func() { + By("get testing configuration") + providerConfigs = config.Config[config.TestingProviderAzure] + + if len(providerConfigs) == 0 { + Skip("Azure ManagedCluster testing is skipped") + } + By("ensuring Azure credentials are set") kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) ci := clusteridentity.New(kc, managedcluster.ProviderAzure) Expect(os.Setenv(managedcluster.EnvVarAzureClusterIdentity, ci.IdentityName)).Should(Succeed()) }) - AfterEach(func() { + AfterAll(func() { // If we failed collect logs from each of the affiliated controllers // as well as the output of clusterctl to store as artifacts. if CurrentSpecReport().Failed() && !noCleanup() { - By("collecting failure logs from controllers") if kc != nil { - collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + By("collecting failure logs from the management controllers") + logs.Collector{ + Client: kc, + ProviderTypes: []managedcluster.ProviderType{managedcluster.ProviderAzure, managedcluster.ProviderCAPI}, + ClusterNames: standaloneClusterNames, + }.CollectAll() } if standaloneClient != nil { - collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + By("collecting failure logs from hosted controllers") + logs.Collector{ + Client: standaloneClient, + ProviderTypes: []managedcluster.ProviderType{managedcluster.ProviderAzure, managedcluster.ProviderCAPI}, + ClusterNames: hostedClusterNames, + }.CollectAll() } } - By("deleting resources") - for _, deleteFunc := range []func() error{ - hostedKubecfgDeleteFunc, - kubecfgDeleteFunc, - hostedDeleteFunc, - standaloneDeleteFunc, - } { - if deleteFunc != nil { + if !noCleanup() { + By("deleting resources") + deleteFuncs := append(hostedDeleteFuncs, append(standaloneDeleteFuncs, kubeconfigDeleteFuncs...)...) 
+ for _, deleteFunc := range deleteFuncs { err := deleteFunc() Expect(err).NotTo(HaveOccurred()) } @@ -78,99 +95,117 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or }) It("should work with an Azure provider", func() { - templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP) - sdName = sd.GetName() - - standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd) - - // verify the standalone cluster is deployed correctly - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - sdName, - managedcluster.ValidationActionDeploy, - ) - - templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // setup environment variables for deploying the hosted template (subnet name, etc) - azure.SetAzureEnvironmentVariables(sdName, kc) - - hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP) - hdName := hd.GetName() - - var kubeCfgPath string - kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName) - - By("Deploy onto standalone cluster") - GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) - cmd := exec.Command("make", "test-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) - - standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, sdName) - // verify the cluster is ready prior to creating credentials - Eventually(func() error { - err := verifyControllersUp(standaloneClient) - if err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) - return err + for i, providerConfig := range providerConfigs { + _, _ = fmt.Fprintf(GinkgoWriter, "Testing configuration:\n%s\n", providerConfig.String()) + + sdName := managedcluster.GenerateClusterName(fmt.Sprintf("azure-%d", i)) + sdTemplate := providerConfig.Standalone.Template + templateBy(templates.TemplateAzureStandaloneCP, fmt.Sprintf("creating a ManagedCluster %s with template %s", sdName, sdTemplate)) + + templateBy(templates.TemplateAzureStandaloneCP, "creating a ManagedCluster") + sd := managedcluster.GetUnstructured(templates.TemplateAzureStandaloneCP, sdName, sdTemplate) + + standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd) + standaloneDeleteFuncs = append(standaloneDeleteFuncs, standaloneDeleteFunc) + standaloneClusterNames = append(standaloneClusterNames, sdName) + + // verify the standalone cluster is deployed correctly + deploymentValidator := managedcluster.NewProviderValidator( + templates.TemplateAzureStandaloneCP, + sdName, + managedcluster.ValidationActionDeploy, + ) + + templateBy(templates.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + if providerConfig.Hosted == nil { + continue } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - By("Create azure credential secret") - clusteridentity.New(standaloneClient, managedcluster.ProviderAzure) - - 
By("Create default storage class for azure-disk CSI driver") - azure.CreateDefaultStorageClass(standaloneClient) - - templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster") - hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) - - templateBy(managedcluster.TemplateAzureHostedCP, "Patching AzureCluster to ready") - managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAzure, hdName) - - templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully") - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAzureHostedCP, - hdName, - managedcluster.ValidationActionDeploy, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - By("verify the deployment deletes successfully") - err = hostedDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - err = standaloneDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAzureHostedCP, - hdName, - managedcluster.ValidationActionDelete, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - hdName, - managedcluster.ValidationActionDelete, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // setup environment variables for deploying the hosted template (subnet name, etc) + azure.SetAzureEnvironmentVariables(sdName, kc) + + kubeCfgPath, kubecfgDeleteFunc := kc.WriteKubeconfig(context.Background(), sdName) + kubeconfigDeleteFuncs = append(kubeconfigDeleteFuncs, kubecfgDeleteFunc) + + By("Deploy onto standalone cluster") + GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) + cmd := exec.Command("make", "test-apply") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) + + standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, sdName) + // verify the cluster is ready prior to creating credentials + Eventually(func() error { + err := verifyControllersUp(standaloneClient) + if err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) + return err + } + return nil + }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By("Create azure credential secret") + clusteridentity.New(standaloneClient, managedcluster.ProviderAzure) + + By("Create default storage class for azure-disk CSI driver") + azure.CreateDefaultStorageClass(standaloneClient) + + hdName := managedcluster.GenerateClusterName(fmt.Sprintf("azure-hosted-%d", i)) + hdTemplate := providerConfig.Hosted.Template + templateBy(templates.TemplateAzureHostedCP, fmt.Sprintf("creating a hosted ManagedCluster %s with template %s", hdName, hdTemplate)) + + hd := managedcluster.GetUnstructured(templates.TemplateAzureHostedCP, hdName, hdTemplate) + + templateBy(templates.TemplateAzureHostedCP, "creating a ManagedCluster") + hostedDeleteFunc := standaloneClient.CreateManagedCluster(context.Background(), hd) + 
+			templateBy(templates.TemplateAzureHostedCP, "Patching AzureCluster to ready")
+			managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAzure, hdName)
+
+			templateBy(templates.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully")
+			deploymentValidator = managedcluster.NewProviderValidator(
+				templates.TemplateAzureHostedCP,
+				hdName,
+				managedcluster.ValidationActionDeploy,
+			)
+
+			Eventually(func() error {
+				return deploymentValidator.Validate(context.Background(), standaloneClient)
+			}).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+			By("verify the deployment deletes successfully")
+			err = hostedDeleteFunc()
+			Expect(err).NotTo(HaveOccurred())
+
+			err = standaloneDeleteFunc()
+			Expect(err).NotTo(HaveOccurred())
+
+			deploymentValidator = managedcluster.NewProviderValidator(
+				templates.TemplateAzureHostedCP,
+				hdName,
+				managedcluster.ValidationActionDelete,
+			)
+
+			Eventually(func() error {
+				return deploymentValidator.Validate(context.Background(), standaloneClient)
+			}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+			deploymentValidator = managedcluster.NewProviderValidator(
+				templates.TemplateAzureStandaloneCP,
+				sdName,
+				managedcluster.ValidationActionDelete,
+			)
+
+			Eventually(func() error {
+				return deploymentValidator.Validate(context.Background(), kc)
+			}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+		}
 	})
 })
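Note for reviewers: the provider specs above index config.Config by provider and treat the Hosted entry as optional. A minimal, hypothetical sketch of the shapes this usage implies follows; the authoritative definitions live in test/e2e/config/config.go from this patch, and the exact field names below are assumptions for illustration only.

// Hypothetical sketch -- inferred from config.Config[...],
// providerConfig.Standalone.Template, providerConfig.Hosted, and
// providerConfig.String() as used in the specs above.
package config

import "fmt"

type TestingProvider string

const (
	TestingProviderAWS     TestingProvider = "aws"
	TestingProviderAzure   TestingProvider = "azure"
	TestingProviderVsphere TestingProvider = "vsphere"
)

type ClusterTestingConfig struct {
	// Template is the name of the ClusterTemplate to deploy.
	Template string `yaml:"template"`
}

type ProviderTestingConfig struct {
	Standalone *ClusterTestingConfig `yaml:"standalone,omitempty"`
	Hosted     *ClusterTestingConfig `yaml:"hosted,omitempty"`
}

// Config maps each provider to the list of configurations to exercise.
var Config map[TestingProvider][]ProviderTestingConfig

func (c ProviderTestingConfig) String() string {
	return fmt.Sprintf("standalone: %+v, hosted: %+v", c.Standalone, c.Hosted)
}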
diff --git a/test/e2e/provider_vsphere_test.go b/test/e2e/provider_vsphere_test.go
index 202d7deda..4fe2edac4 100644
--- a/test/e2e/provider_vsphere_test.go
+++ b/test/e2e/provider_vsphere_test.go
@@ -16,6 +16,7 @@ package e2e
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"time"
 
@@ -23,21 +24,33 @@
 	. "github.com/onsi/gomega"
 
 	internalutils "github.com/Mirantis/hmc/internal/utils"
+	"github.com/Mirantis/hmc/test/e2e/config"
 	"github.com/Mirantis/hmc/test/e2e/kubeclient"
+	"github.com/Mirantis/hmc/test/e2e/logs"
 	"github.com/Mirantis/hmc/test/e2e/managedcluster"
 	"github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity"
 	"github.com/Mirantis/hmc/test/e2e/managedcluster/vsphere"
+	"github.com/Mirantis/hmc/test/e2e/templates"
 )
 
 var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() {
 	var (
-		kc          *kubeclient.KubeClient
-		deleteFunc  func() error
-		clusterName string
-		err         error
+		kc                     *kubeclient.KubeClient
+		standaloneDeleteFuncs  = make(map[string]func() error)
+		standaloneClusterNames []string
+		err                    error
+
+		providerConfigs []config.ProviderTestingConfig
 	)
 
 	BeforeAll(func() {
+		By("get testing configuration")
+		providerConfigs = config.Config[config.TestingProviderVsphere]
+
+		if len(providerConfigs) == 0 {
+			Skip("vSphere ManagedCluster testing is skipped")
+		}
+
 		By("ensuring that env vars are set correctly")
 		vsphere.CheckEnv()
 		By("creating kube client")
@@ -48,12 +61,18 @@
 		Expect(os.Setenv(managedcluster.EnvVarVSphereClusterIdentity, ci.IdentityName)).Should(Succeed())
 	})
 
-	AfterEach(func() {
+	AfterAll(func() {
 		// If we failed collect logs from each of the affiliated controllers
 		// as well as the output of clusterctl to store as artifacts.
 		if CurrentSpecReport().Failed() {
 			By("collecting failure logs from controllers")
-			collectLogArtifacts(kc, clusterName, managedcluster.ProviderVSphere, managedcluster.ProviderCAPI)
+			if kc != nil {
+				logs.Collector{
+					Client:        kc,
+					ProviderTypes: []managedcluster.ProviderType{managedcluster.ProviderVSphere, managedcluster.ProviderCAPI},
+					ClusterNames:  standaloneClusterNames,
+				}.CollectAll()
+			}
 		}
 
 		// Run the deletion as part of the cleanup and validate it here.
@@ -63,36 +82,47 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"
 		// TODO(#473) Add an exterior cleanup mechanism for VSphere like
 		// 'dev-aws-nuke' to clean up resources in the event that the test
 		// fails to do so.
-		if deleteFunc != nil && !noCleanup() {
-			deletionValidator := managedcluster.NewProviderValidator(
-				managedcluster.TemplateVSphereStandaloneCP,
-				clusterName,
-				managedcluster.ValidationActionDelete,
-			)
+		if !noCleanup() {
+			for clusterName, deleteFunc := range standaloneDeleteFuncs {
+				if deleteFunc != nil {
+					deletionValidator := managedcluster.NewProviderValidator(
+						templates.TemplateVSphereStandaloneCP,
+						clusterName,
+						managedcluster.ValidationActionDelete,
+					)
 
-			err = deleteFunc()
-			Expect(err).NotTo(HaveOccurred())
-			Eventually(func() error {
-				return deletionValidator.Validate(context.Background(), kc)
-			}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+					err = deleteFunc()
+					Expect(err).NotTo(HaveOccurred())
+					Eventually(func() error {
+						return deletionValidator.Validate(context.Background(), kc)
+					}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+				}
+			}
 		}
 	})
 
-	It("should deploy standalone managed cluster", func() {
-		By("creating a managed cluster")
-		d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP)
-		clusterName = d.GetName()
-
-		deleteFunc = kc.CreateManagedCluster(context.Background(), d)
-
-		By("waiting for infrastructure providers to deploy successfully")
-		deploymentValidator := managedcluster.NewProviderValidator(
-			managedcluster.TemplateVSphereStandaloneCP,
-			clusterName,
-			managedcluster.ValidationActionDeploy,
-		)
-		Eventually(func() error {
-			return deploymentValidator.Validate(context.Background(), kc)
-		}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+	It("should work with vSphere provider", func() {
+		for i, providerConfig := range providerConfigs {
+			sdName := managedcluster.GenerateClusterName(fmt.Sprintf("vsphere-%d", i))
+			sdTemplate := providerConfig.Standalone.Template
+			templateBy(templates.TemplateVSphereStandaloneCP, fmt.Sprintf("creating a ManagedCluster %s with template %s", sdName, sdTemplate))
+
+			d := managedcluster.GetUnstructured(templates.TemplateVSphereStandaloneCP, sdName, sdTemplate)
+			clusterName := d.GetName()
+
+			deleteFunc := kc.CreateManagedCluster(context.Background(), d)
+			standaloneDeleteFuncs[clusterName] = deleteFunc
+			standaloneClusterNames = append(standaloneClusterNames, clusterName)
+
+			By("waiting for infrastructure providers to deploy successfully")
+			deploymentValidator := managedcluster.NewProviderValidator(
+				templates.TemplateVSphereStandaloneCP,
+				clusterName,
+				managedcluster.ValidationActionDeploy,
+			)
+			Eventually(func() error {
+				return deploymentValidator.Validate(context.Background(), kc)
+			}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+		}
 	})
 })
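The AfterAll above chains CollectAll directly off a composite literal, which is why a value receiver fits here. A self-contained sketch of that pattern follows, with placeholder types; the real Collector in test/e2e/logs/logs.go also carries the kube client and writes artifacts rather than printing.

// Hypothetical, simplified sketch of the collector pattern used above.
package logs

import "fmt"

type ProviderType string

// Collector gathers diagnostics for a failed spec: controller logs per
// provider type plus per-cluster state.
type Collector struct {
	ProviderTypes []ProviderType
	ClusterNames  []string
}

// CollectAll uses a value receiver so it can be invoked directly on a
// literal, e.g. logs.Collector{...}.CollectAll(), as the spec does.
func (c Collector) CollectAll() {
	for _, p := range c.ProviderTypes {
		fmt.Printf("collecting controller logs for provider %s\n", p)
	}
	for _, cluster := range c.ClusterNames {
		fmt.Printf("collecting state for cluster %s\n", cluster)
	}
}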
diff --git a/test/e2e/templates/templates.go b/test/e2e/templates/templates.go
new file mode 100644
index 000000000..6b33fa989
--- /dev/null
+++ b/test/e2e/templates/templates.go
@@ -0,0 +1,26 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package templates
+
+type Type string
+
+const (
+	TemplateAWSStandaloneCP     Type = "aws-standalone-cp"
+	TemplateAWSHostedCP         Type = "aws-hosted-cp"
+	TemplateAzureHostedCP       Type = "azure-hosted-cp"
+	TemplateAzureStandaloneCP   Type = "azure-standalone-cp"
+	TemplateVSphereStandaloneCP Type = "vsphere-standalone-cp"
+	TemplateVSphereHostedCP     Type = "vsphere-hosted-cp"
+)
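A closing note for reviewers of the templates package: the specs pass two distinct template inputs, a templates.Type that selects which testdata manifest to render, and a template name taken from the e2e config that names the ClusterTemplate the rendered ManagedCluster references. A hypothetical sketch of that split follows; resourceFor and the printed layout are illustrative assumptions, not part of this patch.

// Hypothetical illustration of the two template inputs; the real
// rendering helper is managedcluster.GetUnstructured and may differ.
package main

import "fmt"

type Type string

const TemplateAWSStandaloneCP Type = "aws-standalone-cp"

// resourceFor maps a template kind to the testdata manifest it renders,
// mirroring the test/e2e/managedcluster/resources/*.yaml.tpl layout.
func resourceFor(kind Type) string {
	return fmt.Sprintf("resources/%s.yaml.tpl", string(kind))
}

func main() {
	// The kind picks the manifest; the ClusterTemplate name would come
	// from the e2e config (providerConfig.Standalone.Template) and is
	// substituted into the rendered manifest.
	fmt.Println(resourceFor(TemplateAWSStandaloneCP))
}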