diff --git a/Makefile b/Makefile index 4b13c28d0..9db933928 100644 --- a/Makefile +++ b/Makefile @@ -71,6 +71,7 @@ set-hmc-version: yq $(YQ) eval '.spec.version = "$(VERSION)"' -i $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/release.yaml $(YQ) eval '.metadata.name = "hmc-$(FQDN_VERSION)"' -i $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/release.yaml $(YQ) eval '.spec.hmc.template = "hmc-$(FQDN_VERSION)"' -i $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/release.yaml + $(YQ) eval '.metadata.name = "hmc-$(FQDN_VERSION)"' -i $(PROVIDER_TEMPLATES_DIR)/hmc-templates/templates/clustertemplatechain.yaml .PHONY: hmc-chart-release hmc-chart-release: set-hmc-version templates-generate ## Generate hmc helm chart @@ -106,7 +107,10 @@ tidy: .PHONY: test test: generate-all envtest tidy external-crd ## Run tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e/scenarios) -coverprofile cover.out + +# E2E_CONFIG_B64 contains the configuration for e2e testing. +E2E_CONFIG_B64 ?= "" # Utilize Kind or modify the e2e tests to load the image locally, enabling # compatibility with other vendors. @@ -115,7 +119,8 @@ test-e2e: cli-install @if [ "$$GINKGO_LABEL_FILTER" ]; then \ ginkgo_label_flag="-ginkgo.label-filter=$$GINKGO_LABEL_FILTER"; \ fi; \ - KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -ginkgo.timeout=3h -timeout=3h $$ginkgo_label_flag + KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) E2E_CONFIG_B64=$(E2E_CONFIG_B64) \ + go test ./test/e2e/scenarios/ -v -ginkgo.v -ginkgo.timeout=3h -timeout=3h $$ginkgo_label_flag .PHONY: lint lint: golangci-lint fmt vet ## Run golangci-lint linter & yamllint @@ -329,6 +334,7 @@ dev-push: docker-build helm-push .PHONY: dev-templates dev-templates: templates-generate $(KUBECTL) -n $(NAMESPACE) apply --force -f $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/templates + $(KUBECTL) -n $(NAMESPACE) apply --force -f $(PROVIDER_TEMPLATES_DIR)/hmc-templates/templates/clustertemplatechain.yaml .PHONY: dev-release dev-release: diff --git a/docs/dev.md b/docs/dev.md index 1484eebfc..fe39b0d9c 100644 --- a/docs/dev.md +++ b/docs/dev.md @@ -166,7 +166,7 @@ GINKGO_LABEL_FILTER="provider:cloud" make test-e2e would run all cloud provider tests. 
To see a list of all available labels run: ```bash -ginkgo labels ./test/e2e +ginkgo labels ./test/e2e/scenarios ``` ### Nuke created resources diff --git a/templates/provider/hmc-templates/templates/clustertemplatechain.yaml b/templates/provider/hmc-templates/templates/clustertemplatechain.yaml new file mode 100644 index 000000000..36338a1e9 --- /dev/null +++ b/templates/provider/hmc-templates/templates/clustertemplatechain.yaml @@ -0,0 +1,15 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ClusterTemplateChain +metadata: + name: hmc-0-0-5 + annotations: + helm.sh/resource-policy: keep +spec: + supportedTemplates: + - name: aws-eks-0-0-2 + - name: aws-hosted-cp-0-0-3 + - name: aws-standalone-cp-0-0-4 + - name: azure-hosted-cp-0-0-3 + - name: azure-standalone-cp-0-0-4 + - name: vsphere-hosted-cp-0-0-3 + - name: vsphere-standalone-cp-0-0-3 diff --git a/test/e2e/config/config.go b/test/e2e/config/config.go new file mode 100644 index 000000000..9770a2c5b --- /dev/null +++ b/test/e2e/config/config.go @@ -0,0 +1,104 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "encoding/base64" + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v3" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Mirantis/hmc/test/e2e/templates" ) + +type TestingProvider string + +const ( + envVarE2EConfig = "E2E_CONFIG_B64" + + TestingProviderAWS TestingProvider = "aws" + TestingProviderAzure TestingProvider = "azure" + TestingProviderVsphere TestingProvider = "vsphere" +) + +var Config TestingConfig + +type TestingConfig = map[TestingProvider]ProviderTestingConfig + +type ProviderTestingConfig struct { + // Standalone contains the testing configuration for the standalone cluster deployment. + Standalone ClusterTestingConfig `yaml:"standalone,omitempty"` + // Hosted contains the testing configuration for the hosted cluster deployment. + Hosted ClusterTestingConfig `yaml:"hosted,omitempty"` +} + +type ClusterTestingConfig struct { + // Upgrade is a boolean parameter that specifies whether the managed cluster upgrade should be tested. + Upgrade bool `yaml:"upgrade,omitempty"` + // Template is the name of the template to use when deploying a managed cluster. + // If unset: + // * The latest available template will be chosen + // * If upgrade is triggered, the latest available template with available upgrades will be chosen. + Template string `yaml:"template,omitempty"` + // UpgradeTemplate specifies the name of the template to upgrade to. Ignored if upgrade is set to false. + // If unset, the latest template available for the upgrade will be chosen. 
+ UpgradeTemplate string `yaml:"upgradeTemplate,omitempty"` +} + +func Parse() error { + decodedConfig, err := base64.StdEncoding.DecodeString(os.Getenv(envVarE2EConfig)) + if err != nil { + return err + } + _, _ = fmt.Fprintf(GinkgoWriter, "E2e testing configuration:\n%s\n", decodedConfig) + + err = yaml.Unmarshal(decodedConfig, &Config) + if err != nil { + return err + } + return nil +} + +func (c *ClusterTestingConfig) SetDefaults(clusterTemplates map[string][]hmc.AvailableUpgrade, templateType templates.Type) error { + var err error + if !c.Upgrade { + if c.Template == "" { + c.Template, err = templates.FindTemplate(clusterTemplates, templateType) + if err != nil { + return err + } + } + return templates.ValidateTemplate(clusterTemplates, c.Template) + } + if c.Template != "" && c.UpgradeTemplate != "" { + return templates.ValidateUpgradeSequence(clusterTemplates, c.Template, c.UpgradeTemplate) + } + c.Template, c.UpgradeTemplate, err = templates.FindTemplatesToUpgrade(clusterTemplates, templateType, c.Template) + if err != nil { + return err + } + return nil +} + +func (c *ProviderTestingConfig) String() string { + prettyConfig, err := yaml.Marshal(c) + Expect(err).NotTo(HaveOccurred()) + + return string(prettyConfig) +} diff --git a/test/e2e/kubeclient/kubeclient.go b/test/e2e/kubeclient/kubeclient.go index e3801e4e0..281060be6 100644 --- a/test/e2e/kubeclient/kubeclient.go +++ b/test/e2e/kubeclient/kubeclient.go @@ -21,23 +21,30 @@ import ( "os" "path/filepath" + hcv2 "github.com/fluxcd/helm-controller/api/v2" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + hmcmirantiscomv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/utils/status" ) +var scheme = runtime.NewScheme() + type KubeClient struct { Client kubernetes.Interface + CrClient crclient.Client ExtendedClient apiextensionsclientset.Interface Config *rest.Config @@ -56,16 +63,16 @@ func NewFromLocal(namespace string) *KubeClient { // the kubeconfig from secret it needs an existing kubeclient. func (kc *KubeClient) NewFromCluster(ctx context.Context, namespace, clusterName string) *KubeClient { GinkgoHelper() - return newKubeClient(kc.getKubeconfigSecretData(ctx, clusterName), namespace) + return newKubeClient(kc.getKubeconfigSecretData(ctx, namespace, clusterName), namespace) } // WriteKubeconfig writes the kubeconfig for the given clusterName to the // test/e2e directory returning the path to the file and a function to delete // it later. 
-func (kc *KubeClient) WriteKubeconfig(ctx context.Context, clusterName string) (string, func() error) { +func (kc *KubeClient) WriteKubeconfig(ctx context.Context, namespace, clusterName string) (string, func() error) { GinkgoHelper() - secretData := kc.getKubeconfigSecretData(ctx, clusterName) + secretData := kc.getKubeconfigSecretData(ctx, namespace, clusterName) dir, err := os.Getwd() Expect(err).NotTo(HaveOccurred()) @@ -89,11 +96,11 @@ func (kc *KubeClient) WriteKubeconfig(ctx context.Context, clusterName string) ( return path, deleteFunc } -func (kc *KubeClient) getKubeconfigSecretData(ctx context.Context, clusterName string) []byte { +func (kc *KubeClient) getKubeconfigSecretData(ctx context.Context, namespace, clusterName string) []byte { GinkgoHelper() - secret, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get(ctx, clusterName+"-kubeconfig", metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get cluster: %q kubeconfig secret", clusterName) + secret, err := kc.Client.CoreV1().Secrets(namespace).Get(ctx, clusterName+"-kubeconfig", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to get cluster: %q kubeconfig secret in %s namespace", clusterName, namespace) secretData, ok := secret.Data["value"] Expect(ok).To(BeTrue(), "kubeconfig secret %q has no 'value' key", clusterName) @@ -132,35 +139,45 @@ func newKubeClient(configBytes []byte, namespace string) *KubeClient { clientSet, err := kubernetes.NewForConfig(config) Expect(err).NotTo(HaveOccurred(), "failed to initialize kubernetes client") + err = hmcmirantiscomv1alpha1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred(), "failed to add HMC API to scheme") + + err = hcv2.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred(), "failed to add Flux helm controller API to scheme") + + crClient, err := crclient.New(config, crclient.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred(), "failed to create controller runtime client") + extendedClientSet, err := apiextensionsclientset.NewForConfig(config) Expect(err).NotTo(HaveOccurred(), "failed to initialize apiextensions clientset") return &KubeClient{ Namespace: namespace, Client: clientSet, + CrClient: crClient, ExtendedClient: extendedClientSet, Config: config, } } // GetDynamicClient returns a dynamic client for the given GroupVersionResource. -func (kc *KubeClient) GetDynamicClient(gvr schema.GroupVersionResource, namespaced bool) dynamic.ResourceInterface { //nolint:revive +func (kc *KubeClient) GetDynamicClient(gvr schema.GroupVersionResource, namespace string) dynamic.ResourceInterface { GinkgoHelper() client, err := dynamic.NewForConfig(kc.Config) Expect(err).NotTo(HaveOccurred(), "failed to create dynamic client for resource: %s", gvr.String()) - if !namespaced { + if namespace == "" { return client.Resource(gvr) } - return client.Resource(gvr).Namespace(kc.Namespace) + return client.Resource(gvr).Namespace(namespace) } -func (kc *KubeClient) CreateOrUpdateUnstructuredObject(gvr schema.GroupVersionResource, obj *unstructured.Unstructured, namespaced bool) { +func (kc *KubeClient) CreateOrUpdateUnstructuredObject(gvr schema.GroupVersionResource, obj *unstructured.Unstructured, namespace string) { GinkgoHelper() - client := kc.GetDynamicClient(gvr, namespaced) + client := kc.GetDynamicClient(gvr, namespace) kind, name := status.ObjKindName(obj) @@ -181,18 +198,22 @@ func (kc *KubeClient) CreateOrUpdateUnstructuredObject(gvr schema.GroupVersionRe // namespace and returns a DeleteFunc to clean up the deployment. 
// The DeleteFunc is a no-op if the deployment has already been deleted. func (kc *KubeClient) CreateManagedCluster( - ctx context.Context, managedcluster *unstructured.Unstructured, + ctx context.Context, managedcluster *unstructured.Unstructured, namespace string, ) func() error { GinkgoHelper() kind := managedcluster.GetKind() Expect(kind).To(Equal("ManagedCluster")) + if namespace != "" { + managedcluster.SetNamespace(namespace) + } + client := kc.GetDynamicClient(schema.GroupVersionResource{ Group: "hmc.mirantis.com", Version: "v1alpha1", Resource: "managedclusters", - }, true) + }, namespace) _, err := client.Create(ctx, managedcluster, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { @@ -209,14 +230,14 @@ func (kc *KubeClient) CreateManagedCluster( } // GetCluster returns a Cluster resource by name. -func (kc *KubeClient) GetCluster(ctx context.Context, clusterName string) (*unstructured.Unstructured, error) { +func (kc *KubeClient) GetCluster(ctx context.Context, namespace, clusterName string) (*unstructured.Unstructured, error) { gvr := schema.GroupVersionResource{ Group: "cluster.x-k8s.io", Version: "v1beta1", Resource: "clusters", } - client := kc.GetDynamicClient(gvr, true) + client := kc.GetDynamicClient(gvr, namespace) cluster, err := client.Get(ctx, clusterName, metav1.GetOptions{}) if err != nil { @@ -229,9 +250,9 @@ func (kc *KubeClient) GetCluster(ctx context.Context, clusterName string) (*unst // listResource returns a list of resources for the given GroupVersionResource // affiliated with the given clusterName. func (kc *KubeClient) listResource( - ctx context.Context, gvr schema.GroupVersionResource, clusterName string, + ctx context.Context, gvr schema.GroupVersionResource, namespace, clusterName string, ) ([]unstructured.Unstructured, error) { - client := kc.GetDynamicClient(gvr, true) + client := kc.GetDynamicClient(gvr, namespace) resources, err := client.List(ctx, metav1.ListOptions{ LabelSelector: "cluster.x-k8s.io/cluster-name=" + clusterName, @@ -244,20 +265,20 @@ func (kc *KubeClient) listResource( } // ListMachines returns a list of Machine resources for the given cluster. -func (kc *KubeClient) ListMachines(ctx context.Context, clusterName string) ([]unstructured.Unstructured, error) { +func (kc *KubeClient) ListMachines(ctx context.Context, namespace, clusterName string) ([]unstructured.Unstructured, error) { GinkgoHelper() return kc.listResource(ctx, schema.GroupVersionResource{ Group: "cluster.x-k8s.io", Version: "v1beta1", Resource: "machines", - }, clusterName) + }, namespace, clusterName) } // ListMachineDeployments returns a list of MachineDeployment resources for the // given cluster. 
func (kc *KubeClient) ListMachineDeployments( - ctx context.Context, clusterName string, + ctx context.Context, namespace, clusterName string, ) ([]unstructured.Unstructured, error) { GinkgoHelper() @@ -265,11 +286,11 @@ func (kc *KubeClient) ListMachineDeployments( Group: "cluster.x-k8s.io", Version: "v1beta1", Resource: "machinedeployments", - }, clusterName) + }, namespace, clusterName) } func (kc *KubeClient) ListK0sControlPlanes( - ctx context.Context, clusterName string, + ctx context.Context, namespace, clusterName string, ) ([]unstructured.Unstructured, error) { GinkgoHelper() @@ -277,5 +298,5 @@ func (kc *KubeClient) ListK0sControlPlanes( Group: "controlplane.cluster.x-k8s.io", Version: "v1beta1", Resource: "k0scontrolplanes", - }, clusterName) + }, namespace, clusterName) } diff --git a/test/e2e/managedcluster/aws/aws.go b/test/e2e/managedcluster/aws/aws.go index 441cfc499..56e236b25 100644 --- a/test/e2e/managedcluster/aws/aws.go +++ b/test/e2e/managedcluster/aws/aws.go @@ -39,7 +39,7 @@ func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient, Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta2", Resource: "awsclusters", - }, true) + }, managedcluster.Namespace) awsCluster, err := c.Get(ctx, clusterName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster") diff --git a/test/e2e/managedcluster/azure/azure.go b/test/e2e/managedcluster/azure/azure.go index 2880badcf..3be788a4d 100644 --- a/test/e2e/managedcluster/azure/azure.go +++ b/test/e2e/managedcluster/azure/azure.go @@ -31,6 +31,7 @@ import ( hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" ) func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) map[string]any { @@ -41,7 +42,7 @@ func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) m Resource: "azureclusters", } - dc := kc.GetDynamicClient(resourceID, true) + dc := kc.GetDynamicClient(resourceID, managedcluster.Namespace) list, err := dc.List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: name}).String(), }) diff --git a/test/e2e/managedcluster/clusteridentity/clusteridentity.go b/test/e2e/managedcluster/clusteridentity/clusteridentity.go index 41e01c77e..f8ea0aa57 100644 --- a/test/e2e/managedcluster/clusteridentity/clusteridentity.go +++ b/test/e2e/managedcluster/clusteridentity/clusteridentity.go @@ -39,13 +39,13 @@ type ClusterIdentity struct { IdentityName string SecretData map[string]string Spec map[string]any - Namespaced bool + Namespace string } // New creates a ClusterIdentity resource, credential and associated secret for // the given provider using the provided KubeClient and returns details about // the created ClusterIdentity. 
-func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType) *ClusterIdentity { +func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType, namespace string) *ClusterIdentity { GinkgoHelper() var ( @@ -126,14 +126,17 @@ func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType) *Clust IdentityName: identityName, SecretData: secretStringData, Spec: spec, - Namespaced: namespaced, + } + + if namespaced { + ci.Namespace = namespace } validateSecretDataPopulated(secretStringData) ci.waitForResourceCRD(kc) ci.createSecret(kc) ci.createClusterIdentity(kc) - ci.createCredential(kc) + ci.createCredential(kc, namespace) return &ci } @@ -200,7 +203,7 @@ func (ci *ClusterIdentity) createSecret(kc *kubeclient.KubeClient) { } } -func (ci *ClusterIdentity) createCredential(kc *kubeclient.KubeClient) { +func (ci *ClusterIdentity) createCredential(kc *kubeclient.KubeClient, namespace string) { GinkgoHelper() credName := fmt.Sprintf("%s-cred", ci.IdentityName) @@ -212,14 +215,14 @@ func (ci *ClusterIdentity) createCredential(kc *kubeclient.KubeClient) { "kind": "Credential", "metadata": map[string]any{ "name": credName, - "namespace": kc.Namespace, + "namespace": namespace, }, "spec": map[string]any{ "identityRef": map[string]any{ "apiVersion": ci.GroupVersionResource.Group + "/" + ci.GroupVersionResource.Version, "kind": ci.Kind, "name": ci.IdentityName, - "namespace": kc.Namespace, + "namespace": ci.Namespace, }, }, }, @@ -229,7 +232,7 @@ func (ci *ClusterIdentity) createCredential(kc *kubeclient.KubeClient) { Group: "hmc.mirantis.com", Version: "v1alpha1", Resource: "credentials", - }, cred, true) + }, cred, namespace) } // createClusterIdentity creates a ClusterIdentity resource. @@ -244,11 +247,11 @@ func (ci *ClusterIdentity) createClusterIdentity(kc *kubeclient.KubeClient) { "kind": ci.Kind, "metadata": map[string]any{ "name": ci.IdentityName, - "namespace": kc.Namespace, + "namespace": ci.Namespace, }, "spec": ci.Spec, }, } - kc.CreateOrUpdateUnstructuredObject(ci.GroupVersionResource, id, ci.Namespaced) + kc.CreateOrUpdateUnstructuredObject(ci.GroupVersionResource, id, ci.Namespace) } diff --git a/test/e2e/managedcluster/common.go b/test/e2e/managedcluster/common.go index 5fabf7fde..65298cf58 100644 --- a/test/e2e/managedcluster/common.go +++ b/test/e2e/managedcluster/common.go @@ -34,7 +34,7 @@ import ( // See: https://docs.k0smotron.io/stable/capi-aws/#prepare-the-aws-infra-provider // Use Eventually as the resource might not be available immediately following // a ManagedCluster creation. 
-func PatchHostedClusterReady(kc *kubeclient.KubeClient, provider ProviderType, clusterName string) { +func PatchHostedClusterReady(kc *kubeclient.KubeClient, provider ProviderType, namespace, clusterName string) { GinkgoHelper() ctx := context.Background() @@ -61,7 +61,7 @@ func PatchHostedClusterReady(kc *kubeclient.KubeClient, provider ProviderType, c Group: "infrastructure.cluster.x-k8s.io", Version: version, Resource: resource, - }, true) + }, namespace) trueStatus := map[string]any{ "status": map[string]any{ diff --git a/test/e2e/managedcluster/constants.go b/test/e2e/managedcluster/constants.go index 4f18a7832..d6809f071 100644 --- a/test/e2e/managedcluster/constants.go +++ b/test/e2e/managedcluster/constants.go @@ -16,10 +16,11 @@ package managedcluster const ( // Common - EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" - EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" - EnvVarWorkerNumber = "WORKER_NUMBER" - EnvVarNamespace = "NAMESPACE" + EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" + EnvVarManagedClusterTemplate = "MANAGED_CLUSTER_TEMPLATE" + EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" + EnvVarWorkerNumber = "WORKER_NUMBER" + EnvVarNamespace = "NAMESPACE" // EnvVarNoCleanup disables After* cleanup in provider specs to allow for // debugging of test failures. EnvVarNoCleanup = "NO_CLEANUP" diff --git a/test/e2e/managedcluster/managedcluster.go b/test/e2e/managedcluster/managedcluster.go index 72efa96f8..35e5f2e15 100644 --- a/test/e2e/managedcluster/managedcluster.go +++ b/test/e2e/managedcluster/managedcluster.go @@ -27,6 +27,7 @@ import ( "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/Mirantis/hmc/test/e2e/templates" "github.com/Mirantis/hmc/test/utils" ) @@ -39,17 +40,8 @@ const ( ProviderVSphere ProviderType = "infrastructure-vsphere" providerLabel = "cluster.x-k8s.io/provider" -) - -type Template string -const ( - TemplateAWSStandaloneCP Template = "aws-standalone-cp" - TemplateAWSHostedCP Template = "aws-hosted-cp" - TemplateAzureHostedCP Template = "azure-hosted-cp" - TemplateAzureStandaloneCP Template = "azure-standalone-cp" - TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp" - TemplateVSphereHostedCP Template = "vsphere-hosted-cp" + Namespace = "default" ) //go:embed resources/aws-standalone-cp.yaml.tpl @@ -84,7 +76,7 @@ func GetProviderLabel(provider ProviderType) string { return fmt.Sprintf("%s=%s", providerLabel, provider) } -func setClusterName(templateName Template) { +func setClusterName(templateType templates.Type) { var generatedName string mcName := os.Getenv(EnvVarManagedClusterName) @@ -92,30 +84,35 @@ func setClusterName(templateName Template) { mcName = "e2e-test-" + uuid.New().String()[:8] } - providerName := strings.Split(string(templateName), "-")[0] + providerName := strings.Split(string(templateType), "-")[0] // Append the provider name to the cluster name to ensure uniqueness between // different deployed ManagedClusters. generatedName = fmt.Sprintf("%s-%s", mcName, providerName) - if strings.Contains(string(templateName), "hosted") { + if strings.Contains(string(templateType), "hosted") { generatedName = fmt.Sprintf("%s-%s", mcName, "hosted") } GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) } +func setTemplate(templateName string) { + GinkgoT().Setenv(EnvVarManagedClusterTemplate, templateName) +} + // GetUnstructured returns an unstructured ManagedCluster object based on the // provider and template. 
-func GetUnstructured(templateName Template) *unstructured.Unstructured { +func GetUnstructured(templateType templates.Type, templateName string) *unstructured.Unstructured { GinkgoHelper() - setClusterName(templateName) + setClusterName(templateType) + setTemplate(templateName) var managedClusterTemplateBytes []byte - switch templateName { - case TemplateAWSStandaloneCP: + switch templateType { + case templates.TemplateAWSStandaloneCP: managedClusterTemplateBytes = awsStandaloneCPManagedClusterTemplateBytes - case TemplateAWSHostedCP: + case templates.TemplateAWSHostedCP: // Validate environment vars that do not have defaults are populated. // We perform this validation here instead of within a Before block // since we populate the vars from standalone prior to this step. @@ -126,16 +123,16 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { EnvVarAWSSecurityGroupID, }) managedClusterTemplateBytes = awsHostedCPManagedClusterTemplateBytes - case TemplateVSphereStandaloneCP: + case templates.TemplateVSphereStandaloneCP: managedClusterTemplateBytes = vsphereStandaloneCPManagedClusterTemplateBytes - case TemplateVSphereHostedCP: + case templates.TemplateVSphereHostedCP: managedClusterTemplateBytes = vsphereHostedCPManagedClusterTemplateBytes - case TemplateAzureHostedCP: + case templates.TemplateAzureHostedCP: managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes - case TemplateAzureStandaloneCP: + case templates.TemplateAzureStandaloneCP: managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes default: - Fail(fmt.Sprintf("Unsupported template: %s", templateName)) + Fail(fmt.Sprintf("Unsupported template type: %s", templateType)) } managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) diff --git a/test/e2e/managedcluster/providervalidator.go b/test/e2e/managedcluster/providervalidator.go index 4df0fa84b..0af08a534 100644 --- a/test/e2e/managedcluster/providervalidator.go +++ b/test/e2e/managedcluster/providervalidator.go @@ -21,14 +21,17 @@ import ( . "github.com/onsi/ginkgo/v2" "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/templates" ) // ProviderValidator is a struct that contains the necessary information to // validate a provider's resources. Some providers do not support all of the // resources that can potentially be validated. type ProviderValidator struct { - // Template is the name of the template being validated. - template Template + // Template is the type of the template being validated. + template templates.Type + // Namespace is the namespace of the cluster to validate. + namespace string // ClusterName is the name of the cluster to validate. 
clusterName string // ResourcesToValidate is a map of resource names to their validation @@ -46,7 +49,7 @@ const ( ValidationActionDelete ValidationAction = "delete" ) -func NewProviderValidator(template Template, clusterName string, action ValidationAction) *ProviderValidator { +func NewProviderValidator(templateType templates.Type, namespace, clusterName string, action ValidationAction) *ProviderValidator { var ( resourcesToValidate map[string]resourceValidationFunc resourceOrder []string @@ -61,11 +64,11 @@ func NewProviderValidator(template Template, clusterName string, action Validati } resourceOrder = []string{"clusters", "machines", "control-planes", "csi-driver"} - switch template { - case TemplateAWSStandaloneCP, TemplateAWSHostedCP: + switch templateType { + case templates.TemplateAWSStandaloneCP, templates.TemplateAWSHostedCP: resourcesToValidate["ccm"] = validateCCM resourceOrder = append(resourceOrder, "ccm") - case TemplateAzureStandaloneCP, TemplateVSphereStandaloneCP: + case templates.TemplateAzureStandaloneCP, templates.TemplateVSphereStandaloneCP: delete(resourcesToValidate, "csi-driver") } } else { @@ -78,7 +81,8 @@ func NewProviderValidator(template Template, clusterName string, action Validati } return &ProviderValidator{ - template: template, + template: templateType, + namespace: namespace, clusterName: clusterName, resourcesToValidate: resourcesToValidate, resourceOrder: resourceOrder, @@ -103,12 +107,12 @@ func (p *ProviderValidator) Validate(ctx context.Context, kc *kubeclient.KubeCli continue } - if err := validator(ctx, kc, p.clusterName); err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "[%s/%s] validation error: %v\n", p.template, name, err) + if err := validator(ctx, kc, p.namespace, p.clusterName); err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Template %s [%s/%s] validation error: %v\n", p.template, p.namespace, name, err) return err } - _, _ = fmt.Fprintf(GinkgoWriter, "[%s/%s] validation succeeded\n", p.template, name) + _, _ = fmt.Fprintf(GinkgoWriter, "Template %s [%s/%s] validation succeeded\n", p.template, p.namespace, name) delete(p.resourcesToValidate, name) } diff --git a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl index 8a2700c63..b8108cce9 100644 --- a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-hosted-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: diff --git a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl index 7f49cb388..ed09f8080 100644 --- a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-standalone-cp-0-0-4 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: diff --git a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl index 76da17cbb..5474ea82b 100644 --- a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: ${MANAGED_CLUSTER_NAME} namespace: 
${NAMESPACE} spec: - template: azure-hosted-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AZURE_CLUSTER_IDENTITY}-cred config: location: "${AZURE_REGION}" diff --git a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl index 4b68cd4c9..a68891ce0 100644 --- a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: - template: azure-standalone-cp-0-0-4 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${AZURE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: 1 diff --git a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl index c0475f3f4..42b5efc7c 100644 --- a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-hosted-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} diff --git a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl index cc5fa87b3..95a77c9e1 100644 --- a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-standalone-cp-0-0-3 + template: ${MANAGED_CLUSTER_TEMPLATE} credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} diff --git a/test/e2e/managedcluster/upgrade.go b/test/e2e/managedcluster/upgrade.go new file mode 100644 index 000000000..ae04ded76 --- /dev/null +++ b/test/e2e/managedcluster/upgrade.go @@ -0,0 +1,89 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package managedcluster + +import ( + "context" + "fmt" + "time" + + hcv2 "github.com/fluxcd/helm-controller/api/v2" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +func Upgrade(ctx context.Context, cl crclient.Client, clusterNamespace, clusterName, newTemplate string) { + cluster := &hmc.ManagedCluster{} + err := cl.Get(ctx, types.NamespacedName{ + Namespace: clusterNamespace, + Name: clusterName, + }, cluster) + Expect(err).NotTo(HaveOccurred()) + + patch := crclient.MergeFrom(cluster.DeepCopy()) + cluster.Spec.Template = newTemplate + err = cl.Patch(ctx, cluster, patch) + Expect(err).NotTo(HaveOccurred()) + + template := &hmc.ClusterTemplate{} + err = cl.Get(ctx, types.NamespacedName{ + Namespace: clusterNamespace, + Name: newTemplate, + }, template) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + errorMessage, upgraded := validateClusterUpgraded(ctx, cl, clusterNamespace, clusterName, template.Status.ChartRef.Name) + if !upgraded { + _, _ = fmt.Fprintf(GinkgoWriter, errorMessage, "\n") + return false + } + return true + }, 20*time.Minute, 20*time.Second).Should(BeTrue()) +} + +func validateClusterUpgraded(ctx context.Context, cl crclient.Client, clusterNamespace, clusterName, chartName string) (string, bool) { + hr := &hcv2.HelmRelease{} + err := cl.Get(ctx, types.NamespacedName{ + Namespace: clusterNamespace, + Name: clusterName, + }, hr) + if err != nil { + return fmt.Sprintf("failed to get %s/%s HelmRelease %v", clusterNamespace, clusterName, err), false + } + if hr.Spec.ChartRef.Name != chartName { + return fmt.Sprintf("waiting for chartName to be updated in %s/%s HelmRelease", clusterNamespace, clusterName), false + } + readyCondition := apimeta.FindStatusCondition(hr.GetConditions(), hmc.ReadyCondition) + if readyCondition == nil { + return fmt.Sprintf("waiting for %s/%s HelmRelease to have Ready condition", clusterNamespace, clusterName), false + } + if readyCondition.ObservedGeneration != hr.Generation { + return "waiting for status.observedGeneration to be updated", false + } + if readyCondition.Status != metav1.ConditionTrue { + return "waiting for Ready condition to have status: true", false + } + if readyCondition.Reason != hcv2.UpgradeSucceededReason { + return "waiting for Ready condition to have `UpgradeSucceeded` reason", false + } + return "", true +} diff --git a/test/e2e/managedcluster/validate_deleted.go b/test/e2e/managedcluster/validate_deleted.go index e09d4c254..8d2e7b94e 100644 --- a/test/e2e/managedcluster/validate_deleted.go +++ b/test/e2e/managedcluster/validate_deleted.go @@ -28,9 +28,9 @@ import ( ) // validateClusterDeleted validates that the Cluster resource has been deleted. -func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { +func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { // Validate that the Cluster resource has been deleted - cluster, err := kc.GetCluster(ctx, clusterName) + cluster, err := kc.GetCluster(ctx, namespace, clusterName) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -64,8 +64,8 @@ func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, clus // validateMachineDeploymentsDeleted validates that all MachineDeployments have // been deleted. 
-func validateMachineDeploymentsDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - machineDeployments, err := kc.ListMachineDeployments(ctx, clusterName) +func validateMachineDeploymentsDeleted(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { + machineDeployments, err := kc.ListMachineDeployments(ctx, namespace, clusterName) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -84,8 +84,8 @@ func validateMachineDeploymentsDeleted(ctx context.Context, kc *kubeclient.KubeC // validateK0sControlPlanesDeleted validates that all k0scontrolplanes have // been deleted. -func validateK0sControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - controlPlanes, err := kc.ListK0sControlPlanes(ctx, clusterName) +func validateK0sControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { + controlPlanes, err := kc.ListK0sControlPlanes(ctx, namespace, clusterName) if err != nil && !apierrors.IsNotFound(err) { return err } diff --git a/test/e2e/managedcluster/validate_deployed.go b/test/e2e/managedcluster/validate_deployed.go index bae823f75..07dd7470d 100644 --- a/test/e2e/managedcluster/validate_deployed.go +++ b/test/e2e/managedcluster/validate_deployed.go @@ -34,10 +34,10 @@ import ( // resourceValidationFunc is intended to validate a specific kubernetes // resource. -type resourceValidationFunc func(context.Context, *kubeclient.KubeClient, string) error +type resourceValidationFunc func(context.Context, *kubeclient.KubeClient, string, string) error -func validateCluster(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - cluster, err := kc.GetCluster(ctx, clusterName) +func validateCluster(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { + cluster, err := kc.GetCluster(ctx, namespace, clusterName) if err != nil { return err } @@ -58,8 +58,8 @@ func validateCluster(ctx context.Context, kc *kubeclient.KubeClient, clusterName return utils.ValidateConditionsTrue(cluster) } -func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - machines, err := kc.ListMachines(ctx, clusterName) +func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { + machines, err := kc.ListMachines(ctx, namespace, clusterName) if err != nil { return err } @@ -67,7 +67,7 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam if len(machines) == 0 { // No machines have been created yet, check for MachineDeployments to // provide some debug information as to why no machines are present. 
- md, err := kc.ListMachineDeployments(ctx, clusterName) + md, err := kc.ListMachineDeployments(ctx, namespace, clusterName) if err != nil { return fmt.Errorf("failed to list machine deployments: %w", err) } @@ -98,8 +98,8 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam return nil } -func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - controlPlanes, err := kc.ListK0sControlPlanes(ctx, clusterName) +func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { + controlPlanes, err := kc.ListK0sControlPlanes(ctx, namespace, clusterName) if err != nil { return err } @@ -141,8 +141,8 @@ func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, cl // validateCSIDriver validates that the provider CSI driver is functioning // by creating a PVC and verifying it enters "Bound" status. -func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - clusterKC := kc.NewFromCluster(ctx, "default", clusterName) +func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { + clusterKC := kc.NewFromCluster(ctx, namespace, clusterName) pvcName := clusterName + "-csi-test-pvc" @@ -228,8 +228,8 @@ func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, clusterNa // validateCCM validates that the provider's cloud controller manager is // functional by creating a LoadBalancer service and verifying it is assigned // an external IP. -func validateCCM(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - clusterKC := kc.NewFromCluster(ctx, "default", clusterName) +func validateCCM(ctx context.Context, kc *kubeclient.KubeClient, namespace, clusterName string) error { + clusterKC := kc.NewFromCluster(ctx, namespace, clusterName) createdServiceName := "loadbalancer-" + clusterName diff --git a/test/e2e/controller_test.go b/test/e2e/scenarios/controller_test.go similarity index 98% rename from test/e2e/controller_test.go rename to test/e2e/scenarios/controller_test.go index 7b3897a0b..05c18daf9 100644 --- a/test/e2e/controller_test.go +++ b/test/e2e/scenarios/controller_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package e2e +package scenarios import ( . "github.com/onsi/ginkgo/v2" diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/scenarios/e2e_suite_test.go similarity index 92% rename from test/e2e/e2e_suite_test.go rename to test/e2e/scenarios/e2e_suite_test.go index e76a4c245..a7364aed0 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/scenarios/e2e_suite_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package e2e +package scenarios import ( "bufio" @@ -32,9 +32,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" + hmc "github.com/Mirantis/hmc/api/v1alpha1" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/config" "github.com/Mirantis/hmc/test/e2e/kubeclient" "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/e2e/templates" "github.com/Mirantis/hmc/test/utils" ) @@ -45,12 +48,19 @@ func TestE2E(t *testing.T) { RunSpecs(t, "e2e suite") } +var clusterTemplates map[string][]hmc.AvailableUpgrade + var _ = BeforeSuite(func() { + err := config.Parse() + Expect(err).NotTo(HaveOccurred()) + GinkgoT().Setenv(managedcluster.EnvVarNamespace, internalutils.DefaultSystemNamespace) + ctx := context.Background() + By("building and deploying the controller-manager") cmd := exec.Command("make", "kind-deploy") - _, err := utils.Run(cmd) + _, err = utils.Run(cmd) Expect(err).NotTo(HaveOccurred()) cmd = exec.Command("make", "test-apply") _, err = utils.Run(cmd) @@ -66,6 +76,9 @@ var _ = BeforeSuite(func() { } return nil }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By(fmt.Sprintf("applying access rules for ClusterTemplates in %s namespace", managedcluster.Namespace)) + clusterTemplates = templates.ApplyClusterTemplateAccessRules(ctx, kc.CrClient, managedcluster.Namespace) }) var _ = AfterSuite(func() { @@ -144,7 +157,7 @@ func validateController(kc *kubeclient.KubeClient, labelSelector, name string) e // templateBy wraps a Ginkgo By with a block describing the template being // tested. -func templateBy(t managedcluster.Template, description string) { +func templateBy(t templates.Type, description string) { GinkgoHelper() By(fmt.Sprintf("[%s] %s", t, description)) } diff --git a/test/e2e/provider_aws_test.go b/test/e2e/scenarios/provider_aws_test.go similarity index 62% rename from test/e2e/provider_aws_test.go rename to test/e2e/scenarios/provider_aws_test.go index 6614698b4..155c9c263 100644 --- a/test/e2e/provider_aws_test.go +++ b/test/e2e/scenarios/provider_aws_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package e2e +package scenarios import ( "context" @@ -25,14 +25,18 @@ import ( . 
"github.com/onsi/gomega" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/config" "github.com/Mirantis/hmc/test/e2e/kubeclient" "github.com/Mirantis/hmc/test/e2e/managedcluster" "github.com/Mirantis/hmc/test/e2e/managedcluster/aws" "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" + "github.com/Mirantis/hmc/test/e2e/templates" "github.com/Mirantis/hmc/test/utils" ) var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Ordered, func() { + ctx := context.Background() + var ( kc *kubeclient.KubeClient standaloneClient *kubeclient.KubeClient @@ -40,12 +44,26 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order hostedDeleteFunc func() error kubecfgDeleteFunc func() error clusterName string + + testingConfig config.ProviderTestingConfig ) BeforeAll(func() { + By("get testing configuration") + testingConfig = config.Config[config.TestingProviderAWS] + + By("set defaults and validate testing configuration") + err := testingConfig.Standalone.SetDefaults(clusterTemplates, templates.TemplateAWSStandaloneCP) + Expect(err).NotTo(HaveOccurred()) + + err = testingConfig.Hosted.SetDefaults(clusterTemplates, templates.TemplateAWSHostedCP) + Expect(err).NotTo(HaveOccurred()) + + _, _ = fmt.Fprintf(GinkgoWriter, "Final AWS testing configuration:\n%s\n", testingConfig.String()) + By("providing cluster identity") kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) - ci := clusteridentity.New(kc, managedcluster.ProviderAWS) + ci := clusteridentity.New(kc, managedcluster.ProviderAWS, managedcluster.Namespace) Expect(os.Setenv(managedcluster.EnvVarAWSClusterIdentity, ci.IdentityName)).Should(Succeed()) }) @@ -78,15 +96,17 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order // hosting the hosted cluster. 
GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") - templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) + templateBy(templates.TemplateAWSStandaloneCP, fmt.Sprintf("creating a ManagedCluster with %s template", testingConfig.Standalone.Template)) + sd := managedcluster.GetUnstructured(templates.TemplateAWSStandaloneCP, testingConfig.Standalone.Template) + clusterName = sd.GetName() - standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd, managedcluster.Namespace) - templateBy(managedcluster.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") + templateBy(templates.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, + templates.TemplateAWSStandaloneCP, + managedcluster.Namespace, clusterName, managedcluster.ValidationActionDeploy, ) @@ -95,7 +115,7 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order return deploymentValidator.Validate(context.Background(), kc) }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - templateBy(managedcluster.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") + templateBy(templates.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") // Download the KUBECONFIG for the standalone cluster and load it // so we can call Make targets against this cluster. @@ -103,7 +123,7 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order // convert these Make targets into Go code, but this will require a // helmclient. var kubeCfgPath string - kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), clusterName) + kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), managedcluster.Namespace, clusterName) GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) cmd := exec.Command("make", "test-apply") @@ -111,40 +131,44 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order Expect(err).NotTo(HaveOccurred()) Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) - templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") - standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, clusterName) + templateBy(templates.TemplateAWSHostedCP, "validating that the controller is ready") + standaloneClient = kc.NewFromCluster(context.Background(), managedcluster.Namespace, clusterName) Eventually(func() error { err := verifyControllersUp(standaloneClient) if err != nil { _, _ = fmt.Fprintf( GinkgoWriter, "[%s] controller validation failed: %v\n", - string(managedcluster.TemplateAWSHostedCP), err) + string(templates.TemplateAWSHostedCP), err) return err } return nil }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + By(fmt.Sprintf("applying access rules for ClusterTemplates in %s namespace", managedcluster.Namespace)) + templates.ApplyClusterTemplateAccessRules(ctx, standaloneClient.CrClient, managedcluster.Namespace) + // Ensure AWS credentials are set in the standalone cluster. 
- clusteridentity.New(standaloneClient, managedcluster.ProviderAWS) + clusteridentity.New(standaloneClient, managedcluster.ProviderAWS, managedcluster.Namespace) // Populate the environment variables required for the hosted // cluster. aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) - templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") - hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) + templateBy(templates.TemplateAWSHostedCP, fmt.Sprintf("creating a ManagedCluster with %s template", testingConfig.Hosted.Template)) + hd := managedcluster.GetUnstructured(templates.TemplateAWSHostedCP, testingConfig.Hosted.Template) hdName := hd.GetName() // Deploy the hosted cluster on top of the standalone cluster. - hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) + hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd, managedcluster.Namespace) - templateBy(managedcluster.TemplateAWSHostedCP, "Patching AWSCluster to ready") - managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAWS, hdName) + templateBy(templates.TemplateAWSHostedCP, "Patching AWSCluster to ready") + managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAWS, managedcluster.Namespace, hdName) // Verify the hosted cluster is running/ready. - templateBy(managedcluster.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully") + templateBy(templates.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully") deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAWSHostedCP, + templates.TemplateAWSHostedCP, + managedcluster.Namespace, hdName, managedcluster.ValidationActionDeploy, ) @@ -152,13 +176,32 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order return deploymentValidator.Validate(context.Background(), standaloneClient) }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + if testingConfig.Standalone.Upgrade { + managedcluster.Upgrade(ctx, kc.CrClient, managedcluster.Namespace, clusterName, testingConfig.Standalone.UpgradeTemplate) + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // Validate hosted deployment + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + if testingConfig.Hosted.Upgrade { + managedcluster.Upgrade(ctx, standaloneClient.CrClient, managedcluster.Namespace, hdName, testingConfig.Hosted.UpgradeTemplate) + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + // Delete the hosted ManagedCluster and verify it is removed. 
-	templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster")
+	templateBy(templates.TemplateAWSHostedCP, "deleting the ManagedCluster")
 	err = hostedDeleteFunc()
 	Expect(err).NotTo(HaveOccurred())
 	deletionValidator := managedcluster.NewProviderValidator(
-		managedcluster.TemplateAWSHostedCP,
+		templates.TemplateAWSHostedCP,
+		managedcluster.Namespace,
 		hdName,
 		managedcluster.ValidationActionDelete,
 	)
diff --git a/test/e2e/provider_azure_test.go b/test/e2e/scenarios/provider_azure_test.go
similarity index 61%
rename from test/e2e/provider_azure_test.go
rename to test/e2e/scenarios/provider_azure_test.go
index fcdbe27c7..281ab27d4 100644
--- a/test/e2e/provider_azure_test.go
+++ b/test/e2e/scenarios/provider_azure_test.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package e2e
+package scenarios
 import (
 	"context"
@@ -25,14 +25,18 @@ import (
 	. "github.com/onsi/gomega"
 	internalutils "github.com/Mirantis/hmc/internal/utils"
+	"github.com/Mirantis/hmc/test/e2e/config"
 	"github.com/Mirantis/hmc/test/e2e/kubeclient"
 	"github.com/Mirantis/hmc/test/e2e/managedcluster"
 	"github.com/Mirantis/hmc/test/e2e/managedcluster/azure"
 	"github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity"
+	"github.com/Mirantis/hmc/test/e2e/templates"
 	"github.com/Mirantis/hmc/test/utils"
 )
 var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() {
+	ctx := context.Background()
+
 	var (
 		kc *kubeclient.KubeClient
 		standaloneClient *kubeclient.KubeClient
@@ -41,12 +45,26 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 		kubecfgDeleteFunc func() error
 		hostedKubecfgDeleteFunc func() error
 		sdName string
+
+		testingConfig config.ProviderTestingConfig
 	)
 	BeforeAll(func() {
+		By("get testing configuration")
+		testingConfig = config.Config[config.TestingProviderAzure]
+
+		By("set defaults and validate testing configuration")
+		err := testingConfig.Standalone.SetDefaults(clusterTemplates, templates.TemplateAzureStandaloneCP)
+		Expect(err).NotTo(HaveOccurred())
+
+		err = testingConfig.Hosted.SetDefaults(clusterTemplates, templates.TemplateAzureHostedCP)
+		Expect(err).NotTo(HaveOccurred())
+
+		_, _ = fmt.Fprintf(GinkgoWriter, "Final Azure testing configuration:\n%s\n", testingConfig.String())
+
 		By("ensuring Azure credentials are set")
 		kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace)
-		ci := clusteridentity.New(kc, managedcluster.ProviderAzure)
+		ci := clusteridentity.New(kc, managedcluster.ProviderAzure, managedcluster.Namespace)
 		Expect(os.Setenv(managedcluster.EnvVarAzureClusterIdentity, ci.IdentityName)).Should(Succeed())
 	})
@@ -78,20 +96,21 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 	})
 	It("should work with an Azure provider", func() {
-		templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster")
-		sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP)
+		templateBy(templates.TemplateAzureStandaloneCP, fmt.Sprintf("creating a ManagedCluster with %s template", testingConfig.Standalone.Template))
+		sd := managedcluster.GetUnstructured(templates.TemplateAzureStandaloneCP, testingConfig.Standalone.Template)
 		sdName = sd.GetName()
-		standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd)
+		standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd, managedcluster.Namespace)
 		// verify the standalone cluster is deployed correctly
 		deploymentValidator := managedcluster.NewProviderValidator(
-			managedcluster.TemplateAzureStandaloneCP,
+			templates.TemplateAzureStandaloneCP,
+			managedcluster.Namespace,
 			sdName,
 			managedcluster.ValidationActionDeploy,
 		)
-		templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully")
+		templateBy(templates.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully")
 		Eventually(func() error {
 			return deploymentValidator.Validate(context.Background(), kc)
 		}).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
@@ -99,11 +118,11 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 		// setup environment variables for deploying the hosted template (subnet name, etc)
 		azure.SetAzureEnvironmentVariables(sdName, kc)
-		hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP)
+		hd := managedcluster.GetUnstructured(templates.TemplateAzureHostedCP, testingConfig.Hosted.Template)
 		hdName := hd.GetName()
 		var kubeCfgPath string
-		kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName)
+		kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), managedcluster.Namespace, sdName)
 		By("Deploy onto standalone cluster")
 		GinkgoT().Setenv("KUBECONFIG", kubeCfgPath)
@@ -112,7 +131,7 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 		Expect(err).NotTo(HaveOccurred())
 		Expect(os.Unsetenv("KUBECONFIG")).To(Succeed())
-		standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, sdName)
+		standaloneClient = kc.NewFromCluster(context.Background(), managedcluster.Namespace, sdName)
 		// verify the cluster is ready prior to creating credentials
 		Eventually(func() error {
 			err := verifyControllersUp(standaloneClient)
 			if err != nil {
 			return nil
 		}).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
@@ -123,21 +142,25 @@
+		By(fmt.Sprintf("applying access rules for ClusterTemplates in %s namespace", managedcluster.Namespace))
+		templates.ApplyClusterTemplateAccessRules(ctx, standaloneClient.CrClient, managedcluster.Namespace)
+
 		By("Create azure credential secret")
-		clusteridentity.New(standaloneClient, managedcluster.ProviderAzure)
+		clusteridentity.New(standaloneClient, managedcluster.ProviderAzure, managedcluster.Namespace)
 		By("Create default storage class for azure-disk CSI driver")
 		azure.CreateDefaultStorageClass(standaloneClient)
-		templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster")
-		hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd)
+		templateBy(templates.TemplateAzureHostedCP, fmt.Sprintf("creating a ManagedCluster with %s template", testingConfig.Hosted.Template))
+		hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd, managedcluster.Namespace)
-		templateBy(managedcluster.TemplateAzureHostedCP, "Patching AzureCluster to ready")
-		managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAzure, hdName)
+		templateBy(templates.TemplateAzureHostedCP, "Patching AzureCluster to ready")
+		managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAzure, managedcluster.Namespace, hdName)
-		templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully")
+		templateBy(templates.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully")
 		deploymentValidator = managedcluster.NewProviderValidator(
-			managedcluster.TemplateAzureHostedCP,
+			templates.TemplateAzureHostedCP,
+			managedcluster.Namespace,
 			hdName,
 			managedcluster.ValidationActionDeploy,
 		)
@@ -146,6 +169,24 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 		Eventually(func() error {
 			return deploymentValidator.Validate(context.Background(), standaloneClient)
 		}).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+		if testingConfig.Standalone.Upgrade {
+			managedcluster.Upgrade(ctx, kc.CrClient, managedcluster.Namespace, sdName, testingConfig.Standalone.UpgradeTemplate)
+			Eventually(func() error {
+				return deploymentValidator.Validate(context.Background(), kc)
+			}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+			// Validate hosted deployment
+			Eventually(func() error {
+				return deploymentValidator.Validate(context.Background(), standaloneClient)
+			}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+		}
+		if testingConfig.Hosted.Upgrade {
+			managedcluster.Upgrade(ctx, standaloneClient.CrClient, managedcluster.Namespace, hdName, testingConfig.Hosted.UpgradeTemplate)
+			Eventually(func() error {
+				return deploymentValidator.Validate(context.Background(), standaloneClient)
+			}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+		}
+
 		By("verify the deployment deletes successfully")
 		err = hostedDeleteFunc()
 		Expect(err).NotTo(HaveOccurred())
@@ -154,7 +195,8 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 		Expect(err).NotTo(HaveOccurred())
 		deploymentValidator = managedcluster.NewProviderValidator(
-			managedcluster.TemplateAzureHostedCP,
+			templates.TemplateAzureHostedCP,
+			managedcluster.Namespace,
 			hdName,
 			managedcluster.ValidationActionDelete,
 		)
@@ -164,7 +206,8 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
 		}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
 		deploymentValidator = managedcluster.NewProviderValidator(
-			managedcluster.TemplateAzureStandaloneCP,
+			templates.TemplateAzureStandaloneCP,
+			managedcluster.Namespace,
 			hdName,
 			managedcluster.ValidationActionDelete,
 		)
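The per-provider settings read through config.Config[...] in the scenarios above come from a base64-encoded YAML document supplied to the test run (E2E_CONFIG_B64). Below is a minimal sketch of producing such a value; the YAML keys and the template versions are illustrative assumptions that only mirror the fields the scenarios read (standalone/hosted, template, upgrade, upgradeTemplate), not a documented schema.

```go
// encode_e2e_config.go - illustrative only; the real schema is owned by the
// test/e2e/config package, and the keys below are assumed to mirror its fields.
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	cfg := `azure:
  standalone:
    template: azure-standalone-cp-0-0-4   # assumed example name
  hosted:
    upgrade: true                         # exercise the hosted upgrade path
vsphere:
  standalone:
    upgrade: true
`
	// The suite expects the YAML base64-encoded in the E2E_CONFIG_B64 environment variable.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(cfg)))
}
```

Providers or clusters omitted from the document simply fall back to the defaulting logic (latest available template, no upgrade).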
"github.com/onsi/gomega" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/config" "github.com/Mirantis/hmc/test/e2e/kubeclient" "github.com/Mirantis/hmc/test/e2e/managedcluster" "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" "github.com/Mirantis/hmc/test/e2e/managedcluster/vsphere" + "github.com/Mirantis/hmc/test/e2e/templates" ) var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() { + ctx := context.Background() + var ( kc *kubeclient.KubeClient deleteFunc func() error clusterName string err error + + testingConfig config.ProviderTestingConfig ) BeforeAll(func() { + By("get testing configuration") + testingConfig = config.Config[config.TestingProviderVsphere] + + By("set defaults and validate testing configuration") + err := testingConfig.Standalone.SetDefaults(clusterTemplates, templates.TemplateVSphereStandaloneCP) + Expect(err).NotTo(HaveOccurred()) + + err = testingConfig.Hosted.SetDefaults(clusterTemplates, templates.TemplateVSphereHostedCP) + Expect(err).NotTo(HaveOccurred()) + + _, _ = fmt.Fprintf(GinkgoWriter, "Final Vsphere testing configuration:\n%s\n", testingConfig.String()) + By("ensuring that env vars are set correctly") vsphere.CheckEnv() By("creating kube client") kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) By("providing cluster identity") - ci := clusteridentity.New(kc, managedcluster.ProviderVSphere) + ci := clusteridentity.New(kc, managedcluster.ProviderVSphere, managedcluster.Namespace) By("setting VSPHERE_CLUSTER_IDENTITY env variable") Expect(os.Setenv(managedcluster.EnvVarVSphereClusterIdentity, ci.IdentityName)).Should(Succeed()) }) @@ -65,7 +84,8 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere" // fails to do so. 
if deleteFunc != nil && !noCleanup() { deletionValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateVSphereStandaloneCP, + templates.TemplateVSphereStandaloneCP, + managedcluster.Namespace, clusterName, managedcluster.ValidationActionDelete, ) @@ -79,20 +99,28 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere" }) It("should deploy standalone managed cluster", func() { - By("creating a managed cluster") - d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP) + By(fmt.Sprintf("creating a managed cluster with %s template", testingConfig.Standalone.Template)) + d := managedcluster.GetUnstructured(templates.TemplateVSphereStandaloneCP, testingConfig.Standalone.Template) clusterName = d.GetName() - deleteFunc = kc.CreateManagedCluster(context.Background(), d) + deleteFunc = kc.CreateManagedCluster(context.Background(), d, managedcluster.Namespace) By("waiting for infrastructure providers to deploy successfully") deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateVSphereStandaloneCP, + templates.TemplateVSphereStandaloneCP, + managedcluster.Namespace, clusterName, managedcluster.ValidationActionDeploy, ) Eventually(func() error { return deploymentValidator.Validate(context.Background(), kc) }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + if testingConfig.Standalone.Upgrade { + managedcluster.Upgrade(ctx, kc.CrClient, managedcluster.Namespace, clusterName, testingConfig.Standalone.UpgradeTemplate) + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } }) }) diff --git a/test/e2e/templates/templates.go b/test/e2e/templates/templates.go new file mode 100644 index 000000000..e0bf1307b --- /dev/null +++ b/test/e2e/templates/templates.go @@ -0,0 +1,215 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package templates + +import ( + "context" + "fmt" + "slices" + "sort" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
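Both provider scenarios drive upgrades through managedcluster.Upgrade, which is not part of this excerpt. The sketch below only illustrates the kind of change such a helper presumably boils down to: re-pointing an existing ManagedCluster at the target ClusterTemplate and patching it. The "ManagedCluster" kind string and the spec.template field path are assumptions here; the real helper in test/e2e/managedcluster is authoritative.

```go
// Hypothetical sketch - not the project's implementation.
package upgradesketch

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
)

// switchTemplate points an existing ManagedCluster at targetTemplate and patches it.
func switchTemplate(ctx context.Context, c crclient.Client, namespace, name, targetTemplate string) error {
	mc := &unstructured.Unstructured{}
	mc.SetGroupVersionKind(hmc.GroupVersion.WithKind("ManagedCluster")) // kind name assumed
	if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, mc); err != nil {
		return err
	}
	patch := crclient.MergeFrom(mc.DeepCopy())
	// spec.template is assumed to be the field governed by the ClusterTemplateChain
	if err := unstructured.SetNestedField(mc.Object, targetTemplate, "spec", "template"); err != nil {
		return err
	}
	return c.Patch(ctx, mc, patch)
}
```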
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" + internalutils "github.com/Mirantis/hmc/internal/utils" +) + +type Type string + +const ( + TemplateAWSStandaloneCP Type = "aws-standalone-cp" + TemplateAWSHostedCP Type = "aws-hosted-cp" + TemplateAzureHostedCP Type = "azure-hosted-cp" + TemplateAzureStandaloneCP Type = "azure-standalone-cp" + TemplateVSphereStandaloneCP Type = "vsphere-standalone-cp" + TemplateVSphereHostedCP Type = "vsphere-hosted-cp" +) + +func ApplyClusterTemplateAccessRules(ctx context.Context, client crclient.Client, namespace string) map[string][]hmc.AvailableUpgrade { + ctChains := &metav1.PartialObjectMetadataList{} + gvk := hmc.GroupVersion.WithKind(hmc.ClusterTemplateChainKind) + ctChains.SetGroupVersionKind(gvk) + + err := client.List(ctx, ctChains, crclient.InNamespace(internalutils.DefaultSystemNamespace)) + Expect(err).NotTo(HaveOccurred()) + Expect(ctChains.Items).NotTo(BeEmpty()) + + chainNames := make([]string, 0, len(ctChains.Items)) + for _, chain := range ctChains.Items { + chainNames = append(chainNames, chain.Name) + } + + tm := &hmc.AccessManagement{ + ObjectMeta: metav1.ObjectMeta{ + Name: hmc.AccessManagementName, + }, + } + accessRules := []hmc.AccessRule{ + { + TargetNamespaces: hmc.TargetNamespaces{ + List: []string{namespace}, + }, + ClusterTemplateChains: chainNames, + }, + } + + _, err = ctrl.CreateOrUpdate(ctx, client, tm, func() error { + tm.Spec.AccessRules = accessRules + return nil + }) + Expect(err).NotTo(HaveOccurred()) + + clusterTemplateChains := make([]*hmc.ClusterTemplateChain, 0, len(chainNames)) + Eventually(func() error { + var err error + clusterTemplateChains, err = checkClusterTemplateChains(ctx, client, namespace, chainNames) + if err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Not all ClusterTemplateChains were created in the target namespace: %v\n", err) + } + return err + }, 5*time.Minute, 10*time.Second).Should(Succeed()) + + clusterTemplates := getClusterTemplates(clusterTemplateChains) + Eventually(func() error { + err := checkClusterTemplates(ctx, client, namespace, clusterTemplates) + if err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Not all ClusterTemplates were created in the target namespace: %v\n", err) + } + return err + }, 15*time.Minute, 10*time.Second).Should(Succeed()) + return clusterTemplates +} + +func checkClusterTemplateChains(ctx context.Context, client crclient.Client, namespace string, chainNames []string) ([]*hmc.ClusterTemplateChain, error) { + chains := make([]*hmc.ClusterTemplateChain, 0, len(chainNames)) + for _, chainName := range chainNames { + chain := &hmc.ClusterTemplateChain{} + if err := client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: chainName}, chain); err != nil { + return nil, fmt.Errorf("failed to get ClusterTemplateChain %s/%s: %w", namespace, chainName, err) + } + chains = append(chains, chain) + } + return chains, nil +} + +func getClusterTemplates(chains []*hmc.ClusterTemplateChain) map[string][]hmc.AvailableUpgrade { + templates := make(map[string][]hmc.AvailableUpgrade) + for _, chain := range chains { + for _, supportedTemplate := range chain.Spec.SupportedTemplates { + templates[supportedTemplate.Name] = append(templates[supportedTemplate.Name], supportedTemplate.AvailableUpgrades...) 
+ } + } + return templates +} + +func checkClusterTemplates(ctx context.Context, client crclient.Client, namespace string, clusterTemplates map[string][]hmc.AvailableUpgrade) error { + for templateName := range clusterTemplates { + template := &metav1.PartialObjectMetadata{} + gvk := hmc.GroupVersion.WithKind(hmc.ClusterTemplateKind) + template.SetGroupVersionKind(gvk) + if err := client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: templateName}, template); err != nil { + return fmt.Errorf("failed to get ClusterTemplate %s/%s: %w", namespace, templateName, err) + } + } + return nil +} + +func FindTemplate(clusterTemplates map[string][]hmc.AvailableUpgrade, templateType Type) (string, error) { + templates := filterByType(clusterTemplates, templateType) + if len(templates) == 0 { + return "", fmt.Errorf("no Template of the %s type is supported", templateType) + } + return templates[0], nil +} + +func FindTemplatesToUpgrade( + clusterTemplates map[string][]hmc.AvailableUpgrade, + templateType Type, + sourceTemplate string, +) (template, upgradeTemplate string, err error) { + templates := filterByType(clusterTemplates, templateType) + if len(templates) == 0 { + return "", "", fmt.Errorf("no Template of the %s type is supported", templateType) + } + if sourceTemplate != "" { + // Template should be in the list of supported + if !slices.Contains(templates, sourceTemplate) { + return "", "", fmt.Errorf("invalid templates configuration. Template %s is not in the list of supported templates", sourceTemplate) + } + // Template should have available upgrades + availableUpgrades := clusterTemplates[sourceTemplate] + if len(availableUpgrades) == 0 { + return "", "", fmt.Errorf("invalid templates configuration. No upgrades are available from the Template %s", sourceTemplate) + } + // Find latest available template for the upgrade + sort.Slice(availableUpgrades, func(i, j int) bool { + return availableUpgrades[i].Name < availableUpgrades[j].Name + }) + return sourceTemplate, availableUpgrades[len(availableUpgrades)-1].Name, nil + } + + // find template with available upgrades + for _, templateName := range templates { + template = templateName + for _, au := range clusterTemplates[template] { + if upgradeTemplate < au.Name { + upgradeTemplate = au.Name + } + } + if template != "" && upgradeTemplate != "" { + return template, upgradeTemplate, nil + } + } + if template == "" || upgradeTemplate == "" { + return "", "", fmt.Errorf("invalid templates configuration. 
No %s templates are available for the upgrade", templateType) + } + return template, upgradeTemplate, nil +} + +func ValidateTemplate(clusterTemplates map[string][]hmc.AvailableUpgrade, template string) error { + if _, ok := clusterTemplates[template]; ok { + return nil + } + return fmt.Errorf("template %s is not in the list of supported templates", template) +} + +func ValidateUpgradeSequence(clusterTemplates map[string][]hmc.AvailableUpgrade, source, target string) error { + availableUpgrades := clusterTemplates[source] + if _, ok := clusterTemplates[source]; ok && + slices.Contains(availableUpgrades, hmc.AvailableUpgrade{Name: target}) { + return nil + } + return fmt.Errorf("upgrade sequence %s -> %s is not supported", source, target) +} + +func filterByType(clusterTemplates map[string][]hmc.AvailableUpgrade, templateType Type) []string { + var templates []string + for template := range clusterTemplates { + if strings.HasPrefix(template, string(templateType)) { + templates = append(templates, template) + } + } + sort.Slice(templates, func(i, j int) bool { + return templates[i] > templates[j] + }) + return templates +} diff --git a/test/e2e/templates/templates_test.go b/test/e2e/templates/templates_test.go new file mode 100644 index 000000000..06e4c9688 --- /dev/null +++ b/test/e2e/templates/templates_test.go @@ -0,0 +1,131 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package templates + +import ( + "errors" + "testing" + + . "github.com/onsi/gomega" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +func Test_Find(t *testing.T) { + templates := map[string][]hmc.AvailableUpgrade{ + "aws-standalone-cp-0-0-1": { + {Name: "aws-standalone-cp-0-0-3"}, + {Name: "aws-standalone-cp-0-0-2"}, + }, + "aws-standalone-cp-0-0-3": {}, + "aws-standalone-cp-0-0-2": {}, + "aws-hosted-cp-0-0-1": { + {Name: "aws-hosted-cp-0-0-4"}, + {Name: "aws-hosted-cp-0-0-2"}, + }, + "aws-hosted-cp-0-0-4": {}, + "aws-hosted-cp-0-0-2": {}, + "azure-standalone-cp-0-0-1": { + {Name: "azure-standalone-cp-0-0-2"}, + }, + "azure-hosted-cp-0-0-1": { + {Name: "azure-hosted-cp-0-0-2"}, + }, + "vsphere-standalone-cp-0-0-1": { + {Name: "vsphere-standalone-cp-0-0-2"}, + }, + "vsphere-hosted-cp-0-0-1": {}, + } + for _, tt := range []struct { + title string + upgrade bool + sourceTemplate string + clusterTemplates map[string][]hmc.AvailableUpgrade + templateType Type + expectedTemplate string + expectedUpgradeTemplate string + expectedErr error + }{ + { + title: "no templates of the provided type supported", + templateType: "aws-unsupported-cp", + expectedErr: errors.New("no Template of the aws-unsupported-cp type is supported"), + }, + { + title: "should find latest template for aws-hosted-cp", + templateType: TemplateAWSHostedCP, + expectedTemplate: "aws-hosted-cp-0-0-4", + }, + { + title: "upgrade: no upgrades are available for this type of templates", + upgrade: true, + templateType: TemplateVSphereHostedCP, + expectedErr: errors.New("invalid templates configuration. 
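The scenarios call testingConfig.Standalone.SetDefaults / testingConfig.Hosted.SetDefaults with the map returned by ApplyClusterTemplateAccessRules, but SetDefaults itself is outside this diff. A plausible sketch of the decision it has to make with the helpers defined above (pick a source template, plus a target template when an upgrade is requested) follows; the function name, package name, and control flow here are assumptions, only the templates.* helpers are defined in this change.

```go
// Hypothetical sketch of template resolution against the ClusterTemplates
// exposed in the target namespace; not the real config.SetDefaults.
package configsketch

import (
	hmc "github.com/Mirantis/hmc/api/v1alpha1"
	"github.com/Mirantis/hmc/test/e2e/templates"
)

// available is the map returned by templates.ApplyClusterTemplateAccessRules.
func resolveForTest(available map[string][]hmc.AvailableUpgrade, typ templates.Type, tmpl, upgradeTmpl string, upgrade bool) (source, target string, err error) {
	if !upgrade {
		if tmpl == "" {
			// nothing pinned in the e2e config: take the latest supported template of this type
			source, err = templates.FindTemplate(available, typ)
			return source, "", err
		}
		// a template was pinned: just make sure it is exposed in the namespace
		return tmpl, "", templates.ValidateTemplate(available, tmpl)
	}
	if tmpl != "" && upgradeTmpl != "" {
		// both ends pinned: only the upgrade sequence needs to be checked
		return tmpl, upgradeTmpl, templates.ValidateUpgradeSequence(available, tmpl, upgradeTmpl)
	}
	// otherwise pick a source with available upgrades (and the newest target for it)
	return templates.FindTemplatesToUpgrade(available, typ, tmpl)
}
```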
diff --git a/test/e2e/templates/templates_test.go b/test/e2e/templates/templates_test.go
new file mode 100644
index 000000000..06e4c9688
--- /dev/null
+++ b/test/e2e/templates/templates_test.go
@@ -0,0 +1,131 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package templates
+
+import (
+	"errors"
+	"testing"
+
+	. "github.com/onsi/gomega"
+
+	hmc "github.com/Mirantis/hmc/api/v1alpha1"
+)
+
+func Test_Find(t *testing.T) {
+	templates := map[string][]hmc.AvailableUpgrade{
+		"aws-standalone-cp-0-0-1": {
+			{Name: "aws-standalone-cp-0-0-3"},
+			{Name: "aws-standalone-cp-0-0-2"},
+		},
+		"aws-standalone-cp-0-0-3": {},
+		"aws-standalone-cp-0-0-2": {},
+		"aws-hosted-cp-0-0-1": {
+			{Name: "aws-hosted-cp-0-0-4"},
+			{Name: "aws-hosted-cp-0-0-2"},
+		},
+		"aws-hosted-cp-0-0-4": {},
+		"aws-hosted-cp-0-0-2": {},
+		"azure-standalone-cp-0-0-1": {
+			{Name: "azure-standalone-cp-0-0-2"},
+		},
+		"azure-hosted-cp-0-0-1": {
+			{Name: "azure-hosted-cp-0-0-2"},
+		},
+		"vsphere-standalone-cp-0-0-1": {
+			{Name: "vsphere-standalone-cp-0-0-2"},
+		},
+		"vsphere-hosted-cp-0-0-1": {},
+	}
+	for _, tt := range []struct {
+		title string
+		upgrade bool
+		sourceTemplate string
+		clusterTemplates map[string][]hmc.AvailableUpgrade
+		templateType Type
+		expectedTemplate string
+		expectedUpgradeTemplate string
+		expectedErr error
+	}{
+		{
+			title: "no templates of the provided type supported",
+			templateType: "aws-unsupported-cp",
+			expectedErr: errors.New("no Template of the aws-unsupported-cp type is supported"),
+		},
+		{
+			title: "should find latest template for aws-hosted-cp",
+			templateType: TemplateAWSHostedCP,
+			expectedTemplate: "aws-hosted-cp-0-0-4",
+		},
+		{
+			title: "upgrade: no upgrades are available for this type of templates",
+			upgrade: true,
+			templateType: TemplateVSphereHostedCP,
+			expectedErr: errors.New("invalid templates configuration. No vsphere-hosted-cp templates are available for the upgrade"),
+		},
+		{
+			title: "upgrade: source template provided but it's not supported",
+			upgrade: true,
+			sourceTemplate: "aws-standalone-cp-0-0-1-1",
+			templateType: TemplateAWSStandaloneCP,
+			expectedErr: errors.New("invalid templates configuration. Template aws-standalone-cp-0-0-1-1 is not in the list of supported templates"),
+		},
+		{
+			title: "upgrade: source template provided but no upgrades are available",
+			upgrade: true,
+			sourceTemplate: "aws-standalone-cp-0-0-3",
+			templateType: TemplateAWSStandaloneCP,
+			expectedErr: errors.New("invalid templates configuration. No upgrades are available from the Template aws-standalone-cp-0-0-3"),
+		},
+		{
+			title: "upgrade: source template provided and the upgrade template was found",
+			upgrade: true,
+			sourceTemplate: "aws-standalone-cp-0-0-1",
+			templateType: TemplateAWSStandaloneCP,
+			expectedTemplate: "aws-standalone-cp-0-0-1",
+			expectedUpgradeTemplate: "aws-standalone-cp-0-0-3",
+		},
+		{
+			title: "upgrade: no templates are available for the upgrade for this type of templates",
+			upgrade: true,
+			templateType: TemplateVSphereHostedCP,
+			expectedErr: errors.New("invalid templates configuration. No vsphere-hosted-cp templates are available for the upgrade"),
+		},
+		{
+			title: "upgrade: should find latest template with available upgrades",
+			upgrade: true,
+			templateType: TemplateAWSHostedCP,
+			expectedTemplate: "aws-hosted-cp-0-0-1",
+			expectedUpgradeTemplate: "aws-hosted-cp-0-0-4",
+		},
+	} {
+		t.Run(tt.title, func(t *testing.T) {
+			g := NewWithT(t)
+
+			var template, upgradeTemplate string
+			var err error
+			if tt.upgrade {
+				template, upgradeTemplate, err = FindTemplatesToUpgrade(templates, tt.templateType, tt.sourceTemplate)
+			} else {
+				template, err = FindTemplate(templates, tt.templateType)
+			}
+			if tt.expectedErr != nil {
+				g.Expect(err).To(MatchError(tt.expectedErr))
+			} else {
+				g.Expect(err).To(Succeed())
+			}
+			g.Expect(template).To(Equal(tt.expectedTemplate))
+			g.Expect(upgradeTemplate).To(Equal(tt.expectedUpgradeTemplate))
+		})
+	}
+}
diff --git a/test/utils/utils.go b/test/utils/utils.go
index 56ce5ee33..10501e7dd 100644
--- a/test/utils/utils.go
+++ b/test/utils/utils.go
@@ -107,7 +107,7 @@ func GetProjectDir() (string, error) {
 	if err != nil {
 		return wd, err
 	}
-	wd = strings.ReplaceAll(wd, "/test/e2e", "")
+	wd = strings.ReplaceAll(wd, "/test/e2e/scenarios", "")
 	return wd, nil
 }
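Test_Find above covers the Find helpers but not the Validate ones. For reference, a short illustrative check in the same package and spirit (not part of this change set) could look like the following; it only calls functions defined in templates.go with a local fixture map.

```go
// Illustrative only - not part of this change set.
package templates

import (
	"testing"

	. "github.com/onsi/gomega"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
)

func Test_Validate_example(t *testing.T) {
	g := NewWithT(t)

	templates := map[string][]hmc.AvailableUpgrade{
		"aws-standalone-cp-0-0-1": {{Name: "aws-standalone-cp-0-0-2"}},
		"aws-standalone-cp-0-0-2": {},
	}

	// a template is valid as long as it is present in the supported map
	g.Expect(ValidateTemplate(templates, "aws-standalone-cp-0-0-1")).To(Succeed())
	g.Expect(ValidateTemplate(templates, "missing-template")).To(HaveOccurred())

	// an upgrade sequence is valid only if the target is an available upgrade of the source
	g.Expect(ValidateUpgradeSequence(templates, "aws-standalone-cp-0-0-1", "aws-standalone-cp-0-0-2")).To(Succeed())
	g.Expect(ValidateUpgradeSequence(templates, "aws-standalone-cp-0-0-2", "aws-standalone-cp-0-0-1")).To(HaveOccurred())
}
```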