diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index fe2fa81cd..5d0104d84 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -1,6 +1,6 @@ name: CI on: - pull_request_target: + pull_request: types: - labeled - opened @@ -23,29 +23,14 @@ env: jobs: build: concurrency: - group: build-${{ github.head_ref || github.run_id }} + group: ${{ github.head_ref || github.run_id }} cancel-in-progress: true name: Build and Unit Test runs-on: ubuntu-latest - outputs: - version: ${{ steps.vars.outputs.version }} - clustername: ${{ steps.vars.outputs.clustername }} - pr: ${{ steps.pr.outputs.result }} steps: - - name: Get PR ref - uses: actions/github-script@v7 - id: pr - with: - script: | - const { data: pullRequest } = await github.rest.pulls.get({ - ...context.repo, - pull_number: context.payload.pull_request.number, - }); - return pullRequest - name: Checkout repository uses: actions/checkout@v4 with: - ref: ${{fromJSON(steps.pr.outputs.result).merge_commit_sha}} fetch-depth: 0 - name: Setup Go uses: actions/setup-go@v5 @@ -59,6 +44,31 @@ jobs: - name: Unit tests run: | make test + - name: Build HMC controller image + run: | + make docker-build + + push: + concurrency: + group: push-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + name: E2E Push Images and Charts to GHCR + runs-on: ubuntu-latest + needs: build + if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} + outputs: + version: ${{ steps.vars.outputs.version }} + clustername: ${{ steps.vars.outputs.clustername }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: false - name: Set up Buildx uses: docker/setup-buildx-action@v3 - name: Login to GHCR @@ -73,7 +83,8 @@ jobs: GIT_VERSION=$(git describe --tags --always) echo "version=${GIT_VERSION:1}" >> $GITHUB_OUTPUT echo 
"clustername=ci-$(date +%s | cut -b6-10)" >> $GITHUB_OUTPUT - - name: Build and push HMC controller image + - name: Push HMC Controller Image to GHCR + if: uses: docker/build-push-action@v6 with: build-args: | @@ -85,7 +96,7 @@ jobs: push: true cache-from: type=gha cache-to: type=gha,mode=max - - name: Prepare and push HMC template charts + - name: Prepare and push HMC template charts to GHCR run: | make hmc-chart-release make helm-push @@ -93,28 +104,27 @@ jobs: controller-e2etest: name: E2E Controller runs-on: ubuntu-latest - needs: build + if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} + needs: push concurrency: group: controller-${{ github.head_ref || github.run_id }} cancel-in-progress: true outputs: - clustername: ${{ needs.build.outputs.clustername }} - version: ${{ needs.build.outputs.version }} - pr: ${{ needs.build.outputs.pr }} + clustername: ${{ needs.push.outputs.clustername }} + version: ${{ needs.push.outputs.version }} steps: - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}} - name: Setup kubectl uses: azure/setup-kubectl@v4 - name: Run E2E tests env: GINKGO_LABEL_FILTER: 'controller' - MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} - IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' - VERSION: ${{ needs.build.outputs.version }} + MANAGED_CLUSTER_NAME: ${{ needs.push.outputs.clustername }} + IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.push.outputs.version }}' + VERSION: ${{ needs.push.outputs.version }} run: | make test-e2e - name: Archive test results @@ -129,14 +139,13 @@ jobs: name: E2E Cloud Providers runs-on: ubuntu-latest if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} - needs: build + needs: push concurrency: group: cloud-${{ github.head_ref || github.run_id }} cancel-in-progress: true outputs: - clustername: ${{ needs.build.outputs.clustername }} - version: 
${{ needs.build.outputs.version }} - pr: ${{ needs.build.outputs.pr }} + clustername: ${{ needs.push.outputs.clustername }} + version: ${{ needs.push.outputs.version }} env: AWS_REGION: us-west-2 AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} @@ -151,7 +160,6 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}} - name: Setup Go uses: actions/setup-go@v5 with: @@ -162,9 +170,9 @@ jobs: - name: Run E2E tests env: GINKGO_LABEL_FILTER: 'provider:cloud' - MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} - IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' - VERSION: ${{ needs.build.outputs.version }} + MANAGED_CLUSTER_NAME: ${{ needs.push.outputs.clustername }} + IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.push.outputs.version }}' + VERSION: ${{ needs.push.outputs.version }} run: | make test-e2e - name: Archive test results @@ -179,14 +187,13 @@ jobs: name: E2E On-Prem Providers runs-on: self-hosted if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} - needs: build + needs: push concurrency: group: onprem-${{ github.head_ref || github.run_id }} cancel-in-progress: true outputs: - clustername: ${{ needs.build.outputs.clustername }} - version: ${{ needs.build.outputs.version }} - pr: ${{ needs.build.outputs.pr }} + clustername: ${{ needs.push.outputs.clustername }} + version: ${{ needs.push.outputs.version }} env: VSPHERE_USER: ${{ secrets.CI_VSPHERE_USER }} VSPHERE_PASSWORD: ${{ secrets.CI_VSPHERE_PASSWORD }} @@ -205,7 +212,6 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}} - name: Setup Go uses: actions/setup-go@v5 with: @@ -215,9 +221,9 @@ jobs: - name: Run E2E tests env: GINKGO_LABEL_FILTER: 'provider:onprem' - MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} - IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' - VERSION: ${{ 
needs.build.outputs.version }} + MANAGED_CLUSTER_NAME: ${{ needs.push.outputs.clustername }} + IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.push.outputs.version }}' + VERSION: ${{ needs.push.outputs.version }} run: | make test-e2e - name: Archive test results @@ -229,23 +235,21 @@ jobs: test/e2e/*.log cleanup: - name: Cleanup + name: E2E Cleanup needs: - - build + - push - provider-cloud-e2etest runs-on: ubuntu-latest - if: ${{ always() && !contains(needs.provider-cloud-e2etest.result, 'skipped') && contains(needs.build.result, 'success') }} + if: ${{ always() && !contains(needs.provider-cloud-e2etest.result, 'skipped') && contains(needs.push.result, 'success') }} timeout-minutes: 15 outputs: - clustername: ${{ needs.build.outputs.clustername }} - version: ${{ needs.build.outputs.version }} - pr: ${{ needs.build.outputs.pr }} + clustername: ${{ needs.push.outputs.clustername }} + version: ${{ needs.push.outputs.version }} steps: - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - ref: ${{fromJSON(needs.build.outputs.pr).merge_commit_sha}} - name: Setup Go uses: actions/setup-go@v5 with: @@ -260,7 +264,7 @@ jobs: AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} - CLUSTER_NAME: '${{ needs.build.outputs.clustername }}' + CLUSTER_NAME: '${{ needs.push.outputs.clustername }}' run: | make dev-aws-nuke - make dev-azure-nuke + make dev-azure-nuke \ No newline at end of file diff --git a/config/dev/eks-managedcluster.yaml b/config/dev/eks-managedcluster.yaml index d17cc48d9..f45ab58f8 100644 --- a/config/dev/eks-managedcluster.yaml +++ b/config/dev/eks-managedcluster.yaml @@ -4,7 +4,7 @@ metadata: name: eks-dev namespace: ${NAMESPACE} spec: - template: aws-eks-0-0-2 + template: aws-eks-0-0-3 credential: "aws-cluster-identity-cred" config: region: ${AWS_REGION} diff --git a/templates/cluster/aws-eks/Chart.yaml 
b/templates/cluster/aws-eks/Chart.yaml index f0707095d..b8b215478 100644 --- a/templates/cluster/aws-eks/Chart.yaml +++ b/templates/cluster/aws-eks/Chart.yaml @@ -6,7 +6,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.2 +version: 0.0.3 annotations: cluster.x-k8s.io/provider: infrastructure-aws cluster.x-k8s.io/infrastructure-aws: v1beta2 diff --git a/templates/provider/hmc-templates/files/templates/aws-eks-0-0-2.yaml b/templates/provider/hmc-templates/files/templates/aws-eks-0-0-3.yaml similarity index 77% rename from templates/provider/hmc-templates/files/templates/aws-eks-0-0-2.yaml rename to templates/provider/hmc-templates/files/templates/aws-eks-0-0-3.yaml index 57e5fd6a5..e30176fec 100644 --- a/templates/provider/hmc-templates/files/templates/aws-eks-0-0-2.yaml +++ b/templates/provider/hmc-templates/files/templates/aws-eks-0-0-3.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ClusterTemplate metadata: - name: aws-eks-0-0-2 + name: aws-eks-0-0-3 annotations: helm.sh/resource-policy: keep spec: helm: chartName: aws-eks - chartVersion: 0.0.2 + chartVersion: 0.0.3 diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index d37c4f8f5..799a10a66 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -191,6 +191,11 @@ rules: - clusterprofiles - clustersummaries verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - controlplane.cluster.x-k8s.io + resources: + - awsmanagedcontrolplanes + verbs: {{ include "rbac.viewerVerbs" . 
| nindent 4 }} - apiGroups: - hmc.mirantis.com resources: diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index e76a4c245..48aab8d11 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -217,8 +217,10 @@ func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, provider "describe", "cluster", clusterName, "--namespace", internalutils.DefaultSystemNamespace, "--show-conditions=all") output, err := utils.Run(cmd) if err != nil { - utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err)) - return + if !strings.Contains(err.Error(), "unable to verify clusterctl version") { + utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err)) + return + } } err = os.WriteFile(filepath.Join("test/e2e", host+"-"+"clusterctl.log"), output, 0o644) if err != nil { diff --git a/test/e2e/kubeclient/kubeclient.go b/test/e2e/kubeclient/kubeclient.go index e3801e4e0..d5e328aaa 100644 --- a/test/e2e/kubeclient/kubeclient.go +++ b/test/e2e/kubeclient/kubeclient.go @@ -279,3 +279,15 @@ func (kc *KubeClient) ListK0sControlPlanes( Resource: "k0scontrolplanes", }, clusterName) } + +func (kc *KubeClient) ListAWSManagedControlPlanes( + ctx context.Context, clusterName string, +) ([]unstructured.Unstructured, error) { + GinkgoHelper() + + return kc.listResource(ctx, schema.GroupVersionResource{ + Group: "controlplane.cluster.x-k8s.io", + Version: "v1beta2", + Resource: "awsmanagedcontrolplanes", + }, clusterName) +} diff --git a/test/e2e/managedcluster/managedcluster.go b/test/e2e/managedcluster/managedcluster.go index 72efa96f8..e8be90e15 100644 --- a/test/e2e/managedcluster/managedcluster.go +++ b/test/e2e/managedcluster/managedcluster.go @@ -50,6 +50,7 @@ const ( TemplateAzureStandaloneCP Template = "azure-standalone-cp" TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp" TemplateVSphereHostedCP Template = "vsphere-hosted-cp" + TemplateEKSCP Template = "aws-eks-cp" ) //go:embed 
resources/aws-standalone-cp.yaml.tpl @@ -70,6 +71,9 @@ var vsphereStandaloneCPManagedClusterTemplateBytes []byte //go:embed resources/vsphere-hosted-cp.yaml.tpl var vsphereHostedCPManagedClusterTemplateBytes []byte +//go:embed resources/aws-eks-cp.yaml.tpl +var eksCPManagedClusterTemplateBytes []byte + func FilterAllProviders() []string { return []string{ utils.HMCControllerLabel, @@ -134,6 +138,8 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes case TemplateAzureStandaloneCP: managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes + case TemplateEKSCP: + managedClusterTemplateBytes = eksCPManagedClusterTemplateBytes default: Fail(fmt.Sprintf("Unsupported template: %s", templateName)) } diff --git a/test/e2e/managedcluster/providervalidator.go b/test/e2e/managedcluster/providervalidator.go index 4df0fa84b..3ccb5a074 100644 --- a/test/e2e/managedcluster/providervalidator.go +++ b/test/e2e/managedcluster/providervalidator.go @@ -65,14 +65,22 @@ func NewProviderValidator(template Template, clusterName string, action Validati case TemplateAWSStandaloneCP, TemplateAWSHostedCP: resourcesToValidate["ccm"] = validateCCM resourceOrder = append(resourceOrder, "ccm") + case TemplateEKSCP: + resourcesToValidate["control-planes"] = validateAWSManagedControlPlanes + delete(resourcesToValidate, "csi-driver") case TemplateAzureStandaloneCP, TemplateVSphereStandaloneCP: delete(resourcesToValidate, "csi-driver") } } else { + validateCPDeletedFunc := validateK0sControlPlanesDeleted + if template == TemplateEKSCP { + validateCPDeletedFunc = validateAWSManagedControlPlanesDeleted + } + resourcesToValidate = map[string]resourceValidationFunc{ "clusters": validateClusterDeleted, "machinedeployments": validateMachineDeploymentsDeleted, - "control-planes": validateK0sControlPlanesDeleted, + "control-planes": validateCPDeletedFunc, } resourceOrder = []string{"clusters", 
"machinedeployments", "control-planes"} } diff --git a/test/e2e/managedcluster/resources/aws-eks-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-eks-cp.yaml.tpl new file mode 100644 index 000000000..649b06f05 --- /dev/null +++ b/test/e2e/managedcluster/resources/aws-eks-cp.yaml.tpl @@ -0,0 +1,16 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: ${MANAGED_CLUSTER_NAME}-eks +spec: + template: aws-eks-0-0-3 + credential: ${AWS_CLUSTER_IDENTITY}-cred + config: + region: ${AWS_REGION} + workersNumber: ${WORKERS_NUMBER:=1} + clusterIdentity: + name: ${AWS_CLUSTER_IDENTITY}-cred + namespace: ${NAMESPACE} + publicIP: ${AWS_PUBLIC_IP:=true} + worker: + instanceType: ${AWS_INSTANCE_TYPE:=t3.small} diff --git a/test/e2e/managedcluster/validate_deleted.go b/test/e2e/managedcluster/validate_deleted.go index e09d4c254..4272f9fe5 100644 --- a/test/e2e/managedcluster/validate_deleted.go +++ b/test/e2e/managedcluster/validate_deleted.go @@ -101,3 +101,21 @@ func validateK0sControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeCli return nil } + +func validateAWSManagedControlPlanesDeleted(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { + controlPlanes, err := kc.ListAWSManagedControlPlanes(ctx, clusterName) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + + var cpNames []string + if len(controlPlanes) > 0 { + for _, cp := range controlPlanes { + cpNames = append(cpNames, cp.GetName()) + + return fmt.Errorf("AWS Managed control planes still exist: %s", cpNames) + } + } + + return nil +} diff --git a/test/e2e/managedcluster/validate_deployed.go b/test/e2e/managedcluster/validate_deployed.go index bae823f75..7a600ff3f 100644 --- a/test/e2e/managedcluster/validate_deployed.go +++ b/test/e2e/managedcluster/validate_deployed.go @@ -55,7 +55,7 @@ func validateCluster(ctx context.Context, kc *kubeclient.KubeClient, clusterName Fail(err.Error()) } - return utils.ValidateConditionsTrue(cluster) + return 
utils.NewConditionsValidator().IfTrue(cluster) } func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { @@ -79,7 +79,7 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam Fail(err.Error()) } - if err := utils.ValidateConditionsTrue(&md); err != nil { + if err := utils.NewConditionsValidator().IfTrue(&md); err != nil { return err } } @@ -90,7 +90,7 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam Fail(err.Error()) } - if err := utils.ValidateConditionsTrue(&machine); err != nil { + if err := utils.NewConditionsValidator().IfTrue(&machine); err != nil { return err } } @@ -113,7 +113,7 @@ func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, cl // k0s does not use the metav1.Condition type for status.conditions, // instead it uses a custom type so we can't use - // ValidateConditionsTrue here, instead we'll check for "ready: true". + // ordinary conditions validation here, instead we'll check for "ready: true". objStatus, found, err := unstructured.NestedFieldCopy(controlPlane.Object, "status") if !found { return fmt.Errorf("no status found for %s: %s", objKind, objName) @@ -139,6 +139,26 @@ func validateK0sControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, cl return nil } +func validateAWSManagedControlPlanes(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { + controlPlanes, err := kc.ListAWSManagedControlPlanes(ctx, clusterName) + if err != nil { + return err + } + + for _, controlPlane := range controlPlanes { + if err := utils.ValidateObjectNamePrefix(&controlPlane, clusterName); err != nil { + Fail(err.Error()) + } + + // EKSControlPlaneCreating condition very often has READY=False, SEVERITY=Info and REASON=created (this is fine). 
+ if err := utils.NewConditionsValidator(utils.WithExcluded([]string{"EKSControlPlaneCreating"})).IfTrue(&controlPlane); err != nil { + return err + } + } + + return nil +} + // validateCSIDriver validates that the provider CSI driver is functioning // by creating a PVC and verifying it enters "Bound" status. func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { diff --git a/test/e2e/provider_aws_test.go b/test/e2e/provider_aws_test.go index 6614698b4..7712ff74b 100644 --- a/test/e2e/provider_aws_test.go +++ b/test/e2e/provider_aws_test.go @@ -72,7 +72,7 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order } }) - It("should work with an AWS provider", func() { + XIt("should work with an AWS provider", func() { // Deploy a standalone cluster and verify it is running/ready. // Deploy standalone with an xlarge instance since it will also be // hosting the hosted cluster. @@ -186,4 +186,45 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order time.Second).Should(Succeed()) */ }) + + It("should work with an EKS provider", func() { + // Deploy a standalone cluster and verify it is running/ready. 
+ GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.small") + + cmd := exec.Command("kubectl", "get", "clustertemplates", "-n", "hmc-system", "-o", "yaml") + output, err := utils.Run(cmd) + _, _ = fmt.Fprintln(GinkgoWriter, string(output)) + Expect(err).NotTo(HaveOccurred()) + + templateBy(managedcluster.TemplateEKSCP, "creating a ManagedCluster for EKS") + sd := managedcluster.GetUnstructured(managedcluster.TemplateEKSCP) + clusterName = sd.GetName() + + standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + + templateBy(managedcluster.TemplateEKSCP, "waiting for infrastructure to deploy successfully") + deploymentValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateEKSCP, + clusterName, + managedcluster.ValidationActionDeploy, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(60 * time.Minute).WithPolling(30 * time.Second).Should(Succeed()) + + // --- clean up --- + templateBy(managedcluster.TemplateEKSCP, "deleting the ManagedCluster for EKS") + Expect(standaloneDeleteFunc()).NotTo(HaveOccurred()) + + deletionValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateEKSCP, + clusterName, + managedcluster.ValidationActionDelete, + ) + Eventually(func() error { + return deletionValidator.Validate(context.Background(), kc) + }).WithTimeout(15 * time.Minute).WithPolling(10 * + time.Second).Should(Succeed()) + }) }) diff --git a/test/e2e/provider_azure_test.go b/test/e2e/provider_azure_test.go index fcdbe27c7..9e2b76abb 100644 --- a/test/e2e/provider_azure_test.go +++ b/test/e2e/provider_azure_test.go @@ -32,7 +32,7 @@ import ( "github.com/Mirantis/hmc/test/utils" ) -var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() { +var _ = XContext("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() { var ( kc *kubeclient.KubeClient standaloneClient 
*kubeclient.KubeClient @@ -77,7 +77,7 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or } }) - It("should work with an Azure provider", func() { + XIt("should work with an Azure provider", func() { templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster") sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP) sdName = sd.GetName() diff --git a/test/e2e/provider_vsphere_test.go b/test/e2e/provider_vsphere_test.go index 202d7deda..251d4c18f 100644 --- a/test/e2e/provider_vsphere_test.go +++ b/test/e2e/provider_vsphere_test.go @@ -29,7 +29,7 @@ import ( "github.com/Mirantis/hmc/test/e2e/managedcluster/vsphere" ) -var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() { +var _ = XContext("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() { var ( kc *kubeclient.KubeClient deleteFunc func() error @@ -78,7 +78,7 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere" } }) - It("should deploy standalone managed cluster", func() { + XIt("should deploy standalone managed cluster", func() { By("creating a managed cluster") d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP) clusterName = d.GetName() diff --git a/test/utils/utils.go b/test/utils/utils.go index 56ce5ee33..792125589 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -24,6 +24,7 @@ import ( . 
"github.com/onsi/ginkgo/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/strings/slices" "github.com/Mirantis/hmc/internal/utils/status" ) @@ -111,10 +112,28 @@ func GetProjectDir() (string, error) { return wd, nil } -// ValidateConditionsTrue iterates over the conditions of the given +type ConditionsValidator struct { + excludedConditions []string +} + +func NewConditionsValidator(options ...func(*ConditionsValidator)) *ConditionsValidator { + cv := &ConditionsValidator{} + for _, o := range options { + o(cv) + } + return cv +} + +func WithExcluded(excludedConditions []string) func(*ConditionsValidator) { + return func(cv *ConditionsValidator) { + cv.excludedConditions = excludedConditions + } +} + +// IfTrue iterates over the conditions of the given // unstructured object and returns an error if any of the conditions are not // true. Conditions are expected to be of type metav1.Condition. -func ValidateConditionsTrue(unstrObj *unstructured.Unstructured) error { +func (cv *ConditionsValidator) IfTrue(unstrObj *unstructured.Unstructured) error { objKind, objName := status.ObjKindName(unstrObj) conditions, err := status.ConditionsFromUnstructured(unstrObj) @@ -129,6 +148,10 @@ func ValidateConditionsTrue(unstrObj *unstructured.Unstructured) error { continue } + if slices.Contains(cv.excludedConditions, c.Type) { + continue + } + errs = errors.Join(errors.New(ConvertConditionsToString(c)), errs) }