From 38e489c4de92fe300dc88dc9255a10909e6ec4a8 Mon Sep 17 00:00:00 2001 From: Wahab Ali Date: Wed, 16 Oct 2024 10:05:29 -0400 Subject: [PATCH] Reconcile MultiClusterService --- Makefile | 8 +- api/v1alpha1/managedcluster_types.go | 7 +- api/v1alpha1/multiclusterservice_types.go | 14 +- config/dev/aws-managedcluster.yaml | 1 + config/dev/multiclusterservice.yaml | 13 ++ .../controller/managedcluster_controller.go | 98 +-------- .../multiclusterservice_controller.go | 161 +++++++++++++- .../multiclusterservice_controller_test.go | 203 +++++++++++++++--- internal/sveltos/profile.go | 181 ++++++++++------ .../files/templates/ingress-nginx-4-11-3.yaml | 10 + .../hmc.mirantis.com_managedclusters.yaml | 23 +- ...hmc.mirantis.com_multiclusterservices.yaml | 23 +- .../hmc/templates/rbac/controller/roles.yaml | 1 + .../Chart.lock | 0 .../Chart.yaml | 0 .../service/ingress-nginx-4-11-3/Chart.lock | 6 + .../service/ingress-nginx-4-11-3/Chart.yaml | 10 + 17 files changed, 552 insertions(+), 207 deletions(-) create mode 100644 config/dev/multiclusterservice.yaml create mode 100644 templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml rename templates/service/{ingress-nginx => ingress-nginx-4-11-0}/Chart.lock (100%) rename templates/service/{ingress-nginx => ingress-nginx-4-11-0}/Chart.yaml (100%) create mode 100644 templates/service/ingress-nginx-4-11-3/Chart.lock create mode 100644 templates/service/ingress-nginx-4-11-3/Chart.yaml diff --git a/Makefile b/Makefile index 6025e5650..9222b7c1b 100644 --- a/Makefile +++ b/Makefile @@ -379,6 +379,8 @@ FLUX_SOURCE_REPO_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmrepositories-$(FLUX_SOURC FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmchart-$(FLUX_SOURCE_VERSION).yaml FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version') FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/helm-$(FLUX_HELM_VERSION).yaml +SVELTOS_VERSION ?= $(shell go mod edit 
-json | jq -r '.Require[] | select(.Path == "github.com/projectsveltos/libsveltos") | .Version') +SVELTOS_CRD ?= $(EXTERNAL_CRD_DIR)/sveltos-$(SVELTOS_VERSION).yaml ## Tool Binaries KUBECTL ?= kubectl @@ -445,8 +447,12 @@ $(FLUX_SOURCE_REPO_CRD): $(EXTERNAL_CRD_DIR) rm -f $(FLUX_SOURCE_REPO_CRD) curl -s https://raw.githubusercontent.com/fluxcd/source-controller/$(FLUX_SOURCE_VERSION)/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml > $(FLUX_SOURCE_REPO_CRD) +$(SVELTOS_CRD): $(EXTERNAL_CRD_DIR) + rm -f $(SVELTOS_CRD) + curl -s https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD) + .PHONY: external-crd -external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) +external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) .PHONY: kind kind: $(KIND) ## Download kind locally if necessary. diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 89a7f9bf9..779c25676 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -71,13 +71,16 @@ type ManagedClusterSpec struct { // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=2147483646 - // Priority sets the priority for the services defined in this spec. + // ServicesPriority sets the priority for the services defined in this spec. // Higher value means higher priority and lower means lower. // In case of conflict with another object managing the service, // the one with higher priority will get to deploy its services. - Priority int32 `json:"priority,omitempty"` + ServicesPriority int32 `json:"servicesPriority,omitempty"` // DryRun specifies whether the template should be applied after validation or only validated. DryRun bool `json:"dryRun,omitempty"` + + // +kubebuilder:default:=false + // StopOnConflict specifies what to do in case of a conflict. // E.g. 
If another object is already managing a service. // By default the remaining services will be deployed even if conflict is detected. diff --git a/api/v1alpha1/multiclusterservice_types.go b/api/v1alpha1/multiclusterservice_types.go index 74916e7ef..5e3633447 100644 --- a/api/v1alpha1/multiclusterservice_types.go +++ b/api/v1alpha1/multiclusterservice_types.go @@ -19,6 +19,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + // MultiClusterServiceFinalizer is the finalizer applied to MultiClusterService objects. + MultiClusterServiceFinalizer = "hmc.mirantis.com/multicluster-service" + // MultiClusterServiceKind is the string representation of the MultiClusterService kind. + MultiClusterServiceKind = "MultiClusterService" +) + // ServiceSpec represents a Service to be managed type ServiceSpec struct { // Values is the helm values to be passed to the template. @@ -52,11 +59,14 @@ type MultiClusterServiceSpec struct { // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=2147483646 - // Priority sets the priority for the services defined in this spec. + // ServicesPriority sets the priority for the services defined in this spec. // Higher value means higher priority and lower means lower. // In case of conflict with another object managing the service, // the one with higher priority will get to deploy its services. - Priority int32 `json:"priority,omitempty"` + ServicesPriority int32 `json:"servicesPriority,omitempty"` + + // +kubebuilder:default:=false + + // StopOnConflict specifies what to do in case of a conflict. // E.g. If another object is already managing a service. // By default the remaining services will be deployed even if conflict is detected.
diff --git a/config/dev/aws-managedcluster.yaml b/config/dev/aws-managedcluster.yaml index 6400d55d3..c2c17b3b2 100644 --- a/config/dev/aws-managedcluster.yaml +++ b/config/dev/aws-managedcluster.yaml @@ -19,6 +19,7 @@ spec: workersNumber: 1 installBeachHeadServices: false template: aws-standalone-cp-0-0-2 + servicesPriority: 100 services: - template: kyverno-3-2-6 name: kyverno diff --git a/config/dev/multiclusterservice.yaml b/config/dev/multiclusterservice.yaml new file mode 100644 index 000000000..afcf2c9b1 --- /dev/null +++ b/config/dev/multiclusterservice.yaml @@ -0,0 +1,13 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: MultiClusterService +metadata: + name: global-ingress +spec: + servicesPriority: 1000 + clusterSelector: + matchLabels: + app.kubernetes.io/managed-by: Helm + services: + - template: ingress-nginx-4-11-3 + name: ingress-nginx + namespace: ingress-nginx diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index a55303346..3b80ee271 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -48,7 +48,6 @@ import ( hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/helm" "github.com/Mirantis/hmc/internal/telemetry" - "github.com/Mirantis/hmc/internal/utils" ) const ( @@ -377,65 +376,12 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, managedCluster *h // updateServices reconciles services provided in ManagedCluster.Spec.Services. // TODO(https://github.com/Mirantis/hmc/issues/361): Set status to ManagedCluster object at appropriate places. func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.ManagedCluster) (ctrl.Result, error) { - l := ctrl.LoggerFrom(ctx) - opts := []sveltos.HelmChartOpts{} - - // NOTE: The Profile object will be updated with no helm - // charts if len(mc.Spec.Services) == 0. 
This will result in the - // helm charts being uninstalled on matching clusters if - // Profile originally had len(m.Spec.Sevices) > 0. - for _, svc := range mc.Spec.Services { - if svc.Disable { - l.Info(fmt.Sprintf("Skip adding Template (%s) to Profile (%s) because Disable=true", svc.Template, mc.Name)) - continue - } - - tmpl := &hmc.ServiceTemplate{} - tmplRef := client.ObjectKey{Name: svc.Template, Namespace: mc.Namespace} - if err := r.Get(ctx, tmplRef, tmpl); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err) - } - - source, err := r.getServiceTemplateSource(ctx, tmpl) - if err != nil { - return ctrl.Result{}, fmt.Errorf("could not get repository url: %w", err) - } - - opts = append(opts, sveltos.HelmChartOpts{ - Values: svc.Values, - RepositoryURL: source.Spec.URL, - // We don't have repository name so chart name becomes repository name. - RepositoryName: tmpl.Spec.Helm.ChartName, - ChartName: func() string { - if source.Spec.Type == utils.RegistryTypeOCI { - return tmpl.Spec.Helm.ChartName - } - // Sveltos accepts ChartName in / format for non-OCI. - // We don't have a repository name, so we can use / instead. - // See: https://projectsveltos.github.io/sveltos/addons/helm_charts/. - return fmt.Sprintf("%s/%s", tmpl.Spec.Helm.ChartName, tmpl.Spec.Helm.ChartName) - }(), - ChartVersion: tmpl.Spec.Helm.ChartVersion, - ReleaseName: svc.Name, - ReleaseNamespace: func() string { - if svc.Namespace != "" { - return svc.Namespace - } - return svc.Name - }(), - // The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is because - // the source.Spec.Insecure field is meant to be used for connecting to repositories - // over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for. 
- // See: https://github.com/fluxcd/source-controller/pull/1288 - PlainHTTP: source.Spec.Insecure, - }) + opts, err := helmChartOpts(ctx, r.Client, mc.Namespace, mc.Spec.Services) + if err != nil { + return ctrl.Result{}, err } if _, err := sveltos.ReconcileProfile(ctx, r.Client, mc.Namespace, mc.Name, - map[string]string{ - hmc.FluxHelmChartNamespaceKey: mc.Namespace, - hmc.FluxHelmChartNameKey: mc.Name, - }, sveltos.ReconcileProfileOpts{ OwnerReference: &metav1.OwnerReference{ APIVersion: hmc.GroupVersion.String(), @@ -443,8 +389,14 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M Name: mc.Name, UID: mc.UID, }, + LabelSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + hmc.FluxHelmChartNamespaceKey: mc.Namespace, + hmc.FluxHelmChartNameKey: mc.Name, + }, + }, HelmChartOpts: opts, - Priority: mc.Spec.Priority, + Priority: mc.Spec.ServicesPriority, StopOnConflict: mc.Spec.StopOnConflict, }); err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile Profile: %w", err) @@ -458,36 +410,6 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil } -// getServiceTemplateSource returns the source (HelmRepository) used by the ServiceTemplate. -// It is fetched by querying for ServiceTemplate -> HelmChart -> HelmRepository. 
-func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) { - tmplRef := client.ObjectKey{Namespace: tmpl.Namespace, Name: tmpl.Name} - - if tmpl.Status.ChartRef == nil { - return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String()) - } - - hc := &sourcev1.HelmChart{} - if err := r.Get(ctx, client.ObjectKey{ - Namespace: tmpl.Status.ChartRef.Namespace, - Name: tmpl.Status.ChartRef.Name, - }, hc); err != nil { - return nil, fmt.Errorf("failed to get HelmChart (%s): %w", tmplRef.String(), err) - } - - repo := &sourcev1.HelmRepository{} - if err := r.Get(ctx, client.ObjectKey{ - // Using chart's namespace because it's source - // (helm repository in this case) should be within the same namespace. - Namespace: hc.Namespace, - Name: hc.Spec.SourceRef.Name, - }, repo); err != nil { - return nil, fmt.Errorf("failed to get HelmRepository (%s): %w", tmplRef.String(), err) - } - - return repo, nil -} - func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, managedCluster *hmc.ManagedCluster, hcChart *chart.Chart) error { install := action.NewInstall(actionConfig) install.DryRun = true diff --git a/internal/controller/multiclusterservice_controller.go b/internal/controller/multiclusterservice_controller.go index 46e5ab497..06077127b 100644 --- a/internal/controller/multiclusterservice_controller.go +++ b/internal/controller/multiclusterservice_controller.go @@ -16,11 +16,19 @@ package controller import ( "context" + "fmt" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Mirantis/hmc/internal/sveltos" + 
"github.com/Mirantis/hmc/internal/utils" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) // MultiClusterServiceReconciler reconciles a MultiClusterService object @@ -29,10 +37,157 @@ type MultiClusterServiceReconciler struct { } // Reconcile reconciles a MultiClusterService object. -func (*MultiClusterServiceReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { - _ = ctrl.LoggerFrom(ctx) +func (r *MultiClusterServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + l.Info("Reconciling MultiClusterService") - // TODO(https://github.com/Mirantis/hmc/issues/455): Implement me. + mcsvc := &hmc.MultiClusterService{} + err := r.Get(ctx, req.NamespacedName, mcsvc) + if apierrors.IsNotFound(err) { + l.Info("MultiClusterService not found, ignoring since object must be deleted") + return ctrl.Result{}, nil + } + if err != nil { + l.Error(err, "Failed to get MultiClusterService") + return ctrl.Result{}, err + } + + if !mcsvc.DeletionTimestamp.IsZero() { + l.Info("Deleting MultiClusterService") + return r.reconcileDelete(ctx, mcsvc) + } + + if controllerutil.AddFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer) { + if err := r.Client.Update(ctx, mcsvc); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update MultiClusterService %s with finalizer %s: %w", mcsvc.Name, hmc.MultiClusterServiceFinalizer, err) + } + return ctrl.Result{}, nil + } + + // By using DefaultSystemNamespace we are enforcing that MultiClusterService + // may only use ServiceTemplates that are present in the hmc-system namespace. 
+ opts, err := helmChartOpts(ctx, r.Client, utils.DefaultSystemNamespace, mcsvc.Spec.Services) + if err != nil { + return ctrl.Result{}, err + } + + if _, err := sveltos.ReconcileClusterProfile(ctx, r.Client, mcsvc.Name, + sveltos.ReconcileProfileOpts{ + OwnerReference: &metav1.OwnerReference{ + APIVersion: hmc.GroupVersion.String(), + Kind: hmc.MultiClusterServiceKind, + Name: mcsvc.Name, + UID: mcsvc.UID, + }, + LabelSelector: mcsvc.Spec.ClusterSelector, + HelmChartOpts: opts, + Priority: mcsvc.Spec.ServicesPriority, + StopOnConflict: mcsvc.Spec.StopOnConflict, + }); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile ClusterProfile: %w", err) + } + + return ctrl.Result{}, nil +} + +// helmChartOpts returns a slice of helm chart options to use with Sveltos. +// Namespace is the namespace of the referred templates in services slice. +func helmChartOpts(ctx context.Context, c client.Client, namespace string, services []hmc.ServiceSpec) ([]sveltos.HelmChartOpts, error) { + l := ctrl.LoggerFrom(ctx) + opts := []sveltos.HelmChartOpts{} + + // NOTE: The Profile/ClusterProfile object will be updated with + // no helm charts if len(mc.Spec.Services) == 0. This will result + // in the helm charts being uninstalled on matching clusters if + // Profile/ClusterProfile originally had len(mc.Spec.Services) > 0. + for _, svc := range services { + if svc.Disable { + l.Info(fmt.Sprintf("Skip adding ServiceTemplate %s because Disable=true", svc.Template)) + continue + } + + tmpl := &hmc.ServiceTemplate{} + // Here we can use the same namespace for all services + // because if the services slice is part of: + // 1. ManagedCluster: Then the referred template must be in its own namespace. + // 2. MultiClusterService: Then the referred template must be in hmc-system namespace.
+ tmplRef := types.NamespacedName{Name: svc.Template, Namespace: namespace} + if err := c.Get(ctx, tmplRef, tmpl); err != nil { + return nil, fmt.Errorf("failed to get ServiceTemplate %s: %w", tmplRef.String(), err) + } + + if tmpl.GetCommonStatus() == nil || tmpl.GetCommonStatus().ChartRef == nil { + return nil, fmt.Errorf("status for ServiceTemplate %s/%s has not been updated yet", tmpl.Namespace, tmpl.Name) + } + + chart := &sourcev1.HelmChart{} + chartRef := types.NamespacedName{ + Namespace: tmpl.GetCommonStatus().ChartRef.Namespace, + Name: tmpl.GetCommonStatus().ChartRef.Name, + } + if err := c.Get(ctx, chartRef, chart); err != nil { + return nil, fmt.Errorf("failed to get HelmChart %s referenced by ServiceTemplate %s: %w", chartRef.String(), tmplRef.String(), err) + } + + repo := &sourcev1.HelmRepository{} + repoRef := types.NamespacedName{ + // Using chart's namespace because it's source + // should be within the same namespace. + Namespace: chart.Namespace, + Name: chart.Spec.SourceRef.Name, + } + if err := c.Get(ctx, repoRef, repo); err != nil { + return nil, fmt.Errorf("failed to get HelmRepository %s: %w", repoRef.String(), err) + } + + chartName := tmpl.Spec.Helm.ChartName + if chartName == "" { + chartName = tmpl.Spec.Helm.ChartRef.Name + } + + opts = append(opts, sveltos.HelmChartOpts{ + Values: svc.Values, + RepositoryURL: repo.Spec.URL, + // We don't have repository name so chart name becomes repository name. + RepositoryName: chartName, + ChartName: func() string { + if repo.Spec.Type == utils.RegistryTypeOCI { + return chartName + } + // Sveltos accepts ChartName in / format for non-OCI. + // We don't have a repository name, so we can use / instead. + // See: https://projectsveltos.github.io/sveltos/addons/helm_charts/. 
+ return fmt.Sprintf("%s/%s", chartName, chartName) + }(), + ChartVersion: tmpl.Spec.Helm.ChartVersion, + ReleaseName: svc.Name, + ReleaseNamespace: func() string { + if svc.Namespace != "" { + return svc.Namespace + } + return svc.Name + }(), + // The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is because + // the source.Spec.Insecure field is meant to be used for connecting to repositories + // over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for. + // See: https://github.com/fluxcd/source-controller/pull/1288 + PlainHTTP: repo.Spec.Insecure, + }) + } + + return opts, nil +} + +func (r *MultiClusterServiceReconciler) reconcileDelete(ctx context.Context, mcsvc *hmc.MultiClusterService) (ctrl.Result, error) { + if err := sveltos.DeleteClusterProfile(ctx, r.Client, mcsvc.Name); err != nil { + return ctrl.Result{}, err + } + + if controllerutil.RemoveFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer) { + if err := r.Client.Update(ctx, mcsvc); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to remove finalizer %s from MultiClusterService %s: %w", hmc.MultiClusterServiceFinalizer, mcsvc.Name, err) + } + } return ctrl.Result{}, nil } diff --git a/internal/controller/multiclusterservice_controller_test.go b/internal/controller/multiclusterservice_controller_test.go index e14ad3dff..b5fe5954f 100644 --- a/internal/controller/multiclusterservice_controller_test.go +++ b/internal/controller/multiclusterservice_controller_test.go @@ -16,66 +16,211 @@ package controller import ( "context" + "time" + helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + "helm.sh/helm/v3/pkg/chart" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - hmcmirantiscomv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" + hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Mirantis/hmc/internal/utils" + sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" ) var _ = Describe("MultiClusterService Controller", func() { Context("When reconciling a resource", func() { - const resourceName = "test-resource" + const ( + testNamespace = utils.DefaultSystemNamespace + serviceTemplateName = "test-service-0-1-0" + helmRepoName = "test-helmrepo" + helmChartName = "test-helmchart" + helmChartReleaseName = "test-helmchart-release" + helmChartVersion = "0.1.0" + helmChartURL = "http://source-controller.hmc-system.svc.cluster.local./helmchart/hmc-system/test-chart/0.1.0.tar.gz" + multiClusterServiceName = "test-multiclusterservice" + ) + + fakeDownloadHelmChartFunc := func(context.Context, *sourcev1.Artifact) (*chart.Chart, error) { + return &chart.Chart{ + Metadata: &chart.Metadata{ + APIVersion: "v2", + Version: helmChartVersion, + Name: helmChartName, + }, + }, nil + } ctx := context.Background() - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - multiclusterservice := &hmcmirantiscomv1alpha1.MultiClusterService{} + namespace := &corev1.Namespace{} + helmChart := &sourcev1.HelmChart{} + helmRepo := &sourcev1.HelmRepository{} + serviceTemplate := &hmc.ServiceTemplate{} + multiClusterService := &hmc.MultiClusterService{} + clusterProfile := &sveltosv1beta1.ClusterProfile{} + + helmRepositoryRef := types.NamespacedName{Namespace: testNamespace, Name: helmRepoName} + helmChartRef := types.NamespacedName{Namespace: testNamespace, Name: 
helmChartName} + serviceTemplateRef := types.NamespacedName{Namespace: testNamespace, Name: serviceTemplateName} + multiClusterServiceRef := types.NamespacedName{Name: multiClusterServiceName} + clusterProfileRef := types.NamespacedName{Name: multiClusterServiceName} BeforeEach(func() { - By("creating the custom resource for the Kind MultiClusterService") - err := k8sClient.Get(ctx, typeNamespacedName, multiclusterservice) - if err != nil && errors.IsNotFound(err) { - resource := &hmcmirantiscomv1alpha1.MultiClusterService{ + By("creating Namespace") + err := k8sClient.Get(ctx, types.NamespacedName{Name: testNamespace}, namespace) + if err != nil && apierrors.IsNotFound(err) { + namespace = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(k8sClient.Create(ctx, namespace)).To(Succeed()) + } + + By("creating HelmRepository") + err = k8sClient.Get(ctx, types.NamespacedName{Name: helmRepoName, Namespace: testNamespace}, helmRepo) + if err != nil && apierrors.IsNotFound(err) { + helmRepo = &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: helmRepoName, + Namespace: testNamespace, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: "oci://test/helmrepo", + }, + } + Expect(k8sClient.Create(ctx, helmRepo)).To(Succeed()) + } + + By("creating HelmChart") + err = k8sClient.Get(ctx, types.NamespacedName{Name: helmChartName, Namespace: testNamespace}, helmChart) + if err != nil && apierrors.IsNotFound(err) { + helmChart = &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: helmChartName, + Namespace: testNamespace, + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: helmRepoName, + }, + }, + } + Expect(k8sClient.Create(ctx, helmChart)).To(Succeed()) + } + + By("updating HelmChart status with artifact URL") + helmChart.Status.URL = helmChartURL + helmChart.Status.Artifact = &sourcev1.Artifact{ + URL: helmChartURL, + 
LastUpdateTime: metav1.Now(), + } + Expect(k8sClient.Status().Update(ctx, helmChart)).Should(Succeed()) + + By("creating ServiceTemplate") + err = k8sClient.Get(ctx, serviceTemplateRef, serviceTemplate) + if err != nil && apierrors.IsNotFound(err) { + serviceTemplate = &hmc.ServiceTemplate{ ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", + Name: serviceTemplateName, + Namespace: testNamespace, + Labels: map[string]string{ + hmc.HMCManagedLabelKey: "true", + }, + }, + Spec: hmc.ServiceTemplateSpec{ + Helm: hmc.HelmSpec{ + ChartVersion: helmChartVersion, + ChartRef: &helmcontrollerv2.CrossNamespaceSourceReference{ + Kind: "HelmChart", + Name: helmChartName, + Namespace: testNamespace, + }, + }, }, - // TODO(user): Specify other spec details if needed. } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + Expect(k8sClient.Create(ctx, serviceTemplate)).To(Succeed()) + + By("creating MultiClusterService") + err = k8sClient.Get(ctx, multiClusterServiceRef, multiClusterService) + if err != nil && apierrors.IsNotFound(err) { + multiClusterService = &hmc.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: multiClusterServiceName, + Finalizers: []string{ + // Reconcile attempts to add this finalizer and returns immediately + // if successful. So adding this finalizer here manually in order + // to avoid having to call reconcile multiple times for this test. + hmc.MultiClusterServiceFinalizer, + }, + }, + Spec: hmc.MultiClusterServiceSpec{ + Services: []hmc.ServiceSpec{ + { + Template: serviceTemplateName, + Name: helmChartReleaseName, + }, + }, + }, + } + Expect(k8sClient.Create(ctx, multiClusterService)).To(Succeed()) } }) AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &hmcmirantiscomv1alpha1.MultiClusterService{} - err := k8sClient.Get(ctx, typeNamespacedName, resource) + By("cleaning up") + multiClusterServiceResource := &hmc.MultiClusterService{} + Expect(k8sClient.Get(ctx, multiClusterServiceRef, multiClusterServiceResource)).NotTo(HaveOccurred()) + + reconciler := &MultiClusterServiceReconciler{Client: k8sClient} + Expect(k8sClient.Delete(ctx, multiClusterService)).To(Succeed()) + // Running reconcile to remove the finalizer and delete the MultiClusterService + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: multiClusterServiceRef}) Expect(err).NotTo(HaveOccurred()) + Eventually(k8sClient.Get(ctx, multiClusterServiceRef, multiClusterService), 1*time.Minute, 5*time.Second).Should(HaveOccurred()) + + Expect(k8sClient.Get(ctx, clusterProfileRef, &sveltosv1beta1.ClusterProfile{})).To(HaveOccurred()) + + serviceTemplateResource := &hmc.ServiceTemplate{} + Expect(k8sClient.Get(ctx, serviceTemplateRef, serviceTemplateResource)).NotTo(HaveOccurred()) + Expect(k8sClient.Delete(ctx, serviceTemplateResource)).To(Succeed()) + + helmChartResource := &sourcev1.HelmChart{} + Expect(k8sClient.Get(ctx, helmChartRef, helmChartResource)).NotTo(HaveOccurred()) + Expect(k8sClient.Delete(ctx, helmChartResource)).To(Succeed()) - By("Cleanup the specific resource instance MultiClusterService") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + helmRepositoryResource := &sourcev1.HelmRepository{} + Expect(k8sClient.Get(ctx, helmRepositoryRef, helmRepositoryResource)).NotTo(HaveOccurred()) + Expect(k8sClient.Delete(ctx, helmRepositoryResource)).To(Succeed()) }) + It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &MultiClusterServiceReconciler{ - Client: k8sClient, + By("reconciling ServiceTemplate used by MultiClusterService") + templateReconciler := TemplateReconciler{ + Client: k8sClient, + downloadHelmChartFunc: 
fakeDownloadHelmChartFunc, } + serviceTemplateReconciler := &ServiceTemplateReconciler{TemplateReconciler: templateReconciler} + _, err := serviceTemplateReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: serviceTemplateRef}) + Expect(err).NotTo(HaveOccurred()) - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, - }) + By("reconciling MultiClusterService") + multiClusterServiceReconciler := &MultiClusterServiceReconciler{Client: k8sClient} + + _, err = multiClusterServiceReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: multiClusterServiceRef}) Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. + + Eventually(k8sClient.Get(ctx, clusterProfileRef, clusterProfile), 1*time.Minute, 5*time.Second).ShouldNot(HaveOccurred()) }) }) }) diff --git a/internal/sveltos/profile.go b/internal/sveltos/profile.go index 7c4f60ccf..8bba498b8 100644 --- a/internal/sveltos/profile.go +++ b/internal/sveltos/profile.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "math" + "unsafe" hmc "github.com/Mirantis/hmc/api/v1alpha1" sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" @@ -32,6 +33,7 @@ import ( type ReconcileProfileOpts struct { OwnerReference *metav1.OwnerReference + LabelSelector metav1.LabelSelector HelmChartOpts []HelmChartOpts Priority int32 StopOnConflict bool @@ -49,97 +51,145 @@ type HelmChartOpts struct { InsecureSkipTLSVerify bool } +// ReconcileClusterProfile reconciles a Sveltos ClusterProfile object. 
+func ReconcileClusterProfile( + ctx context.Context, + cl client.Client, + name string, + opts ReconcileProfileOpts, +) (*sveltosv1beta1.ClusterProfile, error) { + l := ctrl.LoggerFrom(ctx) + obj := objectMeta(opts.OwnerReference) + obj.SetName(name) + + cp := &sveltosv1beta1.ClusterProfile{ + ObjectMeta: obj, + } + + operation, err := ctrl.CreateOrUpdate(ctx, cl, cp, func() error { + spec, err := Spec(&opts) + if err != nil { + return err + } + cp.Spec = *spec + + return nil + }) + if err != nil { + return nil, err + } + + if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated { + l.Info(fmt.Sprintf("Successfully %s ClusterProfile %s", string(operation), cp.Name)) + } + + return cp, nil +} + // ReconcileProfile reconciles a Sveltos Profile object. -func ReconcileProfile(ctx context.Context, +func ReconcileProfile( + ctx context.Context, cl client.Client, namespace string, name string, - matchLabels map[string]string, opts ReconcileProfileOpts, ) (*sveltosv1beta1.Profile, error) { l := ctrl.LoggerFrom(ctx) + obj := objectMeta(opts.OwnerReference) + obj.SetNamespace(namespace) + obj.SetName(name) - cp := &sveltosv1beta1.Profile{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, + p := &sveltosv1beta1.Profile{ + ObjectMeta: obj, } - tier, err := PriorityToTier(opts.Priority) + operation, err := ctrl.CreateOrUpdate(ctx, cl, p, func() error { + spec, err := Spec(&opts) + if err != nil { + return err + } + p.Spec = *spec + + return nil + }) if err != nil { return nil, err } - operation, err := ctrl.CreateOrUpdate(ctx, cl, cp, func() error { - if cp.Labels == nil { - cp.Labels = make(map[string]string) - } + if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated { + l.Info(fmt.Sprintf("Successfully %s Profile %s", string(operation), p.Name)) + } - cp.Labels[hmc.HMCManagedLabelKey] = hmc.HMCManagedLabelValue - if opts.OwnerReference != nil { 
- cp.OwnerReferences = []metav1.OwnerReference{*opts.OwnerReference} - } + return p, nil +} + +// Spec returns a spec object to be used with +// a Sveltos Profile or ClusterProfile object. +func Spec(opts *ReconcileProfileOpts) (*sveltosv1beta1.Spec, error) { + tier, err := PriorityToTier(opts.Priority) + if err != nil { + return nil, err + } - cp.Spec = sveltosv1beta1.Spec{ - ClusterSelector: libsveltosv1beta1.Selector{ - LabelSelector: metav1.LabelSelector{ - MatchLabels: matchLabels, - }, + spec := &sveltosv1beta1.Spec{ + ClusterSelector: libsveltosv1beta1.Selector{ + LabelSelector: opts.LabelSelector, + }, + Tier: tier, + ContinueOnConflict: !opts.StopOnConflict, + HelmCharts: make([]sveltosv1beta1.HelmChart, 0, len(opts.HelmChartOpts)), + } + + for _, hc := range opts.HelmChartOpts { + helmChart := sveltosv1beta1.HelmChart{ + RepositoryURL: hc.RepositoryURL, + RepositoryName: hc.RepositoryName, + ChartName: hc.ChartName, + ChartVersion: hc.ChartVersion, + ReleaseName: hc.ReleaseName, + ReleaseNamespace: hc.ReleaseNamespace, + HelmChartAction: sveltosv1beta1.HelmChartActionInstall, + RegistryCredentialsConfig: &sveltosv1beta1.RegistryCredentialsConfig{ + PlainHTTP: hc.PlainHTTP, + InsecureSkipTLSVerify: hc.InsecureSkipTLSVerify, }, - Tier: tier, - ContinueOnConflict: !opts.StopOnConflict, } - for _, hc := range opts.HelmChartOpts { - helmChart := sveltosv1beta1.HelmChart{ - RepositoryURL: hc.RepositoryURL, - RepositoryName: hc.RepositoryName, - ChartName: hc.ChartName, - ChartVersion: hc.ChartVersion, - ReleaseName: hc.ReleaseName, - ReleaseNamespace: hc.ReleaseNamespace, - HelmChartAction: sveltosv1beta1.HelmChartActionInstall, - RegistryCredentialsConfig: &sveltosv1beta1.RegistryCredentialsConfig{ - PlainHTTP: hc.PlainHTTP, - InsecureSkipTLSVerify: hc.InsecureSkipTLSVerify, - }, - } + if hc.PlainHTTP { + // InsecureSkipTLSVerify is redundant in this case. 
+ helmChart.RegistryCredentialsConfig.InsecureSkipTLSVerify = false + } - if hc.PlainHTTP { - // InsecureSkipTLSVerify is redundant in this case. - helmChart.RegistryCredentialsConfig.InsecureSkipTLSVerify = false + if hc.Values != nil && len(hc.Values.Raw) > 0 { + b, err := yaml.JSONToYAML(hc.Values.Raw) + if err != nil { + return nil, fmt.Errorf("failed to convert values from JSON to YAML for service %s: %w", hc.RepositoryName, err) } - if hc.Values != nil { - b, err := hc.Values.MarshalJSON() - if err != nil { - return fmt.Errorf("failed to marshal values to JSON for service (%s) in ManagedCluster: %w", hc.RepositoryName, err) - } + helmChart.Values = unsafe.String(&b[0], len(b)) + } - b, err = yaml.JSONToYAML(b) - if err != nil { - return fmt.Errorf("failed to convert values from JSON to YAML for service (%s) in ManagedCluster: %w", hc.RepositoryName, err) - } + spec.HelmCharts = append(spec.HelmCharts, helmChart) + } - helmChart.Values = string(b) - } + return spec, nil +} - cp.Spec.HelmCharts = append(cp.Spec.HelmCharts, helmChart) - } - return nil - }) - if err != nil { - return nil, err +func objectMeta(owner *metav1.OwnerReference) metav1.ObjectMeta { + obj := metav1.ObjectMeta{ + Labels: map[string]string{ + hmc.HMCManagedLabelKey: hmc.HMCManagedLabelValue, + }, } - if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated { - l.Info(fmt.Sprintf("Successfully %s Profile (%s/%s)", string(operation), cp.Namespace, cp.Name)) + if owner != nil { + obj.OwnerReferences = []metav1.OwnerReference{*owner} } - return cp, nil + return obj } +// DeleteProfile deletes a Sveltos Profile object. 
func DeleteProfile(ctx context.Context, cl client.Client, namespace string, name string) error { err := cl.Delete(ctx, &sveltosv1beta1.Profile{ ObjectMeta: metav1.ObjectMeta{ @@ -151,6 +201,17 @@ func DeleteProfile(ctx context.Context, cl client.Client, namespace string, name return client.IgnoreNotFound(err) } +// DeleteClusterProfile deletes a Sveltos ClusterProfile object. +func DeleteClusterProfile(ctx context.Context, cl client.Client, name string) error { + err := cl.Delete(ctx, &sveltosv1beta1.ClusterProfile{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }) + + return client.IgnoreNotFound(err) +} + // PriorityToTier converts priority value to Sveltos tier value. func PriorityToTier(priority int32) (int32, error) { var mini int32 = 1 diff --git a/templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml b/templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml new file mode 100644 index 000000000..6e33c5e85 --- /dev/null +++ b/templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml @@ -0,0 +1,10 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ServiceTemplate +metadata: + name: ingress-nginx-4-11-3 + annotations: + helm.sh/resource-policy: keep +spec: + helm: + chartName: ingress-nginx + chartVersion: 4.11.3 diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index 6c33ac0bb..d3ad6d5f5 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -69,17 +69,6 @@ spec: description: DryRun specifies whether the template should be applied after validation or only validated. type: boolean - priority: - default: 100 - description: |- - Priority sets the priority for the services defined in this spec. - Higher value means higher priority and lower means lower. 
- In case of conflict with another object managing the service, - the one with higher priority will get to deploy its services. - format: int32 - maximum: 2147483646 - minimum: 1 - type: integer services: description: |- Services is a list of services created via ServiceTemplates @@ -113,7 +102,19 @@ spec: - template type: object type: array + servicesPriority: + default: 100 + description: |- + ServicesPriority sets the priority for the services defined in this spec. + Higher value means higher priority and lower means lower. + In case of conflict with another object managing the service, + the one with higher priority will get to deploy its services. + format: int32 + maximum: 2147483646 + minimum: 1 + type: integer stopOnConflict: + default: false description: |- StopOnConflict specifies what to do in case of a conflict. E.g. If another object is already managing a service. diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml index 953f6b87c..dc2a7fa93 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml @@ -87,17 +87,6 @@ spec: type: object type: object x-kubernetes-map-type: atomic - priority: - default: 100 - description: |- - Priority sets the priority for the services defined in this spec. - Higher value means higher priority and lower means lower. - In case of conflict with another object managing the service, - the one with higher priority will get to deploy its services. 
- format: int32 - maximum: 2147483646 - minimum: 1 - type: integer services: description: |- Services is a list of services created via ServiceTemplates @@ -131,7 +120,19 @@ spec: - template type: object type: array + servicesPriority: + default: 100 + description: |- + ServicesPriority sets the priority for the services defined in this spec. + Higher value means higher priority and lower means lower. + In case of conflict with another object managing the service, + the one with higher priority will get to deploy its services. + format: int32 + maximum: 2147483646 + minimum: 1 + type: integer stopOnConflict: + default: false description: |- StopOnConflict specifies what to do in case of a conflict. E.g. If another object is already managing a service. diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index 739275821..48c52a7cd 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -171,6 +171,7 @@ rules: - config.projectsveltos.io resources: - profiles + - clusterprofiles verbs: {{ include "rbac.editorVerbs" . 
| nindent 4 }} - apiGroups: - hmc.mirantis.com diff --git a/templates/service/ingress-nginx/Chart.lock b/templates/service/ingress-nginx-4-11-0/Chart.lock similarity index 100% rename from templates/service/ingress-nginx/Chart.lock rename to templates/service/ingress-nginx-4-11-0/Chart.lock diff --git a/templates/service/ingress-nginx/Chart.yaml b/templates/service/ingress-nginx-4-11-0/Chart.yaml similarity index 100% rename from templates/service/ingress-nginx/Chart.yaml rename to templates/service/ingress-nginx-4-11-0/Chart.yaml diff --git a/templates/service/ingress-nginx-4-11-3/Chart.lock b/templates/service/ingress-nginx-4-11-3/Chart.lock new file mode 100644 index 000000000..51eb4dc3f --- /dev/null +++ b/templates/service/ingress-nginx-4-11-3/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: ingress-nginx + repository: https://kubernetes.github.io/ingress-nginx + version: 4.11.3 +digest: sha256:0963a4470e5fe0ce97023b16cfc9c3cde18b74707c6379947542e09afa6d5346 +generated: "2024-10-16T10:19:41.054555-04:00" diff --git a/templates/service/ingress-nginx-4-11-3/Chart.yaml b/templates/service/ingress-nginx-4-11-3/Chart.yaml new file mode 100644 index 000000000..8fe3cc1d5 --- /dev/null +++ b/templates/service/ingress-nginx-4-11-3/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: ingress-nginx +description: A Helm chart to refer the official ingress-nginx helm chart +type: application +version: 4.11.3 +appVersion: "1.11.3" +dependencies: + - name: ingress-nginx + version: 4.11.3 + repository: https://kubernetes.github.io/ingress-nginx