Skip to content

Commit

Permalink
Reconcile MultiClusterService
Browse files Browse the repository at this point in the history
  • Loading branch information
wahabmk committed Oct 18, 2024
1 parent 76f1b29 commit 61de7ea
Show file tree
Hide file tree
Showing 17 changed files with 552 additions and 207 deletions.
8 changes: 7 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -379,6 +379,8 @@ FLUX_SOURCE_REPO_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmrepositories-$(FLUX_SOURC
FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmchart-$(FLUX_SOURCE_VERSION).yaml
FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version')
FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/helm-$(FLUX_HELM_VERSION).yaml
SVELTOS_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/projectsveltos/libsveltos") | .Version')
SVELTOS_CRD ?= $(EXTERNAL_CRD_DIR)/sveltos-$(SVELTOS_VERSION).yaml

## Tool Binaries
KUBECTL ?= kubectl
Expand Down Expand Up @@ -445,8 +447,12 @@ $(FLUX_SOURCE_REPO_CRD): $(EXTERNAL_CRD_DIR)
rm -f $(FLUX_SOURCE_REPO_CRD)
curl -s https://raw.githubusercontent.com/fluxcd/source-controller/$(FLUX_SOURCE_VERSION)/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml > $(FLUX_SOURCE_REPO_CRD)

$(SVELTOS_CRD): $(EXTERNAL_CRD_DIR)
rm -f $(SVELTOS_CRD)
curl -s https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD)

.PHONY: external-crd
external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD)
external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD)

.PHONY: kind
kind: $(KIND) ## Download kind locally if necessary.
Expand Down
7 changes: 5 additions & 2 deletions api/v1alpha1/managedcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,13 +71,16 @@ type ManagedClusterSpec struct {
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=2147483646

// Priority sets the priority for the services defined in this spec.
// ServicesPriority sets the priority for the services defined in this spec.
// Higher value means higher priority and lower means lower.
// In case of conflict with another object managing the service,
// the one with higher priority will get to deploy its services.
Priority int32 `json:"priority,omitempty"`
ServicesPriority int32 `json:"servicesPriority,omitempty"`
// DryRun specifies whether the template should be applied after validation or only validated.
DryRun bool `json:"dryRun,omitempty"`

// +kubebuilder:default:=false

// StopOnConflict specifies what to do in case of a conflict.
// E.g. If another object is already managing a service.
// By default the remaining services will be deployed even if conflict is detected.
Expand Down
14 changes: 12 additions & 2 deletions api/v1alpha1/multiclusterservice_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	// MultiClusterServiceFinalizer is the finalizer applied to MultiClusterService objects.
	MultiClusterServiceFinalizer = "hmc.mirantis.com/multicluster-service"
	// MultiClusterServiceKind is the string representation of the MultiClusterService kind.
	MultiClusterServiceKind = "MultiClusterService"
)

// ServiceSpec represents a Service to be managed
type ServiceSpec struct {
// Values is the helm values to be passed to the template.
Expand Down Expand Up @@ -52,11 +59,14 @@ type MultiClusterServiceSpec struct {
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=2147483646

// Priority sets the priority for the services defined in this spec.
// ServicesPriority sets the priority for the services defined in this spec.
// Higher value means higher priority and lower means lower.
// In case of conflict with another object managing the service,
// the one with higher priority will get to deploy its services.
Priority int32 `json:"priority,omitempty"`
ServicesPriority int32 `json:"servicesPriority,omitempty"`

// +kubebuilder:default:=false

// StopOnConflict specifies what to do in case of a conflict.
// E.g. If another object is already managing a service.
// By default the remaining services will be deployed even if conflict is detected.
Expand Down
1 change: 1 addition & 0 deletions config/dev/aws-managedcluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ spec:
workersNumber: 1
installBeachHeadServices: false
template: aws-standalone-cp-0-0-1
servicesPriority: 100
services:
- template: kyverno-3-2-6
name: kyverno
Expand Down
13 changes: 13 additions & 0 deletions config/dev/multiclusterservice.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Development example of a MultiClusterService.
# Deploys the listed service templates to clusters matching clusterSelector.
# servicesPriority resolves conflicts with other objects managing the same
# services: the object with the higher value wins.
apiVersion: hmc.mirantis.com/v1alpha1
kind: MultiClusterService
metadata:
name: global-ingress
spec:
servicesPriority: 1000
clusterSelector:
matchLabels:
app.kubernetes.io/managed-by: Helm
services:
- template: ingress-nginx-4-11-3
name: ingress-nginx
namespace: ingress-nginx
98 changes: 10 additions & 88 deletions internal/controller/managedcluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,6 @@ import (
hmc "github.com/Mirantis/hmc/api/v1alpha1"
"github.com/Mirantis/hmc/internal/helm"
"github.com/Mirantis/hmc/internal/telemetry"
"github.com/Mirantis/hmc/internal/utils"
)

const (
Expand Down Expand Up @@ -381,74 +380,27 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, managedCluster *h
// updateServices reconciles services provided in ManagedCluster.Spec.Services.
// TODO(https://github.com/Mirantis/hmc/issues/361): Set status to ManagedCluster object at appropriate places.
func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.ManagedCluster) (ctrl.Result, error) {
l := ctrl.LoggerFrom(ctx)
opts := []sveltos.HelmChartOpts{}

// NOTE: The Profile object will be updated with no helm
// charts if len(mc.Spec.Services) == 0. This will result in the
// helm charts being uninstalled on matching clusters if
// Profile originally had len(mc.Spec.Services) > 0.
for _, svc := range mc.Spec.Services {
if svc.Disable {
l.Info(fmt.Sprintf("Skip adding Template (%s) to Profile (%s) because Disable=true", svc.Template, mc.Name))
continue
}

tmpl := &hmc.ServiceTemplate{}
tmplRef := client.ObjectKey{Name: svc.Template, Namespace: mc.Namespace}
if err := r.Get(ctx, tmplRef, tmpl); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err)
}

source, err := r.getServiceTemplateSource(ctx, tmpl)
if err != nil {
return ctrl.Result{}, fmt.Errorf("could not get repository url: %w", err)
}

opts = append(opts, sveltos.HelmChartOpts{
Values: svc.Values,
RepositoryURL: source.Spec.URL,
// We don't have repository name so chart name becomes repository name.
RepositoryName: tmpl.Spec.Helm.ChartName,
ChartName: func() string {
if source.Spec.Type == utils.RegistryTypeOCI {
return tmpl.Spec.Helm.ChartName
}
// Sveltos accepts ChartName in <repository>/<chart> format for non-OCI.
// We don't have a repository name, so we can use <chart>/<chart> instead.
// See: https://projectsveltos.github.io/sveltos/addons/helm_charts/.
return fmt.Sprintf("%s/%s", tmpl.Spec.Helm.ChartName, tmpl.Spec.Helm.ChartName)
}(),
ChartVersion: tmpl.Spec.Helm.ChartVersion,
ReleaseName: svc.Name,
ReleaseNamespace: func() string {
if svc.Namespace != "" {
return svc.Namespace
}
return svc.Name
}(),
// The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is because
// the source.Spec.Insecure field is meant to be used for connecting to repositories
// over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for.
// See: https://github.com/fluxcd/source-controller/pull/1288
PlainHTTP: source.Spec.Insecure,
})
opts, err := helmChartOpts(ctx, r.Client, mc.Namespace, mc.Spec.Services)
if err != nil {
return ctrl.Result{}, err
}

if _, err := sveltos.ReconcileProfile(ctx, r.Client, mc.Namespace, mc.Name,
map[string]string{
hmc.FluxHelmChartNamespaceKey: mc.Namespace,
hmc.FluxHelmChartNameKey: mc.Name,
},
sveltos.ReconcileProfileOpts{
OwnerReference: &metav1.OwnerReference{
APIVersion: hmc.GroupVersion.String(),
Kind: hmc.ManagedClusterKind,
Name: mc.Name,
UID: mc.UID,
},
LabelSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
hmc.FluxHelmChartNamespaceKey: mc.Namespace,
hmc.FluxHelmChartNameKey: mc.Name,
},
},
HelmChartOpts: opts,
Priority: mc.Spec.Priority,
Priority: mc.Spec.ServicesPriority,
StopOnConflict: mc.Spec.StopOnConflict,
}); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to reconcile Profile: %w", err)
Expand All @@ -462,36 +414,6 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M
return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil
}

// getServiceTemplateSource returns the source (HelmRepository) used by the ServiceTemplate.
// It is fetched by querying for ServiceTemplate -> HelmChart -> HelmRepository.
// Returns an error if the ServiceTemplate's status has not yet been populated
// with a chart reference, or if the HelmChart or HelmRepository cannot be fetched.
func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) {
	// tmplRef is used only for error messages so every failure names the
	// originating ServiceTemplate, regardless of which lookup failed.
	tmplRef := client.ObjectKey{Namespace: tmpl.Namespace, Name: tmpl.Name}

	// The chart reference is written by another reconciler; until it appears
	// there is nothing to resolve yet.
	if tmpl.Status.ChartRef == nil {
		return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String())
	}

	hc := &sourcev1.HelmChart{}
	if err := r.Get(ctx, client.ObjectKey{
		Namespace: tmpl.Status.ChartRef.Namespace,
		Name: tmpl.Status.ChartRef.Name,
	}, hc); err != nil {
		return nil, fmt.Errorf("failed to get HelmChart (%s): %w", tmplRef.String(), err)
	}

	repo := &sourcev1.HelmRepository{}
	if err := r.Get(ctx, client.ObjectKey{
		// Using the chart's namespace because its source
		// (helm repository in this case) should be within the same namespace.
		Namespace: hc.Namespace,
		Name: hc.Spec.SourceRef.Name,
	}, repo); err != nil {
		return nil, fmt.Errorf("failed to get HelmRepository (%s): %w", tmplRef.String(), err)
	}

	return repo, nil
}

func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, managedCluster *hmc.ManagedCluster, hcChart *chart.Chart) error {
install := action.NewInstall(actionConfig)
install.DryRun = true
Expand Down
Loading

0 comments on commit 61de7ea

Please sign in to comment.