Commit 314dbc0

WIP

wahabmk committed Oct 10, 2024
1 parent 8bad252 commit 314dbc0
Showing 7 changed files with 356 additions and 72 deletions.
17 changes: 15 additions & 2 deletions api/v1alpha1/multiclusterservice_types.go
@@ -19,6 +19,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
MultiClusterServiceFinalizer = "hmc.mirantis.com/multicluster-service"
)

// ServiceSpec represents a Service to be managed
type ServiceSpec struct {
// Values is the helm values to be passed to the template.
@@ -27,6 +31,12 @@ type ServiceSpec struct {
// +kubebuilder:validation:MinLength=1

// Template is a reference to a Template object located in the same namespace.
// wahab:
// --------
// I think we should be able to reference a Template from another namespace
// in a MultiClusterService object, so we also need another field to specify
// the namespace. Maybe we should have separate ServiceSpec structs for
// ManagedCluster and MultiClusterService (see the hypothetical sketch below)?
Template string `json:"template"`

// +kubebuilder:validation:MinLength=1
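
One hypothetical shape for the split suggested in the comment above; the type names, field names, and JSON tags below are illustrative assumptions, not part of this commit:

// ManagedClusterServiceSpec references a ServiceTemplate in the ManagedCluster's own namespace.
type ManagedClusterServiceSpec struct {
	// Template is the name of a ServiceTemplate in the same namespace as the ManagedCluster.
	Template string `json:"template"`
	// Name is the release name for the deployed service.
	Name string `json:"name"`
}

// MultiClusterServiceServiceSpec also carries the Template's namespace explicitly,
// since a cluster-scoped MultiClusterService has no namespace of its own to default to.
type MultiClusterServiceServiceSpec struct {
	// Template is the name of the referenced ServiceTemplate.
	Template string `json:"template"`
	// TemplateNamespace is the namespace the referenced ServiceTemplate lives in.
	TemplateNamespace string `json:"templateNamespace,omitempty"`
	// Name is the release name for the deployed service.
	Name string `json:"name"`
}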
@@ -57,6 +67,9 @@ type MultiClusterServiceSpec struct {
// In case of a conflict with another object managing the service,
// the one with the higher priority gets to deploy its services.
Priority int32 `json:"priority,omitempty"`

// +kubebuilder:default:=false

// StopOnConflict specifies what to do in case of a conflict,
// e.g. if another object is already managing a service.
// By default, the remaining services will be deployed even if a conflict is detected.
@@ -70,8 +83,8 @@ type MultiClusterServiceSpec struct {
// If this status ends up being common with ManagedClusterStatus,
// then make a common status struct that can be shared by both.
type MultiClusterServiceStatus struct {
- // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
- // Important: Run "make" to regenerate code after modifying this file
+ // ObservedGeneration is the last observed generation.
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
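
If this status does end up shared with ManagedClusterStatus, one possible common struct could look like the following; everything beyond ObservedGeneration is an assumption for illustration:

type SharedClusterStatus struct {
	// ObservedGeneration is the last observed generation.
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// Conditions contains details for the current state of the object.
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}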

// +kubebuilder:object:root=true
7 changes: 5 additions & 2 deletions config/dev/aws-managedcluster.yaml
@@ -1,7 +1,7 @@
apiVersion: hmc.mirantis.com/v1alpha1
kind: ManagedCluster
metadata:
- name: aws-dev
+ name: wali-aws-dev
namespace: ${NAMESPACE}
spec:
credential: aws-cluster-identity-cred
@@ -10,15 +10,18 @@ spec:
name: aws-cluster-identity
namespace: ${NAMESPACE}
controlPlane:
amiID: ami-0eb9fdcf0d07bd5ef
instanceType: t3.small
controlPlaneNumber: 1
publicIP: true
- region: us-west-2
+ region: ca-central-1
worker:
amiID: ami-0eb9fdcf0d07bd5ef
instanceType: t3.small
workersNumber: 1
installBeachHeadServices: false
template: aws-standalone-cp-0-0-1
priority: 1000
services:
- template: kyverno-3-2-6
name: kyverno
8 changes: 4 additions & 4 deletions internal/controller/managedcluster_controller.go
@@ -429,17 +429,17 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M
}

if _, err := sveltos.ReconcileProfile(ctx, r.Client, l, mc.Namespace, mc.Name,
- map[string]string{
- hmc.FluxHelmChartNamespaceKey: mc.Namespace,
- hmc.FluxHelmChartNameKey: mc.Name,
- },
sveltos.ReconcileProfileOpts{
OwnerReference: &metav1.OwnerReference{
APIVersion: hmc.GroupVersion.String(),
Kind: hmc.ManagedClusterKind,
Name: mc.Name,
UID: mc.UID,
},
+ MatchLabels: map[string]string{
+ hmc.FluxHelmChartNamespaceKey: mc.Namespace,
+ hmc.FluxHelmChartNameKey: mc.Name,
+ },
HelmChartOpts: opts,
Priority: mc.Spec.Priority,
StopOnConflict: mc.Spec.StopOnConflict,
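
For context, a sketch of the options struct used in both call sites of this commit, with fields inferred purely from their usage; the actual definition lives in internal/sveltos and may differ:

// ReconcileProfileOpts as inferred from the ManagedCluster and MultiClusterService controllers.
type ReconcileProfileOpts struct {
	OwnerReference *metav1.OwnerReference
	MatchLabels    map[string]string
	HelmChartOpts  []HelmChartOpts
	Priority       int32
	StopOnConflict bool
}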
194 changes: 191 additions & 3 deletions internal/controller/multiclusterservice_controller.go
@@ -16,24 +16,212 @@ package controller

import (
"context"
"fmt"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/yaml"

hmc "github.com/Mirantis/hmc/api/v1alpha1"
"github.com/Mirantis/hmc/internal/sveltos"
"github.com/Mirantis/hmc/internal/utils"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
"github.com/go-logr/logr"
)

/*
apiVersion: hmc.mirantis.com/v1alpha1
kind: MultiClusterService
metadata:
name: wali-global-ingress
spec:
clusterSelector:
matchLabels:
app.kubernetes.io/managed-by: Helm
services:
- template: ingress-nginx-4-11-0
name: ingress-nginx
namespace: ingress-nginx
*/

// MultiClusterServiceReconciler reconciles a MultiClusterService object
type MultiClusterServiceReconciler struct {
client.Client
}

// Reconcile reconciles a MultiClusterService object.
- func (*MultiClusterServiceReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
- _ = ctrl.LoggerFrom(ctx)
+ func (r *MultiClusterServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ l := ctrl.LoggerFrom(ctx).WithValues("MultiClusterServiceController", req.NamespacedName.String())
l.Info("Reconciling MultiClusterService")

// 1. Get the MultiClusterService object from the API server by its name only (it is cluster-scoped)
mcsvc := &hmc.MultiClusterService{}
err := r.Get(ctx, req.NamespacedName, mcsvc)

fmt.Printf("\n>>>>>>>>>>>> [Reconcile] req.Namespace=%s, req=Name=%s, req.NamespacedName=%s, err=%s\n", req.Namespace, req.Name, req.NamespacedName, err)

Check failure on line 65 in internal/controller/multiclusterservice_controller.go (GitHub Actions / Build and Unit Test): unhandled-error: Unhandled error in call to function fmt.Printf (revive)

if apierrors.IsNotFound(err) {
l.Info("MultiClusterService not found, ignoring since object must be deleted")
return ctrl.Result{}, nil
}
if err != nil {
l.Error(err, "Failed to get MultiClusterService")
return ctrl.Result{}, err
}

b, _ := yaml.Marshal(mcsvc)
fmt.Printf("\n>>>>>>>>>>>> [Reconcile] object=\n%s\n", string(b))

Check failure on line 77 in internal/controller/multiclusterservice_controller.go (GitHub Actions / Build and Unit Test): unhandled-error: Unhandled error in call to function fmt.Printf (revive)

// ================================================================================================================

// 2. If its DeletionTimestamp is set (i.e. not zero), reconcile its deletion
if !mcsvc.DeletionTimestamp.IsZero() {
l.Info("Deleting MultiClusterService")
return r.reconcileDelete(ctx)
}

// ================================================================================================================

// 3. If ObservedGeneration == 0 then make a creation event for telemetry

// ================================================================================================================

// 4. Now reconcile
// 4a. Add finalizer if doesn't exist
// 4b. Reconcile the ClusterProfile object
return r.reconcile(ctx, l, mcsvc)
}

func (r *MultiClusterServiceReconciler) reconcile(ctx context.Context, l logr.Logger, mcsvc *hmc.MultiClusterService) (ctrl.Result, error) {

Check failure on line 99 in internal/controller/multiclusterservice_controller.go (GitHub Actions / Build and Unit Test): confusing-naming: Method 'reconcile' differs only by capitalization to method 'Reconcile' in the same source file (revive)
isUpdated := controllerutil.AddFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer)
if isUpdated {
if err := r.Client.Update(ctx, mcsvc); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update MultiClusterService %s/%s with finalizer %s: %w", mcsvc.Namespace, mcsvc.Name, hmc.MultiClusterServiceFinalizer, err)
}
return ctrl.Result{}, nil
}

opts, err := r.getHelmChartOpts(ctx, l, mcsvc)
if err != nil {
return ctrl.Result{}, err
}

if _, err := sveltos.ReconcileClusterProfile(ctx, r.Client, l, mcsvc.Namespace, mcsvc.Name,
sveltos.ReconcileProfileOpts{
OwnerReference: &metav1.OwnerReference{
APIVersion: hmc.GroupVersion.String(),
Kind: hmc.ManagedClusterKind,
Name: mcsvc.Name,
UID: mcsvc.UID,
},
MatchLabels: map[string]string{
hmc.FluxHelmChartNamespaceKey: mcsvc.Namespace,
hmc.FluxHelmChartNameKey: mcsvc.Name,
},
HelmChartOpts: opts,
Priority: mcsvc.Spec.Priority,
StopOnConflict: mcsvc.Spec.StopOnConflict,
}); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to reconcile Profile: %w", err)
}

// TODO(https://github.com/Mirantis/hmc/issues/455): Implement me.
return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil
}

// wahab: needs to be shared somehow
func (r *MultiClusterServiceReconciler) getHelmChartOpts(ctx context.Context, l logr.Logger, mcsvc *hmc.MultiClusterService) ([]sveltos.HelmChartOpts, error) {
opts := []sveltos.HelmChartOpts{}

// NOTE: The ClusterProfile object will be updated with no Helm
// charts if len(mcsvc.Spec.Services) == 0. This will result in the
// Helm charts being uninstalled on matching clusters if the
// ClusterProfile originally had len(mcsvc.Spec.Services) > 0.
for _, svc := range mcsvc.Spec.Services {
if svc.Disable {
l.Info(fmt.Sprintf("Skip adding Template (%s) to Profile (%s) because Disable=true", svc.Template, mcsvc.Name))
continue
}

tmpl := &hmc.ServiceTemplate{}
tmplRef := types.NamespacedName{Name: svc.Template, Namespace: utils.DefaultSystemNamespace}
if err := r.Get(ctx, tmplRef, tmpl); err != nil {
return nil, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err)
}

source, err := r.getServiceTemplateSource(ctx, tmpl)
if err != nil {
return nil, fmt.Errorf("could not get repository url: %w", err)
}

opts = append(opts, sveltos.HelmChartOpts{
Values: svc.Values,
RepositoryURL: source.Spec.URL,
// We don't have a repository name, so the chart name is used as the repository name.
RepositoryName: tmpl.Spec.Helm.ChartName,
ChartName: func() string {
if source.Spec.Type == utils.RegistryTypeOCI {
return tmpl.Spec.Helm.ChartName
}
// Sveltos accepts ChartName in <repository>/<chart> format for non-OCI.
// We don't have a repository name, so we can use <chart>/<chart> instead.
// See: https://projectsveltos.github.io/sveltos/addons/helm_charts/.
return fmt.Sprintf("%s/%s", tmpl.Spec.Helm.ChartName, tmpl.Spec.Helm.ChartName)
}(),
ChartVersion: tmpl.Spec.Helm.ChartVersion,
ReleaseName: svc.Name,
ReleaseNamespace: func() string {
if svc.Namespace != "" {
return svc.Namespace
}
return svc.Name
}(),
// It is passed to PlainHTTP instead of InsecureSkipTLSVerify because
// the source.Spec.Insecure field is meant for connecting to repositories
// over plain HTTP, which is different from what InsecureSkipTLSVerify is for.
// See: https://github.com/fluxcd/source-controller/pull/1288
PlainHTTP: source.Spec.Insecure,
})
}

return opts, nil
}
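
To make the chart-name handling above concrete: for the ingress-nginx-4-11-0 template from the commented example earlier, and assuming a non-OCI HelmRepository, a chart named ingress-nginx at version 4.11.0, and a purely illustrative repository URL, getHelmChartOpts would produce roughly:

sveltos.HelmChartOpts{
	RepositoryURL:    "https://kubernetes.github.io/ingress-nginx", // assumed URL; taken from the HelmRepository source at runtime
	RepositoryName:   "ingress-nginx",                              // chart name stands in for the missing repository name
	ChartName:        "ingress-nginx/ingress-nginx",                // <chart>/<chart> format for non-OCI sources
	ChartVersion:     "4.11.0",
	ReleaseName:      "ingress-nginx",
	ReleaseNamespace: "ingress-nginx",
	PlainHTTP:        false, // source.Spec.Insecure
}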

// wahab: needs to be shared
func (r *MultiClusterServiceReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) {
tmplRef := types.NamespacedName{Namespace: tmpl.Namespace, Name: tmpl.Name}

if tmpl.Status.ChartRef == nil {
return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String())
}

hc := &sourcev1.HelmChart{}
if err := r.Get(ctx, types.NamespacedName{
Namespace: tmpl.Status.ChartRef.Namespace,
Name: tmpl.Status.ChartRef.Name,
}, hc); err != nil {
return nil, fmt.Errorf("failed to get HelmChart (%s): %w", tmplRef.String(), err)
}

repo := &sourcev1.HelmRepository{}
if err := r.Get(ctx, types.NamespacedName{
// Using the chart's namespace because its source
// (the HelmRepository in this case) should be within the same namespace.
Namespace: hc.Namespace,
Name: hc.Spec.SourceRef.Name,
}, repo); err != nil {
return nil, fmt.Errorf("failed to get HelmRepository (%s): %w", tmplRef.String(), err)
}

return repo, nil
}

func (r *MultiClusterServiceReconciler) reconcileDelete(_ context.Context) (ctrl.Result, error) {

Check failure on line 222 in internal/controller/multiclusterservice_controller.go (GitHub Actions / Build and Unit Test): unused-receiver: method receiver 'r' is not referenced in method's body, consider removing or renaming it as _ (revive)
// 2a. Handle any cleanup required before deletion (see the sketch below)
// 2b. Remove the finalizer
return ctrl.Result{}, nil
}
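
A possible shape for the deletion path, mirroring the AddFinalizer handling in reconcile above; this is a sketch that assumes removing the finalizer is the only step needed here (any ClusterProfile cleanup is omitted), not the final implementation:

func (r *MultiClusterServiceReconciler) reconcileDelete(ctx context.Context, mcsvc *hmc.MultiClusterService) (ctrl.Result, error) {
	// 2a. Any cleanup of resources owned by this MultiClusterService would go here.

	// 2b. Remove the finalizer so the API server can finish deleting the object.
	if controllerutil.RemoveFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer) {
		if err := r.Client.Update(ctx, mcsvc); err != nil {
			return ctrl.Result{}, fmt.Errorf("failed to remove finalizer %s from MultiClusterService %s: %w",
				hmc.MultiClusterServiceFinalizer, mcsvc.Name, err)
		}
	}
	return ctrl.Result{}, nil
}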
