diff --git a/Makefile b/Makefile
index 5b9a91333..f298dc33d 100644
--- a/Makefile
+++ b/Makefile
@@ -403,11 +403,9 @@ FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_SOURCE_CHART_NAME)-$(FLUX_SO
 FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version')
 FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/helm-$(FLUX_HELM_VERSION).yaml
 CAPI_VERSION ?= v1.8.4
-CAPI_CRD ?= $(EXTERNAL_CRD_DIR)/capi-$(CAPI_VERSION).yaml
-K0SMOTRON_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/k0sproject/k0smotron") | .Version')
-K0SMOTRON_CRD ?= $(EXTERNAL_CRD_DIR)/k0smotron-$(K0SMOTRON_VERSION).yaml
+CAPI_REPO_NAME ?= capi
+CAPI_CRD ?= $(EXTERNAL_CRD_DIR)/$(CAPI_REPO_NAME)-$(CAPI_VERSION).yaml
 FLUX_HELM_NAME ?= helm
-FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/$(FLUX_HELM_NAME)-$(FLUX_HELM_VERSION).yaml
 SVELTOS_VERSION ?= v$(shell $(YQ) -r '.appVersion' $(PROVIDER_TEMPLATES_DIR)/projectsveltos/Chart.yaml)
 SVELTOS_NAME ?= sveltos
 
@@ -482,16 +480,13 @@ $(SVELTOS_CRD): | yq $(EXTERNAL_CRD_DIR)
 	rm -f $(EXTERNAL_CRD_DIR)/$(SVELTOS_NAME)*
 	curl -s --fail https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD)
 
-$(K0SMOTRON_CRD): $(EXTERNAL_CRD_DIR)
-	rm -f $(K0SMOTRON_CRD)
-	curl -s https://raw.githubusercontent.com/k0sproject/k0smotron/$(K0SMOTRON_VERSION)/config/crd/bases/infrastructure.cluster.x-k8s.io_remoteclusters.yaml > $(K0SMOTRON_CRD)
-
 $(CAPI_CRD): $(EXTERNAL_CRD_DIR)
-	rm -f $(CAPI_CRD)
-	curl -s https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_clusters.yaml > $(CAPI_CRD)
+	rm -f $(EXTERNAL_CRD_DIR)/$(CAPI_REPO_NAME)*
+	curl -s --fail https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_clusters.yaml > $(CAPI_CRD)
+	curl -s --fail https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CAPI_VERSION)/config/crd/bases/cluster.x-k8s.io_machines.yaml >> $(CAPI_CRD)
 
 .PHONY: external-crd
-external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(K0SMOTRON_CRD) $(CAPI_CRD)
+external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(CAPI_CRD)
 
 .PHONY: kind
 kind: $(KIND) ## Download kind locally if necessary.
diff --git a/api/v1alpha1/unmanagedcluster_types.go b/api/v1alpha1/unmanagedcluster_types.go
index 9fbe76b79..6a5a4a3d6 100644
--- a/api/v1alpha1/unmanagedcluster_types.go
+++ b/api/v1alpha1/unmanagedcluster_types.go
@@ -18,12 +18,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
-// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
-
 const (
 	UnmanagedClusterKind = "UnmanagedCluster"
-	UnmanagedClusterFinalizer = "hmc.mirantis.com/unmanage-dcluster"
+	UnmanagedClusterFinalizer = "hmc.mirantis.com/unmanaged-cluster"
 	AllNodesCondition = "AllNodesCondition"
 	NodeCondition = "NodeCondition"
 	HelmChart = "HelmChart"
@@ -31,10 +28,6 @@ const (
 
 // UnmanagedClusterSpec defines the desired state of UnmanagedCluster
 type UnmanagedClusterSpec struct {
-	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
-	// Important: Run "make" to regenerate code after modifying this file
-
-	Name string `json:"name,omitempty"`
 	// Services is a list of services created via ServiceTemplates
 	// that could be installed on the target cluster.
 	Services []ServiceSpec `json:"services,omitempty"`
@@ -59,6 +52,7 @@ type UnmanagedClusterSpec struct {
 // UnmanagedClusterStatus defines the observed state of UnmanagedCluster
 type UnmanagedClusterStatus struct {
 	// Flag indicating whether the unmanaged cluster is in the ready state or not
+	// +kubebuilder:default:=false
 	Ready bool `json:"ready"`
 
 	// Conditions contains details for the current state of the ManagedCluster.
diff --git a/api/v1alpha1/unmanagedmachine_types.go b/api/v1alpha1/unmanagedmachine_types.go
index 45e2025d6..a9e9eb41e 100644
--- a/api/v1alpha1/unmanagedmachine_types.go
+++ b/api/v1alpha1/unmanagedmachine_types.go
@@ -18,20 +18,17 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
-// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
-
 // UnmanagedMachineSpec defines the desired state of UnmanagedMachine
 type UnmanagedMachineSpec struct {
-	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
-	// Important: Run "make" to regenerate code after modifying this file
-	ProviderID string `json:"providerID,omitempty"`
-	ClusterName string `json:"clusterName,omitempty"`
+	ProviderID string `json:"providerID,omitempty"`
+	ClusterName string `json:"clusterName,omitempty"`
+	ControlPlane bool `json:"controlPlane,omitempty"`
 }
 
 // UnmanagedMachineStatus defines the observed state of UnmanagedMachine
 type UnmanagedMachineStatus struct {
 	// Flag indicating whether the machine is in the ready state or not
+	// +kubebuilder:default:=false
 	Ready bool `json:"ready,omitempty"`
 	// Conditions contains details for the current state of the ManagedCluster
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
diff --git a/go.mod b/go.mod
index 1a4df884e..e913ba3bb 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,6 @@ require (
 	github.com/fluxcd/source-controller/api v1.4.1
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/go-retryablehttp v0.7.7
-	github.com/k0sproject/k0smotron v1.1.2
 	github.com/onsi/ginkgo/v2 v2.21.0
 	github.com/onsi/gomega v1.35.1
 	github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98
diff --git a/go.sum b/go.sum
index dd2ae8638..b4c781a9a 100644
--- a/go.sum
+++ b/go.sum
@@ -163,11 +163,11 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/gdexlab/go-render v1.0.1 h1:rxqB3vo5s4n1kF0ySmoNeSPRYkEsyHgln4jFIQY7v0U=
+github.com/gdexlab/go-render v1.0.1/go.mod h1:wRi5nW2qfjiGj4mPukH4UV0IknS1cHD4VgFTmJX5JzM=
 github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-asn1-ber/asn1-ber v1.5.6 h1:CYsqysemXfEaQbyrLJmdsCRuufHoLa3P/gGWGl5TDrM=
 github.com/go-asn1-ber/asn1-ber v1.5.6/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/gdexlab/go-render v1.0.1 h1:rxqB3vo5s4n1kF0ySmoNeSPRYkEsyHgln4jFIQY7v0U=
-github.com/gdexlab/go-render v1.0.1/go.mod h1:wRi5nW2qfjiGj4mPukH4UV0IknS1cHD4VgFTmJX5JzM=
 github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
 github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
@@ -291,8 +291,6 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/k0sproject/k0smotron v1.1.2 h1:5jyGugN37Yk64pd/YTcuJwfBAVUx820MGI7zEeNdlRI=
-github.com/k0sproject/k0smotron v1.1.2/go.mod h1:TZVJaCTigFGpKpUkpfIsWPSkpCLAYf73420bI9Gt6n8=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
@@ -676,10 +674,10 @@ k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY
 k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 oras.land/oras-go v1.2.6 h1:z8cmxQXBU8yZ4mkytWqXfo6tZcamPwjsuxYU81xJ8Lk=
 oras.land/oras-go v1.2.6/go.mod h1:OVPc1PegSEe/K8YiLfosrlqlqTN9PUyFvOw5Y9gwrT8=
-sigs.k8s.io/cluster-api v1.8.5 h1:lNA2fPN4fkXEs+oOQlnwxT/4VwRFBpv5kkSoJG8nqBA=
-sigs.k8s.io/cluster-api v1.8.5/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/cluster-api v1.8.5 h1:lNA2fPN4fkXEs+oOQlnwxT/4VwRFBpv5kkSoJG8nqBA=
+sigs.k8s.io/cluster-api v1.8.5/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY=
 sigs.k8s.io/cluster-api-provider-azure v1.17.1 h1:f1sTGfv6hAN9WrxeawE4pQ2nRhEKb7AJjH6MhU/wAzg=
 sigs.k8s.io/cluster-api-provider-azure v1.17.1/go.mod h1:16VtsvIpK8qtNHplG2ZHZ74/JKTzOUQIAWWutjnpvEc=
 sigs.k8s.io/cluster-api-provider-vsphere v1.11.2 h1:4Y8jRyLS1nVM7hny/ZKYY5HSuJ+9LZGg7WBNoZ8H5C0=
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 37f7f0959..f665334d6 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -35,6 +35,7 @@ import (
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
+	"sigs.k8s.io/cluster-api/api/v1beta1"
 	utilyaml "sigs.k8s.io/cluster-api/util/yaml"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -118,6 +119,8 @@ var _ = BeforeSuite(func() {
 	Expect(err).NotTo(HaveOccurred())
 	err = sveltosv1beta1.AddToScheme(scheme.Scheme)
 	Expect(err).NotTo(HaveOccurred())
+	err = v1beta1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
 
 	// +kubebuilder:scaffold:scheme
 
diff --git a/internal/controller/unmanagedcluster_controller.go b/internal/controller/unmanagedcluster_controller.go
index e11e7172c..f6830745f 100644
--- a/internal/controller/unmanagedcluster_controller.go
+++ b/internal/controller/unmanagedcluster_controller.go
@@ -16,12 +16,12 @@ package controller
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/url"
 	"strconv"
 	"strings"
 
-	v1beta12 "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
 	sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1"
 	"github.com/projectsveltos/libsveltos/lib/clusterproxy"
 	corev1 "k8s.io/api/core/v1"
@@ -30,7 +30,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/clientcmd"
 	"sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
@@ -49,32 +48,10 @@ type UnmanagedClusterReconciler struct {
 	Scheme *runtime.Scheme
 }
 
-// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedclusters,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedclusters/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedclusters/finalizers,verbs=update
-
-// Reconcile is part of the main kubernetes reconciliation loop which aims to
-// move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
-// the UnmanagedCluster object against the actual cluster state, and then
-// perform operations to make the cluster state reflect the state specified by
-// the user.
-//
-// For more details, check Reconcile and its Result here:
-// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
 func (r *UnmanagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	l := ctrl.LoggerFrom(ctx)
 	l.Info("Reconciling UnmanagedCluster")
-
-	if err := v1beta12.AddToScheme(r.Client.Scheme()); err != nil {
-		return ctrl.Result{}, err
-	}
-
-	if err := v1beta1.AddToScheme(r.Client.Scheme()); err != nil {
-		return ctrl.Result{}, err
-	}
-
-	unmanagedCluster := new(hmc.UnmanagedCluster)
+	unmanagedCluster := &hmc.UnmanagedCluster{}
 	if err := r.Get(ctx, req.NamespacedName, unmanagedCluster); err != nil {
 		if apierrors.IsNotFound(err) {
 			l.Info("UnmanagedCluster not found, ignoring since object must be deleted")
@@ -94,6 +71,10 @@ func (r *UnmanagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *UnmanagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	if err := v1beta1.AddToScheme(r.Client.Scheme()); err != nil {
+		return err
+	}
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&hmc.UnmanagedCluster{}).
 		Complete(r)
@@ -150,25 +131,23 @@ func (r *UnmanagedClusterReconciler) createCluster(ctx context.Context, unmanage
 			},
 		},
 	}
-	clusterObject.Status.SetTypedPhase(v1beta1.ClusterPhaseUnknown)
+
 	err = r.Client.Create(ctx, clusterObject)
 	if err != nil && !apierrors.IsAlreadyExists(err) {
-		return fmt.Errorf("failed to create unmanagedCluster object %s/%s: %s", unmanagedCluster.Namespace, unmanagedCluster.Name, err)
+		return fmt.Errorf("failed to create unmanagedCluster object %s/%s: %w", unmanagedCluster.Namespace, unmanagedCluster.Name, err)
 	}
 
 	return nil
 }
 
 func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanagedCluster *hmc.UnmanagedCluster) error {
-	l := ctrl.LoggerFrom(ctx)
+	// l := ctrl.LoggerFrom(ctx)
 	nodelist, err := r.getNodeList(ctx, unmanagedCluster)
 	if err != nil {
 		return err
 	}
 
-	kubeConfigSecretName := secret.Name(unmanagedCluster.Name, secret.Kubeconfig)
-
 	// find any existing unmanaged machines for the cluster to see if any need to be cleaned up because
 	// the underlying node was removed
 	existingMachines := &hmc.UnmanagedMachineList{}
@@ -179,13 +158,17 @@ func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanag
 		return fmt.Errorf("failed to list existing unmanaged machines: %w", err)
 	}
 
-	existingMachinesByName := map[string]*hmc.UnmanagedMachine{}
+	existingMachinesByName := make(map[string]*hmc.UnmanagedMachine)
 	for _, existingMachine := range existingMachines.Items {
 		existingMachinesByName[existingMachine.GetName()] = &existingMachine
 	}
 
 	for _, node := range nodelist.Items {
 		delete(existingMachinesByName, node.Name)
+		isControlPlane := false
+		if _, ok := node.Labels[v1beta1.NodeRoleLabelPrefix+"/control-plane"]; ok {
+			isControlPlane = true
+		}
 
 		unmanagedMachine := hmc.UnmanagedMachine{
 			TypeMeta: metav1.TypeMeta{
@@ -200,11 +183,9 @@ func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanag
 				},
 			},
 			Spec: hmc.UnmanagedMachineSpec{
-				ProviderID: node.Spec.ProviderID,
-				ClusterName: unmanagedCluster.Name,
-			},
-			Status: hmc.UnmanagedMachineStatus{
-				Ready: true,
+				ProviderID: node.Spec.ProviderID,
+				ClusterName: unmanagedCluster.Name,
+				ControlPlane: isControlPlane,
 			},
 		}
 
@@ -212,65 +193,6 @@ func (r *UnmanagedClusterReconciler) createMachines(ctx context.Context, unmanag
 		if err != nil && !apierrors.IsAlreadyExists(err) {
 			return fmt.Errorf("failed to create machine: %w", err)
 		}
-
-		ref := types.NamespacedName{Name: unmanagedMachine.Name, Namespace: unmanagedMachine.Namespace}
-		if err := r.Get(ctx, ref, &unmanagedMachine); err != nil {
-			return fmt.Errorf("failed to get unmanaged machine: %w", err)
-		}
-		unmanagedMachine.Status = hmc.UnmanagedMachineStatus{
-			Ready: true,
-		}
-		if err := r.Status().Update(ctx, &unmanagedMachine); err != nil {
-			return fmt.Errorf("failed to update unmanaged machine status: %w", err)
-		}
-
-		l.Info("Create machine", "node", node.Name)
-		machine := v1beta1.Machine{
-			TypeMeta: metav1.TypeMeta{
-				Kind: "Machine",
-				APIVersion: v1beta1.GroupVersion.Identifier(),
-			},
-			ObjectMeta: metav1.ObjectMeta{
-				Name: node.Name,
-				Namespace: unmanagedCluster.Namespace,
-				Labels: map[string]string{v1beta1.GroupVersion.Identifier(): hmc.GroupVersion.Version, v1beta1.ClusterNameLabel: unmanagedCluster.Name},
-			},
-			Spec: v1beta1.MachineSpec{
-				ClusterName: unmanagedCluster.Name,
-				Bootstrap: v1beta1.Bootstrap{
-					DataSecretName: &kubeConfigSecretName,
-				},
-				InfrastructureRef: corev1.ObjectReference{
-					Kind: "UnmanagedMachine",
-					Namespace: unmanagedCluster.Namespace,
-					Name: node.Name,
-					APIVersion: hmc.GroupVersion.Identifier(),
-				},
-				ProviderID: &node.Spec.ProviderID,
-			},
-			Status: v1beta1.MachineStatus{
-				NodeRef: &corev1.ObjectReference{
-					Kind: "Node",
-					Name: node.Name,
-					APIVersion: "v1",
-				},
-				NodeInfo: &corev1.NodeSystemInfo{},
-				CertificatesExpiryDate: nil,
-				BootstrapReady: true,
-				InfrastructureReady: true,
-			},
-		}
-
-		if _, ok := node.Labels[v1beta1.NodeRoleLabelPrefix+"/control-plane"]; ok {
-			if machine.Labels == nil {
-				machine.Labels = make(map[string]string)
-			}
-			machine.Labels[v1beta1.MachineControlPlaneLabel] = "true"
-		}
-		err = r.Create(ctx, &machine)
-		if err != nil && !apierrors.IsAlreadyExists(err) {
-			return fmt.Errorf("failed to create machine: %w", err)
-		}
 	}
 
 	// cleanup any orphaned unmanaged machines and capi machines
@@ -283,6 +205,14 @@
 			ObjectMeta: metav1.ObjectMeta{
 				Name: existingUnmanagedMachine.Name,
 				Namespace: unmanagedCluster.Namespace,
+				OwnerReferences: []metav1.OwnerReference{
+					{
+						APIVersion: hmc.GroupVersion.Identifier(),
+						Kind: "UnmanagedMachine",
+						Name: existingUnmanagedMachine.Name,
+						UID: existingUnmanagedMachine.UID,
+					},
+				},
 			},
 		}); err != nil && !apierrors.IsNotFound(err) {
 			return fmt.Errorf("failed to delete orphaned machine: %w", err)
 		}
@@ -313,29 +243,19 @@ func (r *UnmanagedClusterReconciler) reconcileUnmanagedCluster(ctx context.Conte
 	}
 
 	if err := r.createCluster(ctx, unmanagedCluster); err != nil {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
+		return ctrl.Result{Requeue: true}, err
 	}
 
 	if err := r.createServices(ctx, unmanagedCluster); err != nil {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
+		return ctrl.Result{Requeue: true}, err
 	}
 
 	if err := r.createMachines(ctx, unmanagedCluster); err != nil {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
+		return ctrl.Result{Requeue: true}, err
 	}
 
 	requeue, err := r.updateStatus(ctx, unmanagedCluster)
-	if err != nil {
-		if requeue {
-			return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
-		}
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
-	}
-
-	if requeue {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil
-	}
-	return ctrl.Result{}, nil
+	return ctrl.Result{Requeue: requeue}, err
 }
 
 func (r *UnmanagedClusterReconciler) createServices(ctx context.Context, mc *hmc.UnmanagedCluster) error {
@@ -380,7 +300,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma
 		&hmc.UnmanagedMachine{},
 		deleteAllOpts...,
 	); err != nil && !apierrors.IsNotFound(err) {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err)
+		return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete unmanaged machines: %w", err)
 	}
 
 	if err := r.DeleteAllOf(
@@ -388,7 +308,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma
 		&v1beta1.Machine{},
 		deleteAllOpts...,
 	); err != nil && !apierrors.IsNotFound(err) {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete unmanaged machines: %w", err)
+		return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete unmanaged machines: %w", err)
 	}
 
 	if err := r.Delete(ctx, &corev1.Secret{
@@ -400,7 +320,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma
 			},
 		},
 	}); err != nil && !apierrors.IsNotFound(err) {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster secret: %w", err)
+		return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete cluster secret: %w", err)
 	}
 
 	if err := r.Delete(ctx, &v1beta1.Cluster{
@@ -412,7 +332,7 @@ func (r *UnmanagedClusterReconciler) reconcileDeletion(ctx context.Context, unma
 			},
 		},
 	}); err != nil && !apierrors.IsNotFound(err) {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, fmt.Errorf("failed to delete cluster: %w", err)
+		return ctrl.Result{Requeue: true}, fmt.Errorf("failed to delete cluster: %w", err)
 	}
 
 	if controllerutil.RemoveFinalizer(unmanagedCluster, hmc.UnmanagedClusterFinalizer) {
@@ -431,6 +351,10 @@ func (r *UnmanagedClusterReconciler) updateStatus(ctx context.Context, cluster *
 		return true, err
 	}
 
+	defer func() {
+		err = errors.Join(err, r.Status().Update(ctx, cluster))
+	}()
+
 	allNodeCondition := metav1.Condition{
 		Type: hmc.AllNodesCondition,
 		Status: "True",
@@ -497,9 +421,5 @@ func (r *UnmanagedClusterReconciler) updateStatus(ctx context.Context, cluster *
 		}
 	}
 
-	if err := r.Status().Update(ctx, cluster); err != nil {
-		return true, fmt.Errorf("failed to update unmanaged cluster status: %w", err)
-	}
-
 	return requeue, nil
 }
diff --git a/internal/controller/unmanagedcluster_controller_test.go b/internal/controller/unmanagedcluster_controller_test.go
index 1c404b59b..d70e3e137 100644
--- a/internal/controller/unmanagedcluster_controller_test.go
+++ b/internal/controller/unmanagedcluster_controller_test.go
@@ -17,7 +17,6 @@ package controller
 import (
 	"context"
 
-	"github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
@@ -49,11 +48,9 @@ var _ = Describe("UnmanagedCluster Controller", func() {
 		unmanagedcluster := &hmc.UnmanagedCluster{}
 
 		BeforeEach(func() {
-			Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed())
 			By("creating the custom resource for the Kind UnmanagedCluster")
 
 			secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig)
-
 			secret := &corev1.Secret{
 				TypeMeta: metav1.TypeMeta{
 					Kind: "Secret",
@@ -80,7 +77,6 @@ var _ = Describe("UnmanagedCluster Controller", func() {
 					Namespace: unmanagedClusterNamespace,
 				},
 				Spec: hmc.UnmanagedClusterSpec{
-					Name: unmanagedClusterName,
 					Services: nil,
 					ServicesPriority: 1,
 					StopOnConflict: true,
@@ -124,8 +120,6 @@ var _ = Describe("UnmanagedCluster Controller", func() {
 				NamespacedName: typeNamespacedName,
 			})
 			Expect(err).NotTo(HaveOccurred())
-			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
-			// Example: If you expect a certain status condition after reconciliation, verify it here.
 		})
 	})
 })
diff --git a/internal/controller/unmanagedmachine_controller.go b/internal/controller/unmanagedmachine_controller.go
index 988b046fa..c29d8146a 100644
--- a/internal/controller/unmanagedmachine_controller.go
+++ b/internal/controller/unmanagedmachine_controller.go
@@ -17,6 +17,7 @@ package controller
 import (
 	"context"
 	"fmt"
+	"strconv"
 
 	"github.com/projectsveltos/libsveltos/lib/clusterproxy"
 	corev1 "k8s.io/api/core/v1"
@@ -25,6 +26,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/util/secret"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
@@ -38,14 +41,9 @@ type UnmanagedMachineReconciler struct {
 	Scheme *runtime.Scheme
 }
 
-// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedmachines,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedmachines/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=hmc.mirantis.com.hmc.mirantis.com,resources=unmanagedmachines/finalizers,verbs=update
-
 func (r *UnmanagedMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	l := log.FromContext(ctx)
-
-	unmanagedMachine := new(hmc.UnmanagedMachine)
+	unmanagedMachine := &hmc.UnmanagedMachine{}
 	if err := r.Get(ctx, req.NamespacedName, unmanagedMachine); err != nil {
 		if apierrors.IsNotFound(err) {
 			l.Info("UnmanagedMachine not found, ignoring since object must be deleted")
@@ -55,18 +53,17 @@ func (r *UnmanagedMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		return ctrl.Result{}, err
 	}
 
-	requeue, err := r.reconcileStatus(ctx, unmanagedMachine)
+	requeue, err := r.reconcileMachine(ctx, unmanagedMachine)
 	if err != nil {
-		if requeue {
-			return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
-		}
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err
+		return ctrl.Result{Requeue: requeue}, err
 	}
 
-	if requeue {
-		return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil
+	requeue, err = r.reconcileStatus(ctx, unmanagedMachine)
+	if err != nil {
+		return ctrl.Result{Requeue: requeue}, err
 	}
-	return ctrl.Result{}, nil
+
+	return ctrl.Result{Requeue: requeue}, nil
 }
 
 func (r *UnmanagedMachineReconciler) reconcileStatus(ctx context.Context, unmanagedMachine *hmc.UnmanagedMachine) (bool, error) {
@@ -111,7 +108,63 @@ func (r *UnmanagedMachineReconciler) reconcileStatus(ctx context.Context, unmana
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *UnmanagedMachineReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	if err := v1beta1.AddToScheme(r.Client.Scheme()); err != nil {
+		return err
+	}
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&hmc.UnmanagedMachine{}).
 		Complete(r)
 }
+
+func (r *UnmanagedMachineReconciler) reconcileMachine(ctx context.Context, unmanagedMachine *hmc.UnmanagedMachine) (bool, error) {
+	l := log.FromContext(ctx)
+
+	secretName := secret.Name(unmanagedMachine.Spec.ClusterName, secret.Kubeconfig)
+	l.Info("Create machine", "node", unmanagedMachine.Name)
+	machine := v1beta1.Machine{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "Machine",
+			APIVersion: v1beta1.GroupVersion.Identifier(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: unmanagedMachine.Name,
+			Namespace: unmanagedMachine.Namespace,
+			Labels: map[string]string{
+				v1beta1.GroupVersion.Identifier(): hmc.GroupVersion.Version,
+				v1beta1.ClusterNameLabel: unmanagedMachine.Spec.ClusterName,
+			},
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: hmc.GroupVersion.Identifier(),
+					Kind: "UnmanagedMachine",
+					Name: unmanagedMachine.Name,
+					UID: unmanagedMachine.UID,
+				},
+			},
+		},
+		Spec: v1beta1.MachineSpec{
+			ClusterName: unmanagedMachine.Spec.ClusterName,
+			Bootstrap: v1beta1.Bootstrap{
+				DataSecretName: &secretName,
+			},
+			InfrastructureRef: corev1.ObjectReference{
+				Kind: "UnmanagedMachine",
+				Namespace: unmanagedMachine.Namespace,
+				Name: unmanagedMachine.Name,
+				APIVersion: hmc.GroupVersion.Identifier(),
+			},
+			ProviderID: &unmanagedMachine.Spec.ProviderID,
+		},
+	}
+
+	if machine.Labels == nil {
+		machine.Labels = make(map[string]string)
+	}
+	machine.Labels[v1beta1.MachineControlPlaneLabel] = strconv.FormatBool(unmanagedMachine.Spec.ControlPlane)
+	if err := r.Create(ctx, &machine); err != nil && !apierrors.IsAlreadyExists(err) {
+		return true, fmt.Errorf("failed to create machine: %w", err)
+	}
+
+	return false, nil
+}
diff --git a/internal/controller/unmanagedmachine_controller_test.go b/internal/controller/unmanagedmachine_controller_test.go
index f8d0b2b92..7853c3458 100644
--- a/internal/controller/unmanagedmachine_controller_test.go
+++ b/internal/controller/unmanagedmachine_controller_test.go
@@ -17,7 +17,6 @@ package controller
 import (
 	"context"
 
-	"github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
@@ -48,8 +47,6 @@ var _ = Describe("UnmanagedMachine Controller", func() {
 
 		BeforeEach(func() {
 			By("creating the custom resource for the Kind UnmanagedCluster")
-			Expect(v1beta1.AddToScheme(k8sClient.Scheme())).To(Succeed())
-			Expect(capi.AddToScheme(k8sClient.Scheme())).To(Succeed())
 			secretName := secret.Name(unmanagedClusterName, secret.Kubeconfig)
 
 			secret := &corev1.Secret{
diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml
index 4c8459ad8..883c4a2f0 100644
--- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml
+++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedclusters.yaml
@@ -41,8 +41,6 @@ spec:
             spec:
               description: UnmanagedClusterSpec defines the desired state of UnmanagedCluster
              properties:
-                name:
-                  type: string
                services:
                  description: |-
                    Services is a list of services created via ServiceTemplates
                    that could be installed on the target cluster.
@@ -155,6 +153,7 @@ spec:
                type: object
              type: array
            ready:
+              default: false
              description: Flag indicating whether the unmanaged cluster is in the
                ready state or not
              type: boolean
diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml
index 0482e8710..53473d117 100644
--- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml
+++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_unmanagedmachines.yaml
@@ -48,10 +48,9 @@ spec:
            properties:
              clusterName:
                type: string
+              controlPlane:
+                type: boolean
              providerID:
-                description: |-
-                  INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
-                  Important: Run "make" to regenerate code after modifying this file
                type: string
            type: object
          status:
@@ -116,6 +115,7 @@ spec:
                type: object
              type: array
            ready:
+              default: false
              description: Flag indicating whether the machine is in the ready state
                or not
              type: boolean
diff --git a/test/objects/unmanagedcluster/unmanagedcluster.go b/test/objects/unmanagedcluster/unmanagedcluster.go
index c796e1771..e90052559 100644
--- a/test/objects/unmanagedcluster/unmanagedcluster.go
+++ b/test/objects/unmanagedcluster/unmanagedcluster.go
@@ -14,17 +14,24 @@
 
 package unmanagedcluster
 
-import "github.com/Mirantis/hmc/api/v1alpha1"
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-type Opt func(unmanagedCluster *v1alpha1.UnmanagedCluster)
+	hmc "github.com/Mirantis/hmc/api/v1alpha1"
+)
+
+type Opt func(unmanagedCluster *hmc.UnmanagedCluster)
 
 const (
 	DefaultName = "hmc-uc"
 )
 
-func NewUnmanagedCluster(opts ...Opt) *v1alpha1.UnmanagedCluster {
-	uc := &v1alpha1.UnmanagedCluster{
-		Spec: v1alpha1.UnmanagedClusterSpec{Name: DefaultName},
+func NewUnmanagedCluster(opts ...Opt) *hmc.UnmanagedCluster {
+	uc := &hmc.UnmanagedCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: DefaultName,
+		},
+		Spec: hmc.UnmanagedClusterSpec{},
 	}
 
 	for _, opt := range opts {
@@ -34,7 +41,7 @@
 }
 
 func WithNameAndNamespace(name, namespace string) Opt {
-	return func(uc *v1alpha1.UnmanagedCluster) {
+	return func(uc *hmc.UnmanagedCluster) {
 		uc.Name = name
 		uc.Namespace = namespace
 	}