Fix components cleanup
* add a part-of label to the HelmReleases
  installed by the HMC management part
* clean up HelmReleases only if they were removed
  from the spec and their part-of label value
  equals the management part (see the sketch below)
* small refactor to fix the linter

Closes k0rdent#703
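
In short, and as a hedged sketch only (the package, helper name, and import aliases below are illustrative, not taken from the commit): every HelmRelease installed for the Management object now carries the part-of label, and the cleanup lists only HelmReleases that have both the HMC-managed label and the management part-of value, so releases created for managed clusters are never garbage-collected.

package sketch

import (
	"context"
	"fmt"

	fluxv2 "github.com/fluxcd/helm-controller/api/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	hmcManagedLabelKey   = "hmc.mirantis.com/managed" // hmc.HMCManagedLabelKey
	hmcManagedLabelValue = "true"                     // hmc.HMCManagedLabelValue
	partOfLabel          = "hmc.mirantis.com/part-of" // hmc.PartOfLabel
	partOfManagement     = "management"               // value set by the management reconciler
)

// listManagementReleases (hypothetical helper) returns only the HelmReleases
// that HMC manages *and* that belong to the management part; HelmReleases of
// managed clusters carry the managed label but not this part-of value, so
// they are never candidates for cleanup.
func listManagementReleases(ctx context.Context, c client.Client, systemNamespace string) (*fluxv2.HelmReleaseList, error) {
	releases := new(fluxv2.HelmReleaseList)
	if err := c.List(ctx, releases,
		client.MatchingLabels{
			hmcManagedLabelKey: hmcManagedLabelValue,
			partOfLabel:        partOfManagement,
		},
		client.InNamespace(systemNamespace),
	); err != nil {
		return nil, fmt.Errorf("failed to list HelmReleases: %w", err)
	}
	return releases, nil
}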
zerospiel committed Dec 3, 2024
1 parent 5ddea6f commit 9319b9a
Showing 5 changed files with 77 additions and 27 deletions.
1 change: 1 addition & 0 deletions api/v1alpha1/managedcluster_types.go
@@ -28,6 +28,7 @@ const (
FluxHelmChartNameKey = "helm.toolkit.fluxcd.io/name"
FluxHelmChartNamespaceKey = "helm.toolkit.fluxcd.io/namespace"

PartOfLabel = "hmc.mirantis.com/part-of" // PartOfLabel is the label key whose value contains the name of the release owner
HMCManagedLabelKey = "hmc.mirantis.com/managed"
HMCManagedLabelValue = "true"

19 changes: 8 additions & 11 deletions internal/controller/managedcluster_controller.go
@@ -104,9 +104,7 @@ func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return r.reconcileUpdate(ctx, managedCluster)
}

func (r *ManagedClusterReconciler) setStatusFromChildObjects(
ctx context.Context, managedCluster *hmc.ManagedCluster, gvr schema.GroupVersionResource, conditions []string,
) (requeue bool, _ error) {
func (r *ManagedClusterReconciler) setStatusFromChildObjects(ctx context.Context, managedCluster *hmc.ManagedCluster, gvr schema.GroupVersionResource, conditions []string) (requeue bool, _ error) {
l := ctrl.LoggerFrom(ctx)

resourceConditions, err := status.GetResourceConditions(ctx, managedCluster.Namespace, r.DynamicClient, gvr,
@@ -123,11 +121,11 @@ func (r *ManagedClusterReconciler) setStatusFromChildObjects(
allConditionsComplete := true
for _, metaCondition := range resourceConditions.Conditions {
if slices.Contains(conditions, metaCondition.Type) {
if metaCondition.Status != "True" {
if metaCondition.Status != metav1.ConditionTrue {
allConditionsComplete = false
}

if metaCondition.Reason == "" && metaCondition.Status == "True" {
if metaCondition.Reason == "" && metaCondition.Status == metav1.ConditionTrue {
metaCondition.Message += " is Ready"
metaCondition.Reason = "Succeeded"
}
@@ -359,13 +357,12 @@ func (r *ManagedClusterReconciler) updateCluster(ctx context.Context, mc *hmc.Ma
return ctrl.Result{}, nil
}

func (r *ManagedClusterReconciler) aggregateCapoConditions(ctx context.Context, managedCluster *hmc.ManagedCluster) (bool, error) {
func (r *ManagedClusterReconciler) aggregateCapoConditions(ctx context.Context, managedCluster *hmc.ManagedCluster) (requeue bool, _ error) {
type objectToCheck struct {
gvr schema.GroupVersionResource
conditions []string
}

var needToRequeue bool
var errs error
for _, obj := range []objectToCheck{
{
@@ -385,14 +382,14 @@ func (r *ManagedClusterReconciler) aggregateCapoConditions(ctx context.Context,
conditions: []string{"Available"},
},
} {
requeue, err := r.setStatusFromChildObjects(ctx, managedCluster, obj.gvr, obj.conditions)
needRequeue, err := r.setStatusFromChildObjects(ctx, managedCluster, obj.gvr, obj.conditions)
errs = errors.Join(errs, err)
if requeue {
needToRequeue = true
if needRequeue {
requeue = true
}
}

return needToRequeue, errs
return requeue, errs
}

// updateServices reconciles services provided in ManagedCluster.Spec.Services.
5 changes: 4 additions & 1 deletion internal/controller/management_controller.go
@@ -80,6 +80,8 @@ func (r *ManagementReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return r.Update(ctx, management)
}

const partOfManagement = "management"

func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Management) (ctrl.Result, error) {
l := ctrl.LoggerFrom(ctx)

@@ -148,6 +150,7 @@ func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Manag
DependsOn: component.dependsOn,
TargetNamespace: component.targetNamespace,
CreateNamespace: component.createNamespace,
AddExtraLabels: map[string]string{hmc.PartOfLabel: partOfManagement},
}); err != nil {
errMsg := fmt.Sprintf("Failed to reconcile HelmRelease %s/%s: %s", r.SystemNamespace, component.helmReleaseName, err)
updateComponentsStatus(statusAccumulator, component, nil, errMsg)
@@ -195,7 +198,7 @@ func (r *ManagementReconciler) cleanupRemovedComponents(ctx context.Context, man

managedHelmReleases := new(fluxv2.HelmReleaseList)
if err := r.Client.List(ctx, managedHelmReleases,
client.MatchingLabels{hmc.HMCManagedLabelKey: hmc.HMCManagedLabelValue},
client.MatchingLabels{hmc.HMCManagedLabelKey: hmc.HMCManagedLabelValue, hmc.PartOfLabel: partOfManagement}, // do not remove non-management related components (#703)
client.InNamespace(r.SystemNamespace), // all helmreleases are being installed only in the system namespace
); err != nil {
return fmt.Errorf("failed to list %s: %w", fluxv2.GroupVersion.WithKind(fluxv2.HelmReleaseKind), err)
73 changes: 58 additions & 15 deletions internal/controller/management_controller_test.go
@@ -99,8 +99,9 @@ var _ = Describe("Management Controller", func() {

helmChartName, helmChartNamespace = "helm-chart-test-name", utils.DefaultSystemNamespace

helmReleaseName = someComponentName // WARN: helm release name should be equal to the component name
helmReleaseNamespace = utils.DefaultSystemNamespace
helmReleaseName = someComponentName // WARN: helm release name should be equal to the component name
helmReleaseNamespace = utils.DefaultSystemNamespace
someOtherHelmReleaseName = "managed-cluster-release-name"

timeout = time.Second * 10
interval = time.Millisecond * 250
@@ -180,6 +181,7 @@ var _ = Describe("Management Controller", func() {
Namespace: helmReleaseNamespace,
Labels: map[string]string{
hmcmirantiscomv1alpha1.HMCManagedLabelKey: hmcmirantiscomv1alpha1.HMCManagedLabelValue,
hmcmirantiscomv1alpha1.PartOfLabel: partOfManagement,
},
},
Spec: helmcontrollerv2.HelmReleaseSpec{
@@ -192,6 +194,25 @@
}
}
Expect(k8sClient.Create(ctx, helmRelease)).To(Succeed())

By("Creating a HelmRelease object for some managed cluster")
someOtherHelmRelease := &helmcontrollerv2.HelmRelease{
ObjectMeta: metav1.ObjectMeta{
Name: someOtherHelmReleaseName,
Namespace: helmReleaseNamespace,
Labels: map[string]string{
hmcmirantiscomv1alpha1.HMCManagedLabelKey: hmcmirantiscomv1alpha1.HMCManagedLabelValue,
},
},
Spec: helmcontrollerv2.HelmReleaseSpec{
ChartRef: &helmcontrollerv2.CrossNamespaceSourceReference{
Kind: sourcev1.HelmChartKind,
Name: "any-chart-name",
Namespace: helmChartNamespace,
},
},
}
Expect(k8sClient.Create(ctx, someOtherHelmRelease)).To(Succeed())

By("Creating a Management object with removed component in the spec and containing it in the status")
mgmt := &hmcmirantiscomv1alpha1.Management{
ObjectMeta: metav1.ObjectMeta{
@@ -235,7 +256,11 @@ var _ = Describe("Management Controller", func() {
return fmt.Errorf("expected .status.components[%s] template be %s, got %s", someComponentName, providerTemplateName, v.Template)
}

// HelmRelease
// HelmReleases
if err := k8sClient.Get(ctx, client.ObjectKey{Name: someOtherHelmReleaseName, Namespace: helmReleaseNamespace}, &helmcontrollerv2.HelmRelease{}); err != nil {
return err
}

return k8sClient.Get(ctx, client.ObjectKey{Name: helmReleaseName, Namespace: helmReleaseNamespace}, &helmcontrollerv2.HelmRelease{})
}).WithTimeout(timeout).WithPolling(interval).Should(Succeed())

@@ -261,8 +286,10 @@ var _ = Describe("Management Controller", func() {
Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(mgmt), mgmt)).To(Succeed())
Expect(mgmt.Status.AvailableProviders).To(BeEmpty())

By("Checking the Management components status is populated")
By("Checking the other (managed) helm-release has not been removed")
Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(someOtherHelmRelease), someOtherHelmRelease)).To(Succeed())

By("Checking the Management components status is populated")
Expect(mgmt.Status.Components).To(HaveLen(2)) // required: capi, hmc
Expect(mgmt.Status.Components).To(BeEquivalentTo(map[string]hmcmirantiscomv1alpha1.ComponentStatus{
hmcmirantiscomv1alpha1.CoreHMCName: {
@@ -277,15 +304,13 @@ var _ = Describe("Management Controller", func() {
},
}))

By("Update core HelmReleases with Ready condition")

By("Updating core HelmReleases with Ready condition")
for _, coreComponent := range coreComponents {
helmRelease := &helmcontrollerv2.HelmRelease{}
err := k8sClient.Get(ctx, types.NamespacedName{
Expect(k8sClient.Get(ctx, types.NamespacedName{
Namespace: helmReleaseNamespace,
Name: coreComponent.helmReleaseName,
}, helmRelease)
Expect(err).NotTo(HaveOccurred())
}, helmRelease)).To(Succeed())

fluxconditions.Set(helmRelease, &metav1.Condition{
Type: fluxmeta.ReadyCondition,
@@ -302,8 +327,7 @@ var _ = Describe("Management Controller", func() {
Expect(k8sClient.Status().Update(ctx, helmRelease)).To(Succeed())
}

By("Create Cluster API CoreProvider object")

By("Creating Cluster API CoreProvider object")
coreProvider := &capioperator.CoreProvider{
ObjectMeta: metav1.ObjectMeta{
Name: "capi",
@@ -314,19 +338,30 @@ var _ = Describe("Management Controller", func() {
},
}
Expect(k8sClient.Create(ctx, coreProvider)).To(Succeed())

coreProvider.Status.Conditions = clusterv1.Conditions{
{
Type: clusterv1.ReadyCondition,
Status: "True",
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Now(),
},
}

Expect(k8sClient.Status().Update(ctx, coreProvider)).To(Succeed())
Eventually(func() error {
if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(coreProvider), coreProvider); err != nil {
return err
}
if l := len(coreProvider.Status.Conditions); l != 1 {
return fmt.Errorf("expected .status.conditions length to be exactly 1, got %d", l)
}
cond := coreProvider.Status.Conditions[0]
if cond.Type != clusterv1.ReadyCondition || cond.Status != corev1.ConditionTrue {
return fmt.Errorf("unexpected status condition: type %s, status %s", cond.Type, cond.Status)
}

By("Reconciling the Management object again to ensure the components status is updated")
return nil
}).WithTimeout(timeout).WithPolling(interval).Should(Succeed())

By("Reconciling the Management object again to ensure the components status is updated")
_, err = controllerReconciler.Reconcile(ctx, reconcile.Request{
NamespacedName: client.ObjectKeyFromObject(mgmt),
})
@@ -356,9 +391,17 @@ var _ = Describe("Management Controller", func() {
return apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(providerTemplateRequired), &hmcmirantiscomv1alpha1.ProviderTemplate{}))
}).WithTimeout(timeout).WithPolling(interval).Should(BeTrue())

Expect(k8sClient.Delete(ctx, someOtherHelmRelease)).To(Succeed())
Eventually(func() bool {
return apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(someOtherHelmRelease), &helmcontrollerv2.HelmRelease{}))
}).WithTimeout(timeout).WithPolling(interval).Should(BeTrue())

coreProvider.Finalizers = nil
Expect(k8sClient.Update(ctx, coreProvider)).To(Succeed())
Expect(k8sClient.Delete(ctx, coreProvider)).To(Succeed())
Eventually(func() bool {
return apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(coreProvider), &capioperator.CoreProvider{}))
}).WithTimeout(timeout).WithPolling(interval).Should(BeTrue())
})
})
})
6 changes: 6 additions & 0 deletions internal/helm/release.go
@@ -38,6 +38,7 @@ type ReconcileHelmReleaseOpts struct {
OwnerReference *metav1.OwnerReference
ChartRef *hcv2.CrossNamespaceSourceReference
ReconcileInterval *time.Duration
AddExtraLabels map[string]string
TargetNamespace string
DependsOn []meta.NamespacedObjectReference
CreateNamespace bool
@@ -60,7 +61,12 @@ func ReconcileHelmRelease(ctx context.Context,
if hr.Labels == nil {
hr.Labels = make(map[string]string)
}

for k, v := range opts.AddExtraLabels {
hr.Labels[k] = v
}
hr.Labels[hmc.HMCManagedLabelKey] = hmc.HMCManagedLabelValue

if opts.OwnerReference != nil {
hr.OwnerReferences = []metav1.OwnerReference{*opts.OwnerReference}
}
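
For clarity, a minimal stand-alone sketch (the helper name is hypothetical) of the merge order the hunk above establishes: caller-supplied AddExtraLabels are copied first, then the HMC-managed label is set, so an extra label can never override it.

// mergeReleaseLabels is a hypothetical illustration of the ordering above:
// extra labels are applied first, then the managed label, which therefore always wins.
func mergeReleaseLabels(existing, extra map[string]string) map[string]string {
	if existing == nil {
		existing = make(map[string]string)
	}
	for k, v := range extra {
		existing[k] = v
	}
	existing["hmc.mirantis.com/managed"] = "true" // hmc.HMCManagedLabelKey / hmc.HMCManagedLabelValue
	return existing
}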
