From a26ae734e54af8af1e01630e5646295f590d39b0 Mon Sep 17 00:00:00 2001 From: Leela Venkaiah G Date: Tue, 3 Dec 2024 11:25:29 +0000 Subject: [PATCH 1/2] controllers: remove storageclaim from manager and move functionality first cut of removal of storageclaim controller and moving the work over to the storageclient controller. Existing resources that are owned by storageclaims will become unmanaged but protected against deletion due to finalizer for which current expectation is to be removed manually no provider changes done atm and the expectation is, provider would send info that it used to send for storageclaim now to storageclient and any opportunity to change the message struct will be made and correspondingly a new PR will be raised. Signed-off-by: Leela Venkaiah G --- cmd/main.go | 9 - .../controller/storageclaim_controller.go | 3 - .../controller/storageclient_controller.go | 395 ++++++++++++------ 3 files changed, 256 insertions(+), 151 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index 9dcb5e20..a97ed52a 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -181,15 +181,6 @@ func main() { os.Exit(1) } - if err = (&controller.StorageClaimReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - OperatorNamespace: utils.GetOperatorNamespace(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "StorageClaim") - os.Exit(1) - } - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") os.Exit(1) diff --git a/internal/controller/storageclaim_controller.go b/internal/controller/storageclaim_controller.go index 9111654a..afa4afb3 100644 --- a/internal/controller/storageclaim_controller.go +++ b/internal/controller/storageclaim_controller.go @@ -51,9 +51,6 @@ const ( storageClaimFinalizer = "storageclaim.ocs.openshift.io" storageClaimAnnotation = "ocs.openshift.io/storageclaim" keyRotationAnnotation = 
"keyrotation.csiaddons.openshift.io/schedule" - - pvClusterIDIndexName = "index:persistentVolumeClusterID" - vscClusterIDIndexName = "index:volumeSnapshotContentCSIDriver" ) // StorageClaimReconciler reconciles a StorageClaim object diff --git a/internal/controller/storageclient_controller.go b/internal/controller/storageclient_controller.go index c1585c13..f3c651ee 100644 --- a/internal/controller/storageclient_controller.go +++ b/internal/controller/storageclient_controller.go @@ -20,21 +20,27 @@ import ( "context" "encoding/json" "fmt" - quotav1 "github.com/openshift/api/quota/v1" - "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1" - "github.com/red-hat-storage/ocs-client-operator/pkg/utils" "os" + "slices" "strings" + "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1" + "github.com/red-hat-storage/ocs-client-operator/pkg/templates" + "github.com/red-hat-storage/ocs-client-operator/pkg/utils" + csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1" + replicationv1a1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1" configv1 "github.com/openshift/api/config/v1" + quotav1 "github.com/openshift/api/quota/v1" opv1a1 "github.com/operator-framework/api/pkg/operators/v1alpha1" providerClient "github.com/red-hat-storage/ocs-operator/services/provider/api/v4/client" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -55,54 +61,77 @@ const ( GetStorageConfig = "GetStorageConfig" AcknowledgeOnboarding = "AcknowledgeOnboarding" - storageClientNameLabel = "ocs.openshift.io/storageclient.name" - storageClientFinalizer = 
"storageclient.ocs.openshift.io" - storageClaimProcessedAnnotationKey = "ocs.openshift.io/storageclaim.processed" - storageClientDefaultAnnotationKey = "ocs.openshift.io/storageclient.default" + storageClientNameLabel = "ocs.openshift.io/storageclient.name" + storageClientFinalizer = "storageclient.ocs.openshift.io" // indexes for caching - ownerIndexName = "index:ownerUID" + pvClusterIDIndexName = "index:persistentVolumeClusterID" + vscClusterIDIndexName = "index:volumeSnapshotContentCSIDriver" csvPrefix = "ocs-client-operator" ) // StorageClientReconciler reconciles a StorageClient object type StorageClientReconciler struct { - ctx context.Context client.Client - Log klog.Logger - Scheme *runtime.Scheme - recorder *utils.EventReporter - storageClient *v1alpha1.StorageClient + Scheme *runtime.Scheme OperatorNamespace string + + log klog.Logger + ctx context.Context + recorder *utils.EventReporter + storageClient *v1alpha1.StorageClient + storageClientHash string } // SetupWithManager sets up the controller with the Manager. 
func (r *StorageClientReconciler) SetupWithManager(mgr ctrl.Manager) error { ctx := context.Background() - if err := mgr.GetCache().IndexField(ctx, &v1alpha1.StorageClaim{}, ownerIndexName, func(obj client.Object) []string { - refs := obj.GetOwnerReferences() - var owners []string - for i := range refs { - owners = append(owners, string(refs[i].UID)) + csiDrivers := []string{templates.RBDDriverName, templates.CephFsDriverName} + if err := mgr.GetCache().IndexField(ctx, &corev1.PersistentVolume{}, pvClusterIDIndexName, func(o client.Object) []string { + pv := o.(*corev1.PersistentVolume) + if pv != nil && + pv.Spec.CSI != nil && + slices.Contains(csiDrivers, pv.Spec.CSI.Driver) && + pv.Spec.CSI.VolumeAttributes["clusterID"] != "" { + return []string{pv.Spec.CSI.VolumeAttributes["clusterID"]} + } + return nil + }); err != nil { + return fmt.Errorf("unable to set up FieldIndexer for PV cluster id: %v", err) + } + if err := mgr.GetCache().IndexField(ctx, &snapapi.VolumeSnapshotContent{}, vscClusterIDIndexName, func(o client.Object) []string { + vsc := o.(*snapapi.VolumeSnapshotContent) + if vsc != nil && + slices.Contains(csiDrivers, vsc.Spec.Driver) && + vsc.Status != nil && + vsc.Status.SnapshotHandle != nil { + parts := strings.Split(*vsc.Status.SnapshotHandle, "-") + if len(parts) == 9 { + // second entry in the volumeID is clusterID which is unique across the cluster + return []string{parts[2]} + } } - return owners + return nil }); err != nil { - return fmt.Errorf("unable to set up FieldIndexer for StorageClaim's owner uid: %v", err) + return fmt.Errorf("unable to set up FieldIndexer for VSC csi driver name: %v", err) } r.recorder = utils.NewEventReporter(mgr.GetEventRecorderFor("controller_storageclient")) generationChangePredicate := predicate.GenerationChangedPredicate{} bldr := ctrl.NewControllerManagedBy(mgr). For(&v1alpha1.StorageClient{}). - Owns(&v1alpha1.StorageClaim{}). Owns(&batchv1.CronJob{}). 
Owns(&quotav1.ClusterResourceQuota{}, builder.WithPredicates(generationChangePredicate)). Owns(&nbv1.NooBaa{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). Owns(&corev1.Secret{}). Owns(&csiopv1a1.CephConnection{}, builder.WithPredicates(generationChangePredicate)). - Owns(&csiopv1a1.ClientProfileMapping{}, builder.WithPredicates(generationChangePredicate)) + Owns(&csiopv1a1.ClientProfileMapping{}, builder.WithPredicates(generationChangePredicate)). + Owns(&storagev1.StorageClass{}). + Owns(&snapapi.VolumeSnapshotClass{}). + Owns(&replicationv1a1.VolumeReplicationClass{}, builder.WithPredicates(generationChangePredicate)). + Owns(&csiopv1a1.ClientProfile{}, builder.WithPredicates(generationChangePredicate)) return bldr.Complete(r) } @@ -118,21 +147,27 @@ func (r *StorageClientReconciler) SetupWithManager(mgr ctrl.Manager) error { //+kubebuilder:rbac:groups=noobaa.io,resources=noobaas,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups=csi.ceph.io,resources=clientprofilemappings,verbs=get;list;update;create;watch;delete +//+kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;list;watch;create;delete +//+kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch +//+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotcontents,verbs=get;list;watch +//+kubebuilder:rbac:groups=csi.ceph.io,resources=clientprofiles,verbs=get;list;update;create;watch;delete +//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplicationclasses,verbs=get;list;watch;create;delete func (r *StorageClientReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { var err error r.ctx = ctx - r.Log = log.FromContext(ctx, "StorageClient", 
req) - r.Log.Info("Reconciling StorageClient") + r.log = log.FromContext(ctx, "StorageClient", req) + r.log.Info("Reconciling StorageClient") r.storageClient = &v1alpha1.StorageClient{} r.storageClient.Name = req.Name if err = r.get(r.storageClient); err != nil { if kerrors.IsNotFound(err) { - r.Log.Info("StorageClient resource not found. Ignoring since object must be deleted.") + r.log.Info("StorageClient resource not found. Ignoring since object must be deleted.") return reconcile.Result{}, nil } - r.Log.Error(err, "Failed to get StorageClient.") + r.log.Error(err, "Failed to get StorageClient.") return reconcile.Result{}, fmt.Errorf("failed to get StorageClient: %v", err) } @@ -141,12 +176,13 @@ func (r *StorageClientReconciler) Reconcile(ctx context.Context, req ctrl.Reques return reconcile.Result{}, nil } + r.storageClientHash = utils.GetMD5Hash(r.storageClient.Name) result, reconcileErr := r.reconcilePhases() // Apply status changes to the StorageClient statusErr := r.Client.Status().Update(ctx, r.storageClient) if statusErr != nil { - r.Log.Error(statusErr, "Failed to update StorageClient status.") + r.log.Error(statusErr, "Failed to update StorageClient status.") } if reconcileErr != nil { err = reconcileErr @@ -169,26 +205,16 @@ func (r *StorageClientReconciler) reconcilePhases() (ctrl.Result, error) { return r.deletionPhase(externalClusterClient) } - updateStorageClient := false storageClients := &v1alpha1.StorageClientList{} if err := r.list(storageClients); err != nil { - r.Log.Error(err, "unable to list storage clients") + r.log.Error(err, "unable to list storage clients") return ctrl.Result{}, err } - if len(storageClients.Items) == 1 && storageClients.Items[0].Name == r.storageClient.Name { - if utils.AddAnnotation(r.storageClient, storageClientDefaultAnnotationKey, "true") { - updateStorageClient = true - } - } // ensure finalizer if controllerutil.AddFinalizer(r.storageClient, storageClientFinalizer) { r.storageClient.Status.Phase = 
v1alpha1.StorageClientInitializing - r.Log.Info("Finalizer not found for StorageClient. Adding finalizer.", "StorageClient", r.storageClient.Name) - updateStorageClient = true - } - - if updateStorageClient { + r.log.Info("Finalizer not found for StorageClient. Adding finalizer.", "StorageClient", r.storageClient.Name) if err := r.update(r.storageClient); err != nil { return reconcile.Result{}, fmt.Errorf("failed to update StorageClient: %v", err) } @@ -302,20 +328,97 @@ func (r *StorageClientReconciler) reconcilePhases() (ctrl.Result, error) { if err != nil { return reconcile.Result{}, fmt.Errorf("failed to create remote noobaa: %v", err) } - } - } - if r.storageClient.GetAnnotations()[storageClaimProcessedAnnotationKey] != "true" { - if err := r.reconcileBlockStorageClaim(); err != nil { - return reconcile.Result{}, err - } + case "StorageClass": + data := map[string]string{} + err = json.Unmarshal(eResource.Data, &data) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to unmarshal storage configuration response: %v", err) + } + var storageClass *storagev1.StorageClass + data["csi.storage.k8s.io/provisioner-secret-namespace"] = r.OperatorNamespace + data["csi.storage.k8s.io/node-stage-secret-namespace"] = r.OperatorNamespace + data["csi.storage.k8s.io/controller-expand-secret-namespace"] = r.OperatorNamespace + + if eResource.Name == "cephfs" { + storageClass = r.getCephFSStorageClass() + } else if eResource.Name == "ceph-rbd" { + storageClass = r.getCephRBDStorageClass() + } + data["clusterID"] = r.storageClientHash + err = utils.CreateOrReplace(r.ctx, r.Client, storageClass, func() error { + if err := r.own(storageClass); err != nil { + return fmt.Errorf("failed to own Storage Class resource: %v", err) + } + utils.AddLabels(storageClass, eResource.Labels) + storageClass.Parameters = data + return nil + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create or update StorageClass: %s", err) + } + case 
"VolumeSnapshotClass": + data := map[string]string{} + err = json.Unmarshal(eResource.Data, &data) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to unmarshal storage configuration response: %v", err) + } + var volumeSnapshotClass *snapapi.VolumeSnapshotClass + data["csi.storage.k8s.io/snapshotter-secret-namespace"] = r.OperatorNamespace + if eResource.Name == "cephfs" { + volumeSnapshotClass = r.getCephFSVolumeSnapshotClass() + } else if eResource.Name == "ceph-rbd" { + volumeSnapshotClass = r.getCephRBDVolumeSnapshotClass() + } + data["clusterID"] = r.storageClientHash + err = utils.CreateOrReplace(r.ctx, r.Client, volumeSnapshotClass, func() error { + if err := r.own(volumeSnapshotClass); err != nil { + return fmt.Errorf("failed to own VolumeSnapshotClass resource: %v", err) + } + utils.AddLabels(volumeSnapshotClass, eResource.Labels) + volumeSnapshotClass.Parameters = data + return nil + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create or update VolumeSnapshotClass: %s", err) + } + case "VolumeReplicationClass": + vrc := &replicationv1a1.VolumeReplicationClass{} + vrc.Name = eResource.Name + err := utils.CreateOrReplace(r.ctx, r.Client, vrc, func() error { + if err := r.own(vrc); err != nil { + return fmt.Errorf("failed to own VolumeReplicationClass resource: %v", err) + } + if err := json.Unmarshal(eResource.Data, &vrc.Spec); err != nil { + return fmt.Errorf("failed to unmarshall VolumeReplicationClass spec: %v", err) + } + vrc.Spec.Parameters["replication.storage.openshift.io/replication-secret-namespace"] = r.OperatorNamespace - if err := r.reconcileSharedfileStorageClaim(); err != nil { - return reconcile.Result{}, err - } + utils.AddLabels(vrc, eResource.Labels) + utils.AddAnnotations(vrc, eResource.Annotations) - utils.AddAnnotation(r.storageClient, storageClaimProcessedAnnotationKey, "true") - if err := r.update(r.storageClient); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to update 
StorageClient with claim processed annotation: %v", err) + return nil + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create or update VolumeReplicationClass: %s", err) + } + case "ClientProfile": + clientProfile := &csiopv1a1.ClientProfile{} + clientProfile.Name = r.storageClientHash + clientProfile.Namespace = r.OperatorNamespace + if _, err := controllerutil.CreateOrUpdate(r.ctx, r.Client, clientProfile, func() error { + if err := r.own(clientProfile); err != nil { + return fmt.Errorf("failed to own clientProfile resource: %v", err) + } + if err := json.Unmarshal(eResource.Data, &clientProfile.Spec); err != nil { + return fmt.Errorf("failed to unmarshall clientProfile spec: %v", err) + } + clientProfile.Spec.CephConnectionRef = corev1.LocalObjectReference{ + Name: r.storageClient.Name, + } + return nil + }); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to reconcile clientProfile: %v", err) + } } } @@ -352,37 +455,39 @@ func (r *StorageClientReconciler) createOrUpdate(obj client.Object, f controller if err != nil { return err } - r.Log.Info(fmt.Sprintf("%s successfully %s", obj.GetObjectKind(), result), "name", obj.GetName()) + r.log.Info(fmt.Sprintf("%s successfully %s", obj.GetObjectKind(), result), "name", obj.GetName()) return nil } func (r *StorageClientReconciler) deletionPhase(externalClusterClient *providerClient.OCSProviderClient) (ctrl.Result, error) { - // TODO Need to take care of deleting the SCC created for this - // storageClient and also the default SCC created for this storageClient r.storageClient.Status.Phase = v1alpha1.StorageClientOffboarding - if err := r.deleteOwnedStorageClaims(); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to delete storageclaims owned by storageclient %v: %v", r.storageClient.Name, err) + if exist, err := r.hasPersistentVolumes(); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to verify persistentvolumes dependent on storageclient %q: %v", 
r.storageClient.Name, err) + } else if exist { + return reconcile.Result{}, fmt.Errorf("one or more persistentvolumes exist that are dependent on storageclient %s", r.storageClient.Name) } - if err := r.verifyNoStorageClaimsExist(); err != nil { - r.Log.Error(err, "still storageclaims exist for this storageclient") - return reconcile.Result{}, fmt.Errorf("still storageclaims exist for this storageclient: %v", err) + if exist, err := r.hasVolumeSnapshotContents(); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to verify volumesnapshotcontents dependent on storageclient %q: %v", r.storageClient.Name, err) + } else if exist { + return reconcile.Result{}, fmt.Errorf("one or more volumesnapshotcontents exist that are dependent on storageclient %s", r.storageClient.Name) } + if res, err := r.offboardConsumer(externalClusterClient); err != nil { - r.Log.Error(err, "Offboarding in progress.") + r.log.Error(err, "Offboarding in progress.") } else if !res.IsZero() { // result is not empty return res, nil } if controllerutil.RemoveFinalizer(r.storageClient, storageClientFinalizer) { - r.Log.Info("removing finalizer from StorageClient.", "StorageClient", r.storageClient.Name) + r.log.Info("removing finalizer from StorageClient.", "StorageClient", r.storageClient.Name) if err := r.update(r.storageClient); err != nil { - r.Log.Info("Failed to remove finalizer from StorageClient", "StorageClient", r.storageClient.Name) + r.log.Info("Failed to remove finalizer from StorageClient", "StorageClient", r.storageClient.Name) return reconcile.Result{}, fmt.Errorf("failed to remove finalizer from StorageClient: %v", err) } } - r.Log.Info("StorageClient is offboarded", "StorageClient", r.storageClient.Name) + r.log.Info("StorageClient is offboarded", "StorageClient", r.storageClient.Name) return reconcile.Result{}, nil } @@ -406,7 +511,7 @@ func (r *StorageClientReconciler) onboardConsumer(externalClusterClient *provide clusterVersion := &configv1.ClusterVersion{} 
clusterVersion.Name = "version" if err := r.get(clusterVersion); err != nil { - r.Log.Error(err, "failed to get the clusterVersion version of the OCP cluster") + r.log.Error(err, "failed to get the clusterVersion version of the OCP cluster") return reconcile.Result{}, fmt.Errorf("failed to get the clusterVersion version of the OCP cluster: %v", err) } @@ -436,14 +541,14 @@ func (r *StorageClientReconciler) onboardConsumer(externalClusterClient *provide if response.StorageConsumerUUID == "" { err = fmt.Errorf("storage provider response is empty") - r.Log.Error(err, "empty response") + r.log.Error(err, "empty response") return reconcile.Result{}, err } r.storageClient.Status.ConsumerID = response.StorageConsumerUUID r.storageClient.Status.Phase = v1alpha1.StorageClientOnboarding - r.Log.Info("onboarding started") + r.log.Info("onboarding started") return reconcile.Result{Requeue: true}, nil } @@ -454,12 +559,12 @@ func (r *StorageClientReconciler) acknowledgeOnboarding(externalClusterClient *p if st, ok := status.FromError(err); ok { r.logGrpcErrorAndReportEvent(AcknowledgeOnboarding, err, st.Code()) } - r.Log.Error(err, "Failed to acknowledge onboarding.") + r.log.Error(err, "Failed to acknowledge onboarding.") return reconcile.Result{}, fmt.Errorf("failed to acknowledge onboarding: %v", err) } r.storageClient.Status.Phase = v1alpha1.StorageClientConnected - r.Log.Info("Onboarding is acknowledged successfully.") + r.log.Info("Onboarding is acknowledged successfully.") return reconcile.Result{Requeue: true}, nil } @@ -477,42 +582,6 @@ func (r *StorageClientReconciler) offboardConsumer(externalClusterClient *provid return reconcile.Result{}, nil } -func (r *StorageClientReconciler) deleteOwnedStorageClaims() error { - storageClaims := &v1alpha1.StorageClaimList{} - if err := r.list(storageClaims, client.MatchingFields{ownerIndexName: string(r.storageClient.UID)}); err != nil { - return fmt.Errorf("failed to list storageClaims via owner reference: %v", err) - } - - for 
idx := range storageClaims.Items { - storageClaim := &storageClaims.Items[idx] - if err := r.delete(storageClaim); err != nil { - return fmt.Errorf("failed to delete storageClaim %v: %v", storageClaim.Name, err) - } - } - return nil -} - -func (r *StorageClientReconciler) verifyNoStorageClaimsExist() error { - - storageClaims := &v1alpha1.StorageClaimList{} - if err := r.list(storageClaims); err != nil { - return fmt.Errorf("failed to list storageClaims: %v", err) - } - - for idx := range storageClaims.Items { - storageClaim := &storageClaims.Items[idx] - if (storageClaim.Spec.StorageClient == "" && r.storageClient.Annotations[storageClientDefaultAnnotationKey] == "true") || - storageClaim.Spec.StorageClient == r.storageClient.Name { - err := fmt.Errorf("failed to cleanup resources. storageClaims are present on the cluster") - r.recorder.ReportIfNotPresent(r.storageClient, corev1.EventTypeWarning, "Cleanup", err.Error()) - r.Log.Error(err, "Waiting for all storageClaims to be deleted.") - return err - } - } - - return nil -} - func (r *StorageClientReconciler) logGrpcErrorAndReportEvent(grpcCallName string, err error, errCode codes.Code) { var msg, eventReason, eventType string @@ -556,7 +625,7 @@ func (r *StorageClientReconciler) logGrpcErrorAndReportEvent(grpcCallName string } if msg != "" { - r.Log.Error(err, "StorageProvider:"+grpcCallName+":"+msg) + r.log.Error(err, "StorageProvider:"+grpcCallName+":"+msg) r.recorder.ReportIfNotPresent(r.storageClient, eventType, eventReason, msg) } } @@ -564,7 +633,7 @@ func (r *StorageClientReconciler) logGrpcErrorAndReportEvent(grpcCallName string func (r *StorageClientReconciler) reconcileClientStatusReporterJob() (reconcile.Result, error) { cronJob := &batchv1.CronJob{} // maximum characters allowed for cronjob name is 52 and below interpolation creates 47 characters - cronJob.Name = fmt.Sprintf("storageclient-%s-status-reporter", utils.GetMD5Hash(r.storageClient.Name)[:16]) + cronJob.Name = 
fmt.Sprintf("storageclient-%s-status-reporter", r.storageClientHash[:16]) cronJob.Namespace = r.OperatorNamespace var podDeadLineSeconds int64 = 120 @@ -631,36 +700,95 @@ func (r *StorageClientReconciler) reconcileClientStatusReporterJob() (reconcile. return reconcile.Result{}, nil } -func (r *StorageClientReconciler) list(obj client.ObjectList, listOptions ...client.ListOption) error { - return r.Client.List(r.ctx, obj, listOptions...) +func (r *StorageClientReconciler) hasPersistentVolumes() (bool, error) { + pvList := &corev1.PersistentVolumeList{} + if err := r.list(pvList, client.MatchingFields{pvClusterIDIndexName: r.storageClientHash}, client.Limit(1)); err != nil { + return false, fmt.Errorf("failed to list persistent volumes: %v", err) + } + + if len(pvList.Items) != 0 { + r.log.Info(fmt.Sprintf("PersistentVolumes referring storageclient %q exists", r.storageClient.Name)) + return true, nil + } + + return false, nil } -func (r *StorageClientReconciler) reconcileBlockStorageClaim() error { - blockClaim := &v1alpha1.StorageClaim{} - blockClaim.Name = fmt.Sprintf("%s-ceph-rbd", r.storageClient.Name) - blockClaim.Spec.Type = "block" - blockClaim.Spec.StorageClient = r.storageClient.Name - if err := r.own(blockClaim); err != nil { - return fmt.Errorf("failed to own storageclaim of type block: %v", err) +func (r *StorageClientReconciler) hasVolumeSnapshotContents() (bool, error) { + vscList := &snapapi.VolumeSnapshotContentList{} + if err := r.list(vscList, client.MatchingFields{vscClusterIDIndexName: r.storageClientHash}); err != nil { + return false, fmt.Errorf("failed to list volume snapshot content resources: %v", err) } - if err := r.create(blockClaim); err != nil && !kerrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create block storageclaim: %v", err) + + if len(vscList.Items) != 0 { + r.log.Info(fmt.Sprintf("VolumeSnapshotContent referring storageclient %q exists", r.storageClient.Name)) + return true, nil } - return nil + + return false, nil 
} -func (r *StorageClientReconciler) reconcileSharedfileStorageClaim() error { - sharedfileClaim := &v1alpha1.StorageClaim{} - sharedfileClaim.Name = fmt.Sprintf("%s-cephfs", r.storageClient.Name) - sharedfileClaim.Spec.Type = "sharedfile" - sharedfileClaim.Spec.StorageClient = r.storageClient.Name - if err := r.own(sharedfileClaim); err != nil { - return fmt.Errorf("failed to own storageclaim of type sharedfile: %v", err) +func (r *StorageClientReconciler) getCephFSStorageClass() *storagev1.StorageClass { + pvReclaimPolicy := corev1.PersistentVolumeReclaimDelete + allowVolumeExpansion := true + storageClass := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-cephfs", r.storageClient.Name), + Annotations: map[string]string{ + "description": "Provides RWO and RWX Filesystem volumes", + }, + }, + ReclaimPolicy: &pvReclaimPolicy, + AllowVolumeExpansion: &allowVolumeExpansion, + Provisioner: templates.CephFsDriverName, } - if err := r.create(sharedfileClaim); err != nil && !kerrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to create sharedfile storageclaim: %v", err) + return storageClass +} + +func (r *StorageClientReconciler) getCephRBDStorageClass() *storagev1.StorageClass { + pvReclaimPolicy := corev1.PersistentVolumeReclaimDelete + allowVolumeExpansion := true + storageClass := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-rbd", r.storageClient.Name), + Annotations: map[string]string{ + "description": "Provides RWO Filesystem volumes, and RWO and RWX Block volumes", + "reclaimspace.csiaddons.openshift.io/schedule": "@weekly", + }, + }, + ReclaimPolicy: &pvReclaimPolicy, + AllowVolumeExpansion: &allowVolumeExpansion, + Provisioner: templates.RBDDriverName, } - return nil + + // TODO: storageclass should contain keyrotation annotation while sending from provider + return storageClass +} + +func (r *StorageClientReconciler) getCephFSVolumeSnapshotClass() *snapapi.VolumeSnapshotClass { + 
volumesnapshotclass := &snapapi.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-cephfs", r.storageClient.Name), + }, + Driver: templates.CephFsDriverName, + DeletionPolicy: snapapi.VolumeSnapshotContentDelete, + } + return volumesnapshotclass +} + +func (r *StorageClientReconciler) getCephRBDVolumeSnapshotClass() *snapapi.VolumeSnapshotClass { + volumesnapshotclass := &snapapi.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-rbd", r.storageClient.Name), + }, + Driver: templates.RBDDriverName, + DeletionPolicy: snapapi.VolumeSnapshotContentDelete, + } + return volumesnapshotclass +} + +func (r *StorageClientReconciler) list(obj client.ObjectList, listOptions ...client.ListOption) error { + return r.Client.List(r.ctx, obj, listOptions...) } func (r *StorageClientReconciler) get(obj client.Object, opts ...client.GetOption) error { @@ -672,17 +800,6 @@ func (r *StorageClientReconciler) update(obj client.Object, opts ...client.Updat return r.Update(r.ctx, obj, opts...) } -func (r *StorageClientReconciler) create(obj client.Object, opts ...client.CreateOption) error { - return r.Create(r.ctx, obj, opts...) 
-} - -func (r *StorageClientReconciler) delete(obj client.Object, opts ...client.DeleteOption) error { - if err := r.Delete(r.ctx, obj, opts...); err != nil && !kerrors.IsNotFound(err) { - return err - } - return nil -} - func (r *StorageClientReconciler) own(dependent metav1.Object) error { return controllerutil.SetOwnerReference(r.storageClient, dependent, r.Scheme) } From c9f22c0d3b38b199a844a4c1e6f1318f0528d44b Mon Sep 17 00:00:00 2001 From: Leela Venkaiah G Date: Wed, 4 Dec 2024 07:37:26 +0000 Subject: [PATCH 2/2] controllers: remove storageclaim controller Signed-off-by: Leela Venkaiah G --- .../controller/storageclaim_controller.go | 593 ------------------ 1 file changed, 593 deletions(-) delete mode 100644 internal/controller/storageclaim_controller.go diff --git a/internal/controller/storageclaim_controller.go b/internal/controller/storageclaim_controller.go deleted file mode 100644 index afa4afb3..00000000 --- a/internal/controller/storageclaim_controller.go +++ /dev/null @@ -1,593 +0,0 @@ -/* -Copyright 2022 Red Hat, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "encoding/json" - "fmt" - v1alpha1 "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1" - "github.com/red-hat-storage/ocs-client-operator/pkg/templates" - "github.com/red-hat-storage/ocs-client-operator/pkg/utils" - "slices" - "strings" - - csiopv1a1 "github.com/ceph/ceph-csi-operator/api/v1alpha1" - "github.com/go-logr/logr" - - replicationv1alpha1 "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" - snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - providerclient "github.com/red-hat-storage/ocs-operator/services/provider/api/v4/client" - corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - ctrllog "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - storageClaimFinalizer = "storageclaim.ocs.openshift.io" - storageClaimAnnotation = "ocs.openshift.io/storageclaim" - keyRotationAnnotation = "keyrotation.csiaddons.openshift.io/schedule" -) - -// StorageClaimReconciler reconciles a StorageClaim object -type StorageClaimReconciler struct { - client.Client - cache.Cache - Scheme *runtime.Scheme - OperatorNamespace string - - log logr.Logger - ctx context.Context - storageClient *v1alpha1.StorageClient - storageClaim *v1alpha1.StorageClaim - storageClaimHash string -} - -// SetupWithManager sets up the controller with the Manager. 
-func (r *StorageClaimReconciler) SetupWithManager(mgr ctrl.Manager) error { - ctx := context.Background() - csiDrivers := []string{templates.RBDDriverName, templates.CephFsDriverName} - if err := mgr.GetCache().IndexField(ctx, &corev1.PersistentVolume{}, pvClusterIDIndexName, func(o client.Object) []string { - pv := o.(*corev1.PersistentVolume) - if pv != nil && - pv.Spec.CSI != nil && - slices.Contains(csiDrivers, pv.Spec.CSI.Driver) && - pv.Spec.CSI.VolumeAttributes["clusterID"] != "" { - return []string{pv.Spec.CSI.VolumeAttributes["clusterID"]} - } - return nil - }); err != nil { - return fmt.Errorf("unable to set up FieldIndexer for PV cluster id: %v", err) - } - - if err := mgr.GetCache().IndexField(ctx, &snapapi.VolumeSnapshotContent{}, vscClusterIDIndexName, func(o client.Object) []string { - vsc := o.(*snapapi.VolumeSnapshotContent) - if vsc != nil && - slices.Contains(csiDrivers, vsc.Spec.Driver) && - vsc.Status != nil && - vsc.Status.SnapshotHandle != nil { - parts := strings.Split(*vsc.Status.SnapshotHandle, "-") - if len(parts) == 9 { - // second entry in the volumeID is clusterID which is unique across the cluster - return []string{parts[2]} - } - } - return nil - }); err != nil { - return fmt.Errorf("unable to set up FieldIndexer for VSC csi driver name: %v", err) - } - - generationChangePredicate := predicate.GenerationChangedPredicate{} - bldr := ctrl.NewControllerManagedBy(mgr). - For(&v1alpha1.StorageClaim{}, builder.WithPredicates(generationChangePredicate)). - Owns(&storagev1.StorageClass{}). - Owns(&snapapi.VolumeSnapshotClass{}). - Owns(&replicationv1alpha1.VolumeReplicationClass{}, builder.WithPredicates(generationChangePredicate)). 
- Owns(&csiopv1a1.ClientProfile{}, builder.WithPredicates(generationChangePredicate)) - - return bldr.Complete(r) -} - -//+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclaims,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclaims/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclaims/finalizers,verbs=update -//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete -//+kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;list;watch;create;delete -//+kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch -//+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotcontents,verbs=get;list;watch -//+kubebuilder:rbac:groups=csi.ceph.io,resources=clientprofiles,verbs=get;list;update;create;watch;delete -//+kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplicationclasses,verbs=get;list;watch;create;delete - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the StorageClaim object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile -func (r *StorageClaimReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - - r.log = ctrllog.FromContext(ctx, "StorageClaim", req) - r.ctx = ctrllog.IntoContext(ctx, r.log) - r.log.Info("Reconciling StorageClaim.") - - // Fetch the StorageClaim instance - r.storageClaim = &v1alpha1.StorageClaim{} - r.storageClaim.Name = req.Name - - if err := r.get(r.storageClaim); err != nil { - if errors.IsNotFound(err) { - r.log.Info("StorageClaim resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - r.log.Error(err, "Failed to get StorageClaim.") - return reconcile.Result{}, err - } - - r.storageClaimHash = utils.GetMD5Hash(r.storageClaim.Name) - r.storageClaim.Status.Phase = v1alpha1.StorageClaimInitializing - - if r.storageClaim.Spec.StorageClient == "" { - storageClientList := &v1alpha1.StorageClientList{} - if err := r.list(storageClientList); err != nil { - return reconcile.Result{}, err - } - - if len(storageClientList.Items) == 0 { - r.log.Info("No StorageClient resource found.") - return reconcile.Result{}, fmt.Errorf("no StorageClient found") - } - if len(storageClientList.Items) > 1 { - r.log.Info("Multiple StorageClient resources found, but no storageClient specified.") - return reconcile.Result{}, fmt.Errorf("multiple StorageClient resources found, but no storageClient specified") - } - r.storageClient = &storageClientList.Items[0] - } else { - // Fetch the StorageClient instance - r.storageClient = &v1alpha1.StorageClient{} - r.storageClient.Name = r.storageClaim.Spec.StorageClient - if err := r.get(r.storageClient); err != nil { - r.log.Error(err, "Failed to get StorageClient.") - return reconcile.Result{}, err - } - } - - var result reconcile.Result - var reconcileError error - - // StorageCluster checks for required fields. 
- if r.storageClient.Spec.StorageProviderEndpoint == "" { - return reconcile.Result{}, fmt.Errorf("no external storage provider endpoint found on the " + - "StorageClient spec, cannot determine mode") - } - - result, reconcileError = r.reconcilePhases() - - // Apply status changes to the StorageClaim - statusError := r.Client.Status().Update(r.ctx, r.storageClaim) - if statusError != nil { - r.log.Error(statusError, "Failed to update StorageClaim status.") - } - - // Reconcile errors have higher priority than status update errors - if reconcileError != nil { - return result, reconcileError - } - - if statusError != nil { - return result, statusError - } - - return result, nil -} - -func (r *StorageClaimReconciler) reconcilePhases() (reconcile.Result, error) { - - providerClient, err := providerclient.NewProviderClient( - r.ctx, - r.storageClient.Spec.StorageProviderEndpoint, - utils.OcsClientTimeout, - ) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create provider client with endpoint %v: %v", r.storageClient.Spec.StorageProviderEndpoint, err) - } - - // Close client-side connections. - defer providerClient.Close() - - if r.storageClaim.GetDeletionTimestamp().IsZero() { - - // TODO: Phases do not have checks at the moment, in order to make them more predictable and less error-prone, at the expense of increased computation cost. - // Validation phase. - r.storageClaim.Status.Phase = v1alpha1.StorageClaimValidating - - // If a StorageClass already exists: - // StorageClaim passes validation and is promoted to the configuring phase if: - // * the StorageClaim has the same type as the StorageClass. - // * the StorageClaim has no encryption method specified when the type is filesystem. - // * the StorageClaim has a block type and: - // * the StorageClaim has an encryption method specified. - // * the StorageClaim has the same encryption method as the StorageClass. 
- // StorageClaim fails validation and falls back to a failed phase indefinitely (no reconciliation happens). - existing := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: r.storageClaim.Name, - }, - } - claimType := strings.ToLower(r.storageClaim.Spec.Type) - if err = r.get(existing); err == nil { - sccEncryptionMethod := r.storageClaim.Spec.EncryptionMethod - _, scIsFSType := existing.Parameters["fsName"] - scEncryptionMethod, scHasEncryptionMethod := existing.Parameters["encryptionMethod"] - if !((claimType == "sharedfile" && scIsFSType && !scHasEncryptionMethod) || - (claimType == "block" && !scIsFSType && sccEncryptionMethod == scEncryptionMethod)) { - r.log.Error(fmt.Errorf("storageClaim is not compatible with existing StorageClass"), - "StorageClaim validation failed.") - r.storageClaim.Status.Phase = v1alpha1.StorageClaimFailed - return reconcile.Result{}, nil - } - } else if !errors.IsNotFound(err) { - return reconcile.Result{}, fmt.Errorf("failed to get StorageClass [%v]: %s", existing.ObjectMeta, err) - } - - // Configuration phase. - r.storageClaim.Status.Phase = v1alpha1.StorageClaimConfiguring - - // Check if finalizers are present, if not, add them. - if controllerutil.AddFinalizer(r.storageClaim, storageClaimFinalizer) { - if err := r.update(r.storageClaim); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to update StorageClaim %q: %v", r.storageClaim.Name, err) - } - } - - // storageClaimStorageType is the storage type of the StorageClaim - var storageClaimStorageType providerclient.StorageType - switch claimType { - case "block": - storageClaimStorageType = providerclient.StorageTypeBlock - case "sharedfile": - storageClaimStorageType = providerclient.StorageTypeSharedFile - default: - return reconcile.Result{}, fmt.Errorf("unsupported storage type: %s", claimType) - } - - // Call the `FulfillStorageClaim` service on the provider server with StorageClaim as a request message. 
- _, err = providerClient.FulfillStorageClaim( - r.ctx, - r.storageClient.Status.ConsumerID, - r.storageClaim.Name, - storageClaimStorageType, - r.storageClaim.Spec.StorageProfile, - r.storageClaim.Spec.EncryptionMethod, - ) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to initiate fulfillment of StorageClaim: %v", err) - } - - // Call the `GetStorageClaimConfig` service on the provider server with StorageClaim as a request message. - response, err := providerClient.GetStorageClaimConfig( - r.ctx, - r.storageClient.Status.ConsumerID, - r.storageClaim.Name, - ) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to get StorageClaim config: %v", err) - } - resources := response.ExternalResource - if resources == nil { - return reconcile.Result{}, fmt.Errorf("no configuration data received") - } - - // Go over the received objects and operate on them accordingly. - for _, resource := range resources { - - // Create the received resources, if necessary. - switch resource.Kind { - case "Secret": - data := map[string]string{} - err = json.Unmarshal(resource.Data, &data) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to unmarshal StorageClaim configuration response: %v", err) - } - secret := &corev1.Secret{} - secret.Name = resource.Name - secret.Namespace = r.OperatorNamespace - _, err = controllerutil.CreateOrUpdate(r.ctx, r.Client, secret, func() error { - // cluster scoped resource owning namespace scoped resource which allows garbage collection - if err := r.own(secret); err != nil { - return fmt.Errorf("failed to own secret: %v", err) - } - - if secret.Data == nil { - secret.Data = map[string][]byte{} - } - for k, v := range data { - secret.Data[k] = []byte(v) - } - return nil - }) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create or update secret %v: %s", secret, err) - } - case "StorageClass": - data := map[string]string{} - err = json.Unmarshal(resource.Data, &data) - if err != nil { - 
return reconcile.Result{}, fmt.Errorf("failed to unmarshal StorageClaim configuration response: %v", err) - } - // we are now using clientprofile from csi-operator for getting this info. - // until provider stops sending this info we'll just need to drop the field - // we'll make changes to provider at some version when all clients are dropping this field - delete(data, "radosnamespace") - delete(data, "subvolumegroupname") - - var storageClass *storagev1.StorageClass - data["csi.storage.k8s.io/provisioner-secret-namespace"] = r.OperatorNamespace - data["csi.storage.k8s.io/node-stage-secret-namespace"] = r.OperatorNamespace - data["csi.storage.k8s.io/controller-expand-secret-namespace"] = r.OperatorNamespace - data["clusterID"] = r.storageClaimHash - - if resource.Name == "cephfs" { - storageClass = r.getCephFSStorageClass() - } else if resource.Name == "ceph-rbd" { - storageClass = r.getCephRBDStorageClass() - } - err = utils.CreateOrReplace(r.ctx, r.Client, storageClass, func() error { - if err := r.own(storageClass); err != nil { - return fmt.Errorf("failed to own Storage Class resource: %v", err) - } - utils.AddLabels(storageClass, resource.Labels) - utils.AddAnnotation(storageClass, storageClaimAnnotation, r.storageClaim.Name) - storageClass.Parameters = data - return nil - }) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create or update StorageClass: %s", err) - } - case "VolumeSnapshotClass": - data := map[string]string{} - err = json.Unmarshal(resource.Data, &data) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to unmarshal StorageClaim configuration response: %v", err) - } - var volumeSnapshotClass *snapapi.VolumeSnapshotClass - data["csi.storage.k8s.io/snapshotter-secret-namespace"] = r.OperatorNamespace - // generate a new clusterID for cephfs subvolumegroup, as - // storageclaim is clusterscoped resources using its - // hash as the clusterID - data["clusterID"] = r.storageClaimHash - if resource.Name == 
"cephfs" { - volumeSnapshotClass = r.getCephFSVolumeSnapshotClass() - } else if resource.Name == "ceph-rbd" { - volumeSnapshotClass = r.getCephRBDVolumeSnapshotClass() - } - err = utils.CreateOrReplace(r.ctx, r.Client, volumeSnapshotClass, func() error { - if err := r.own(volumeSnapshotClass); err != nil { - return fmt.Errorf("failed to own VolumeSnapshotClass resource: %v", err) - } - utils.AddLabels(volumeSnapshotClass, resource.Labels) - utils.AddAnnotation(volumeSnapshotClass, storageClaimAnnotation, r.storageClaim.Name) - volumeSnapshotClass.Parameters = data - return nil - }) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create or update VolumeSnapshotClass: %s", err) - } - case "VolumeReplicationClass": - vrc := &replicationv1alpha1.VolumeReplicationClass{} - vrc.Name = resource.Name - err := utils.CreateOrReplace(r.ctx, r.Client, vrc, func() error { - if err := r.own(vrc); err != nil { - return fmt.Errorf("failed to own VolumeReplicationClass resource: %v", err) - } - if err := json.Unmarshal(resource.Data, &vrc.Spec); err != nil { - return fmt.Errorf("failed to unmarshall VolumeReplicationClass spec: %v", err) - } - vrc.Spec.Parameters["replication.storage.openshift.io/replication-secret-namespace"] = r.OperatorNamespace - - utils.AddLabels(vrc, resource.Labels) - utils.AddAnnotations(vrc, resource.Annotations) - - return nil - }) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to create or update VolumeReplicationClass: %s", err) - } - - case "ClientProfile": - clientProfile := &csiopv1a1.ClientProfile{} - clientProfile.Name = r.storageClaimHash - clientProfile.Namespace = r.OperatorNamespace - if _, err := controllerutil.CreateOrUpdate(r.ctx, r.Client, clientProfile, func() error { - if err := r.own(clientProfile); err != nil { - return fmt.Errorf("failed to own clientProfile resource: %v", err) - } - if err := json.Unmarshal(resource.Data, &clientProfile.Spec); err != nil { - return fmt.Errorf("failed to 
unmarshall clientProfile spec: %v", err) - } - clientProfile.Spec.CephConnectionRef = corev1.LocalObjectReference{ - Name: r.storageClient.Name, - } - return nil - }); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to reconcile clientProfile: %v", err) - } - } - } - - // Readiness phase. - // Update the StorageClaim status. - r.storageClaim.Status.Phase = v1alpha1.StorageClaimReady - - // Initiate deletion phase if the StorageClaim exists. - } else if r.storageClaim.UID != "" { - // Deletion phase. - // Update the StorageClaim status. - r.storageClaim.Status.Phase = v1alpha1.StorageClaimDeleting - - if exist, err := r.hasPersistentVolumes(); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to verify persistentvolumes dependent on storageclaim %q: %v", r.storageClaim.Name, err) - } else if exist { - return reconcile.Result{}, fmt.Errorf("one or more persistentvolumes exist that are dependent on storageclaim %s", r.storageClaim.Name) - } - - if exist, err := r.hasVolumeSnapshotContents(); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to verify volumesnapshotcontents dependent on storageclaim %q: %v", r.storageClaim.Name, err) - } else if exist { - return reconcile.Result{}, fmt.Errorf("one or more volumesnapshotcontents exist that are dependent on storageclaim %s", r.storageClaim.Name) - } - - // Call `RevokeStorageClaim` service on the provider server with StorageClaim as a request message. - // Check if StorageClaim is still exists (it might have been manually removed during the StorageClass - // removal above). 
- _, err = providerClient.RevokeStorageClaim( - r.ctx, - r.storageClient.Status.ConsumerID, - r.storageClaim.Name, - ) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to revoke StorageClaim: %s", err) - } - - if controllerutil.RemoveFinalizer(r.storageClaim, storageClaimFinalizer) { - if err := r.update(r.storageClaim); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to remove finalizer from storageClaim: %s", err) - } - } - } - - return reconcile.Result{}, nil -} - -func (r *StorageClaimReconciler) getCephFSStorageClass() *storagev1.StorageClass { - pvReclaimPolicy := corev1.PersistentVolumeReclaimDelete - allowVolumeExpansion := true - storageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: r.storageClaim.Name, - Annotations: map[string]string{ - "description": "Provides RWO and RWX Filesystem volumes", - }, - }, - ReclaimPolicy: &pvReclaimPolicy, - AllowVolumeExpansion: &allowVolumeExpansion, - Provisioner: templates.CephFsDriverName, - } - return storageClass -} - -func (r *StorageClaimReconciler) getCephRBDStorageClass() *storagev1.StorageClass { - pvReclaimPolicy := corev1.PersistentVolumeReclaimDelete - allowVolumeExpansion := true - storageClass := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: r.storageClaim.Name, - Annotations: map[string]string{ - "description": "Provides RWO Filesystem volumes, and RWO and RWX Block volumes", - "reclaimspace.csiaddons.openshift.io/schedule": "@weekly", - }, - }, - ReclaimPolicy: &pvReclaimPolicy, - AllowVolumeExpansion: &allowVolumeExpansion, - Provisioner: templates.RBDDriverName, - } - - if r.storageClaim.Spec.EncryptionMethod != "" { - utils.AddAnnotation(storageClass, keyRotationAnnotation, utils.CronScheduleWeekly) - } - return storageClass -} - -func (r *StorageClaimReconciler) getCephFSVolumeSnapshotClass() *snapapi.VolumeSnapshotClass { - volumesnapshotclass := &snapapi.VolumeSnapshotClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
r.storageClaim.Name, - }, - Driver: templates.CephFsDriverName, - DeletionPolicy: snapapi.VolumeSnapshotContentDelete, - } - return volumesnapshotclass -} - -func (r *StorageClaimReconciler) getCephRBDVolumeSnapshotClass() *snapapi.VolumeSnapshotClass { - volumesnapshotclass := &snapapi.VolumeSnapshotClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: r.storageClaim.Name, - }, - Driver: templates.RBDDriverName, - DeletionPolicy: snapapi.VolumeSnapshotContentDelete, - } - return volumesnapshotclass -} - -func (r *StorageClaimReconciler) get(obj client.Object) error { - key := client.ObjectKeyFromObject(obj) - return r.Client.Get(r.ctx, key, obj) -} - -func (r *StorageClaimReconciler) update(obj client.Object) error { - return r.Client.Update(r.ctx, obj) -} - -func (r *StorageClaimReconciler) list(obj client.ObjectList, listOptions ...client.ListOption) error { - return r.Client.List(r.ctx, obj, listOptions...) -} - -func (r *StorageClaimReconciler) own(resource metav1.Object) error { - return controllerutil.SetControllerReference(r.storageClaim, resource, r.Scheme) -} - -func (r *StorageClaimReconciler) hasPersistentVolumes() (bool, error) { - pvList := &corev1.PersistentVolumeList{} - if err := r.list(pvList, client.MatchingFields{pvClusterIDIndexName: r.storageClaimHash}, client.Limit(1)); err != nil { - return false, fmt.Errorf("failed to list persistent volumes: %v", err) - } - - if len(pvList.Items) != 0 { - r.log.Info(fmt.Sprintf("PersistentVolumes referring storageclaim %q exists", r.storageClaim.Name)) - return true, nil - } - - return false, nil -} - -func (r *StorageClaimReconciler) hasVolumeSnapshotContents() (bool, error) { - vscList := &snapapi.VolumeSnapshotContentList{} - if err := r.list(vscList, client.MatchingFields{vscClusterIDIndexName: r.storageClaimHash}); err != nil { - return false, fmt.Errorf("failed to list volume snapshot content resources: %v", err) - } - - if len(vscList.Items) != 0 { - r.log.Info(fmt.Sprintf("VolumeSnapshotContent referring 
storageclaim %q exists", r.storageClaim.Name)) - return true, nil - } - - return false, nil -}