diff --git a/v2/charts/azure-service-operator/templates/apps_v1_deployment_azureserviceoperator-controller-manager.yaml b/v2/charts/azure-service-operator/templates/apps_v1_deployment_azureserviceoperator-controller-manager.yaml
index d817a17e13..28d43eb5fc 100644
--- a/v2/charts/azure-service-operator/templates/apps_v1_deployment_azureserviceoperator-controller-manager.yaml
+++ b/v2/charts/azure-service-operator/templates/apps_v1_deployment_azureserviceoperator-controller-manager.yaml
@@ -17,6 +17,8 @@ spec:
   selector:
     matchLabels:
       control-plane: controller-manager
+  strategy:
+    type: Recreate
   template:
     metadata:
       annotations:
@@ -55,9 +57,7 @@ spec:
         - --profiling-metrics={{ .Values.metrics.profiling }}
        {{- end }}
         - --health-addr=:8081
-        {{- if or (eq .Values.multitenant.enable false) (eq .Values.azureOperatorMode "watchers") }}
         - --enable-leader-election
-        {{- end }}
         - --v=2
        {{- if and (eq .Values.installCRDs true) (or (eq .Values.multitenant.enable false) (eq .Values.azureOperatorMode "webhooks")) }}
         - --crd-pattern={{ .Values.crdPattern }}
diff --git a/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrole_azureserviceoperator-crd-manager-role.yaml b/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrole_azureserviceoperator-crd-manager-role.yaml
index b59352c789..10a06e7d33 100644
--- a/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrole_azureserviceoperator-crd-manager-role.yaml
+++ b/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrole_azureserviceoperator-crd-manager-role.yaml
@@ -1,4 +1,3 @@
-{{- if or (eq .Values.multitenant.enable false) (eq .Values.azureOperatorMode "webhooks") }}
 {{- if and (eq .Values.installCRDs true) (or (eq .Values.multitenant.enable false) (eq .Values.azureOperatorMode "webhooks")) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
@@ -29,4 +28,3 @@ rules:
   verbs:
   - create
 {{- end }}
-{{- end }}
diff --git a/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrolebinding_azureserviceoperator-crd-manager-rolebinding.yaml b/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrolebinding_azureserviceoperator-crd-manager-rolebinding.yaml
index e52e1130d8..9e9af7fb5c 100644
--- a/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrolebinding_azureserviceoperator-crd-manager-rolebinding.yaml
+++ b/v2/charts/azure-service-operator/templates/rbac.authorization.k8s.io_v1_clusterrolebinding_azureserviceoperator-crd-manager-rolebinding.yaml
@@ -1,4 +1,3 @@
-{{- if or (eq .Values.multitenant.enable false) (eq .Values.azureOperatorMode "webhooks") }}
 {{- if and (eq .Values.installCRDs true) (or (eq .Values.multitenant.enable false) (eq .Values.azureOperatorMode "webhooks")) }}
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
@@ -13,4 +12,3 @@ subjects:
   name: {{ include "azure-service-operator.serviceAccountName" . }}
   namespace: {{ .Release.Namespace }}
 {{- end }}
-{{- end }}
diff --git a/v2/cmd/controller/app/setup.go b/v2/cmd/controller/app/setup.go
index 4d059bb269..327a46b5cd 100644
--- a/v2/cmd/controller/app/setup.go
+++ b/v2/cmd/controller/app/setup.go
@@ -49,6 +49,7 @@ import (
 	"github.com/Azure/azure-service-operator/v2/internal/util/interval"
 	"github.com/Azure/azure-service-operator/v2/internal/util/kubeclient"
 	"github.com/Azure/azure-service-operator/v2/internal/util/lockedrand"
+	"github.com/Azure/azure-service-operator/v2/internal/util/to"
 	common "github.com/Azure/azure-service-operator/v2/pkg/common/config"
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime/conditions"
@@ -92,11 +93,17 @@ func SetupControllerManager(ctx context.Context, setupLog logr.Logger, flgs *Fla
 	}

 	k8sConfig := ctrl.GetConfigOrDie()
-	mgr, err := ctrl.NewManager(k8sConfig, ctrl.Options{
+	ctrlOptions := ctrl.Options{
 		Scheme:           scheme,
 		NewCache:         cacheFunc,
 		LeaderElection:   flgs.EnableLeaderElection,
 		LeaderElectionID: "controllers-leader-election-azinfra-generated",
+		// Manually set lease duration (to default) so that we can use it for our leader elector too.
+		// See https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/manager/internal.go#L52
+		LeaseDuration:           to.Ptr(15 * time.Second),
+		RenewDeadline:           to.Ptr(10 * time.Second),
+		RetryPeriod:             to.Ptr(2 * time.Second),
+		GracefulShutdownTimeout: to.Ptr(30 * time.Second),
 		// It's only safe to set LeaderElectionReleaseOnCancel to true if the manager binary ends
 		// when the manager exits. This is the case with us today, so we set this to true whenever
 		// flgs.EnableLeaderElection is true.
@@ -107,7 +114,8 @@ func SetupControllerManager(ctx context.Context, setupLog logr.Logger, flgs *Fla
 			Port:    flgs.WebhookPort,
 			CertDir: flgs.WebhookCertDir,
 		}),
-	})
+	}
+	mgr, err := ctrl.NewManager(k8sConfig, ctrlOptions)
 	if err != nil {
 		setupLog.Error(err, "unable to create manager")
 		os.Exit(1)
@@ -119,13 +127,22 @@ func SetupControllerManager(ctx context.Context, setupLog logr.Logger, flgs *Fla
 		os.Exit(1)
 	}

-	// TODO: Put all of the CRD stuff into a method?
-	crdManager, err := newCRDManager(clients.log, mgr.GetConfig())
+	var leaderElector *crdmanagement.LeaderElector
+	if flgs.EnableLeaderElection {
+		// nolint: contextcheck // false positive?
+		leaderElector, err = crdmanagement.NewLeaderElector(k8sConfig, setupLog, ctrlOptions, mgr)
+		if err != nil {
+			setupLog.Error(err, "failed to initialize leader elector")
+			os.Exit(1)
+		}
+	}
+
+	crdManager, err := newCRDManager(clients.log, mgr.GetConfig(), leaderElector)
 	if err != nil {
 		setupLog.Error(err, "failed to initialize CRD client")
 		os.Exit(1)
 	}
-	existingCRDs, err := crdManager.ListOperatorCRDs(ctx)
+	existingCRDs, err := crdManager.ListCRDs(ctx)
 	if err != nil {
 		setupLog.Error(err, "failed to list current CRDs")
 		os.Exit(1)
@@ -133,31 +150,15 @@ func SetupControllerManager(ctx context.Context, setupLog logr.Logger, flgs *Fla

 	switch flgs.CRDManagementMode {
 	case "auto":
-		var goalCRDs []apiextensions.CustomResourceDefinition
-		goalCRDs, err = crdManager.LoadOperatorCRDs(crdmanagement.CRDLocation, cfg.PodNamespace)
-		if err != nil {
-			setupLog.Error(err, "failed to load CRDs from disk")
-			os.Exit(1)
-		}
-
 		// We only apply CRDs if we're in webhooks mode. No other mode will have CRD CRUD permissions
 		if cfg.OperatorMode.IncludesWebhooks() {
-			var installationInstructions []*crdmanagement.CRDInstallationInstruction
-			installationInstructions, err = crdManager.DetermineCRDsToInstallOrUpgrade(goalCRDs, existingCRDs, flgs.CRDPatterns)
-			if err != nil {
-				setupLog.Error(err, "failed to determine CRDs to apply")
-				os.Exit(1)
-			}
-
-			included := crdmanagement.IncludedCRDs(installationInstructions)
-			if len(included) == 0 {
-				err = eris.New("No existing CRDs in cluster and no --crd-pattern specified")
-				setupLog.Error(err, "failed to apply CRDs")
-				os.Exit(1)
-			}
-
-			// Note that this step will restart the pod when it succeeds
-			err = crdManager.ApplyCRDs(ctx, installationInstructions)
+			err = crdManager.Install(ctx, crdmanagement.Options{
+				CRDPatterns:  flgs.CRDPatterns,
+				ExistingCRDs: existingCRDs,
+				Path:         crdmanagement.CRDLocation,
+				Namespace:    cfg.PodNamespace,
+			})
 			if err != nil {
 				setupLog.Error(err, "failed to apply CRDs")
 				os.Exit(1)
@@ -172,7 +173,7 @@ func SetupControllerManager(ctx context.Context, setupLog logr.Logger, flgs *Fla

 	// There are 3 possibilities once we reach here:
 	// 1. Webhooks mode + crd-management-mode=auto: existingCRDs will be up to date (upgraded, crd-pattern applied, etc)
-	//    by the time we get here as the pod will keep exiting until it is so (see crdManager.ApplyCRDs above).
+	//    by the time we get here as the pod will keep exiting until it is so (see crdManager.applyCRDs above).
 	// 2. Non-webhooks mode + auto: As outlined in https://azure.github.io/azure-service-operator/guide/authentication/multitenant-deployment/#upgrading
 	//    the webhooks mode pod must be upgraded first, so there's not really much practical difference between this and
 	//    crd-management-mode=none (see below).
@@ -458,7 +459,11 @@ func makeControllerOptions(log logr.Logger, cfg config.Values) generic.Options {
 	}
 }

-func newCRDManager(logger logr.Logger, k8sConfig *rest.Config) (*crdmanagement.Manager, error) {
+func newCRDManager(
+	logger logr.Logger,
+	k8sConfig *rest.Config,
+	leaderElection *crdmanagement.LeaderElector,
+) (*crdmanagement.Manager, error) {
 	crdScheme := runtime.NewScheme()
 	_ = apiextensions.AddToScheme(crdScheme)
 	crdClient, err := client.New(k8sConfig, client.Options{Scheme: crdScheme})
@@ -466,6 +471,6 @@ func newCRDManager(logger logr.Logger, k8sConfig *rest.Config) (*crdmanagement.M
 		return nil, eris.Wrap(err, "unable to create CRD client")
 	}

-	crdManager := crdmanagement.NewManager(logger, kubeclient.NewClient(crdClient))
+	crdManager := crdmanagement.NewManager(logger, kubeclient.NewClient(crdClient), leaderElection)
 	return crdManager, nil
 }
diff --git a/v2/config/manager/manager.yaml b/v2/config/manager/manager.yaml
index 940ade49cb..a7c0315ede 100644
--- a/v2/config/manager/manager.yaml
+++ b/v2/config/manager/manager.yaml
@@ -25,6 +25,8 @@ spec:
     matchLabels:
       control-plane: controller-manager
   replicas: 1
+  strategy:
+    type: Recreate
   revisionHistoryLimit: 10
   template:
     metadata:
diff --git a/v2/internal/crdmanagement/helpers_test.go b/v2/internal/crdmanagement/helpers_test.go
index 8e38729f09..7e4f736280 100644
--- a/v2/internal/crdmanagement/helpers_test.go
+++ b/v2/internal/crdmanagement/helpers_test.go
@@ -114,7 +114,7 @@ func testSetup(t *testing.T) *testData {
 	logger := testcommon.NewTestLogger(t)
 	cfg := config.Values{}

-	crdManager := crdmanagement.NewManager(logger, kubeClient)
+	crdManager := crdmanagement.NewManager(logger, kubeClient, nil)

 	return &testData{
 		cfg:        cfg,
diff --git a/v2/internal/crdmanagement/manager.go b/v2/internal/crdmanagement/manager.go
index 4738acbdb1..75671d6b87 100644
--- a/v2/internal/crdmanagement/manager.go
+++ b/v2/internal/crdmanagement/manager.go
@@ -10,6 +10,7 @@ import (
 	"path/filepath"
 	"reflect"
 	"strings"
+	"sync"

 	. "github.com/Azure/azure-service-operator/v2/internal/logging"

@@ -20,8 +21,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/leaderelection"
+	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	ctrlleader "sigs.k8s.io/controller-runtime/pkg/leaderelection"
 	"sigs.k8s.io/yaml"

 	"github.com/Azure/azure-service-operator/v2/internal/util/kubeclient"
@@ -41,21 +46,104 @@ const CRDLocation = "crds"

 const certMgrInjectCAFromAnnotation = "cert-manager.io/inject-ca-from"

+type LeaderElector struct {
+	Elector       *leaderelection.LeaderElector
+	LeaseAcquired *sync.WaitGroup
+	LeaseReleased *sync.WaitGroup
+}
+
+// NewLeaderElector creates a new LeaderElector
+func NewLeaderElector(
+	k8sConfig *rest.Config,
+	log logr.Logger,
+	ctrlOptions ctrl.Options,
+	mgr ctrl.Manager,
+) (*LeaderElector, error) {
+	resourceLock, err := ctrlleader.NewResourceLock(
+		k8sConfig,
+		mgr,
+		ctrlleader.Options{
+			LeaderElection:             ctrlOptions.LeaderElection,
+			LeaderElectionResourceLock: ctrlOptions.LeaderElectionResourceLock,
+			LeaderElectionID:           ctrlOptions.LeaderElectionID,
+		})
+	if err != nil {
+		return nil, err
+	}
+
+	log = log.WithName("crdManagementLeaderElector")
+	leaseAcquiredWait := &sync.WaitGroup{}
+	leaseAcquiredWait.Add(1)
+	leaseReleasedWait := &sync.WaitGroup{}
+	leaseReleasedWait.Add(1)
+
+	// My assumption is that OnStoppedLeading is guaranteed to
+	// be called after OnStartedLeading and we don't need to protect this
+	// shared state with a mutex.
+	var leaderContext context.Context
+
+	leaderElector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
+		Lock:          resourceLock,
+		LeaseDuration: *ctrlOptions.LeaseDuration,
+		RenewDeadline: *ctrlOptions.RenewDeadline,
+		RetryPeriod:   *ctrlOptions.RetryPeriod,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStartedLeading: func(ctx context.Context) {
+				log.V(Status).Info("Elected leader")
+				leaseAcquiredWait.Done()
+				leaderContext = ctx
+			},
+			OnStoppedLeading: func() {
+				leaseReleasedWait.Done()
+
+				exitCode := 1
+				select {
+				case <-leaderContext.Done():
+					exitCode = 0 // done is closed
+				default:
+				}
+
+				if exitCode == 0 {
+					log.V(Status).Info("Lost leader due to cooperative lease release")
+				} else {
+					log.V(Status).Info("Lost leader")
+				}
+				os.Exit(exitCode)
+			},
+		},
+		ReleaseOnCancel: ctrlOptions.LeaderElectionReleaseOnCancel,
+		Name:            ctrlOptions.LeaderElectionID,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &LeaderElector{
+		Elector:       leaderElector,
+		LeaseAcquired: leaseAcquiredWait,
+		LeaseReleased: leaseReleasedWait,
+	}, nil
+}
+
 type Manager struct {
-	logger     logr.Logger
-	kubeClient kubeclient.Client
+	logger         logr.Logger
+	kubeClient     kubeclient.Client
+	leaderElection *LeaderElector

 	crds []apiextensions.CustomResourceDefinition
 }

-func NewManager(logger logr.Logger, kubeClient kubeclient.Client) *Manager {
+// NewManager creates a new CRD manager.
+// The leaderElection argument is optional, but strongly recommended.
+func NewManager(logger logr.Logger, kubeClient kubeclient.Client, leaderElection *LeaderElector) *Manager {
 	return &Manager{
-		logger:     logger,
-		kubeClient: kubeClient,
+		logger:         logger,
+		kubeClient:     kubeClient,
+		leaderElection: leaderElection,
 	}
 }

-func (m *Manager) ListOperatorCRDs(ctx context.Context) ([]apiextensions.CustomResourceDefinition, error) {
+func (m *Manager) ListCRDs(ctx context.Context) ([]apiextensions.CustomResourceDefinition, error) {
 	list := apiextensions.CustomResourceDefinitionList{}

 	selector := labels.NewSelector()
@@ -226,35 +314,52 @@ func (m *Manager) DetermineCRDsToInstallOrUpgrade(
 	return results, nil
 }

-func (m *Manager) ApplyCRDs(
+func (m *Manager) applyCRDs(
 	ctx context.Context,
+	goalCRDs []apiextensions.CustomResourceDefinition,
 	instructions []*CRDInstallationInstruction,
+	options Options,
 ) error {
-	var instructionsToApply []*CRDInstallationInstruction
-
-	for _, item := range instructions {
-		apply, reason := item.ShouldApply()
-		if apply {
-			instructionsToApply = append(instructionsToApply, item)
-			m.logger.V(Verbose).Info(
-				"Will update CRD",
-				"crd", item.CRD.Name,
-				"diffResult", item.DiffResult,
-				"filterReason", item.FilterReason,
-				"reason", reason)
-		} else {
-			m.logger.V(Verbose).Info(
-				"Will NOT update CRD",
-				"crd", item.CRD.Name,
-				"reason", reason)
-		}
-	}
+	instructionsToApply := m.filterInstallationInstructions(instructions, true)

 	if len(instructionsToApply) == 0 {
 		m.logger.V(Status).Info("Successfully reconciled CRDs because there were no CRDs to update.")
 		return nil
 	}

+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	if m.leaderElection != nil {
+		m.logger.V(Status).Info("Acquiring leader lock...")
+		go m.leaderElection.Elector.Run(ctx)
+		m.leaderElection.LeaseAcquired.Wait() // Wait for lease to be acquired
+
+		// If lease was acquired we always want to wait til it's released, but defers run in LIFO order
+		// so we need to make sure that the ctx is cancelled first here
+		defer func() {
+			cancel()
+			m.leaderElection.LeaseReleased.Wait()
+		}()
+
+		// Double-checked locking, we need to make sure once we have the lock there's still work to do, as it may
+		// already have been done while we were waiting for the lock.
+		m.logger.V(Status).Info("Double-checked locking - ensure there's still CRDs to apply...")
+		existingCRDs, err := m.ListCRDs(ctx)
+		if err != nil {
+			return eris.Wrap(err, "failed to list current CRDs")
+		}
+		instructions, err = m.DetermineCRDsToInstallOrUpgrade(goalCRDs, existingCRDs, options.CRDPatterns)
+		if err != nil {
+			return eris.Wrap(err, "failed to determine CRDs to apply")
+		}
+		instructionsToApply = m.filterInstallationInstructions(instructions, false)
+		if len(instructionsToApply) == 0 {
+			m.logger.V(Status).Info("Successfully reconciled CRDs because there were no CRDs to update.")
+			return nil
+		}
+	}
+
 	m.logger.V(Status).Info("Will apply CRDs", "count", len(instructionsToApply))

 	i := 0
@@ -286,6 +391,13 @@
 		m.logger.V(Debug).Info("Successfully applied CRD", "name", instruction.CRD.Name, "result", result)
 	}

+	// Cancel the context, and wait for the lease to complete
+	if m.leaderElection != nil {
+		m.logger.V(Info).Info("Giving up leadership lease")
+		cancel()
+		m.leaderElection.LeaseReleased.Wait()
+	}
+
 	// If we make it to here, we have successfully updated all the CRDs we needed to. We need to kill the pod and let it restart so
 	// that the new shape CRDs can be reconciled.
 	m.logger.V(Status).Info("Restarting operator pod after updating CRDs", "count", len(instructionsToApply))
@@ -295,6 +407,39 @@
 	return nil
 }

+type Options struct {
+	Path         string
+	Namespace    string
+	CRDPatterns  string
+	ExistingCRDs []apiextensions.CustomResourceDefinition
+}
+
+func (m *Manager) Install(ctx context.Context, options Options) error {
+	goalCRDs, err := m.LoadOperatorCRDs(options.Path, options.Namespace)
+	if err != nil {
+		return eris.Wrap(err, "failed to load CRDs from disk")
+	}
+
+	installationInstructions, err := m.DetermineCRDsToInstallOrUpgrade(goalCRDs, options.ExistingCRDs, options.CRDPatterns)
+	if err != nil {
+		return eris.Wrap(err, "failed to determine CRDs to apply")
+	}
+
+	included := IncludedCRDs(installationInstructions)
+	if len(included) == 0 {
+		return eris.New("No existing CRDs in cluster and no --crd-pattern specified")
+	}
+
+	// Note that this step will restart the pod when it succeeds
+	// if any CRDs were applied.
+	err = m.applyCRDs(ctx, goalCRDs, installationInstructions, options)
+	if err != nil {
+		return eris.Wrap(err, "failed to apply CRDs")
+	}
+
+	return nil
+}
+
 func (m *Manager) loadCRDs(path string) ([]apiextensions.CustomResourceDefinition, error) {
 	// Expectation is that every file in this folder is a CRD
 	entries, err := os.ReadDir(path)
@@ -329,6 +474,34 @@ func (m *Manager) loadCRDs(path string) ([]apiextensions.CustomResourceDefinitio
 	return results, nil
 }

+func (m *Manager) filterInstallationInstructions(instructions []*CRDInstallationInstruction, log bool) []*CRDInstallationInstruction {
+	var instructionsToApply []*CRDInstallationInstruction
+
+	for _, item := range instructions {
+		apply, reason := item.ShouldApply()
+		if apply {
+			instructionsToApply = append(instructionsToApply, item)
+			if log {
+				m.logger.V(Verbose).Info(
+					"Will update CRD",
+					"crd", item.CRD.Name,
+					"diffResult", item.DiffResult,
+					"filterReason", item.FilterReason,
+					"reason", reason)
+			}
+		} else {
+			if log {
+				m.logger.V(Verbose).Info(
+					"Will NOT update CRD",
+					"crd", item.CRD.Name,
+					"reason", reason)
+			}
+		}
+	}
+
+	return instructionsToApply
+}
+
 func (m *Manager) fixCRDNamespaceRefs(crds []apiextensions.CustomResourceDefinition, namespace string) []apiextensions.CustomResourceDefinition {
 	results := make([]apiextensions.CustomResourceDefinition, 0, len(crds))

diff --git a/v2/internal/crdmanagement/manager_test.go b/v2/internal/crdmanagement/manager_test.go
index b4577aca85..ed1bd4890b 100644
--- a/v2/internal/crdmanagement/manager_test.go
+++ b/v2/internal/crdmanagement/manager_test.go
@@ -44,7 +44,7 @@ func Test_LoadCRDs(t *testing.T) {
 	crdPath := filepath.Join(dir, "crd.yaml")
 	g.Expect(os.WriteFile(crdPath, bytes, 0o600)).To(Succeed())

-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	loadedCRDs, err := crdManager.LoadOperatorCRDs(dir, "azureserviceoperator-system")
 	g.Expect(err).ToNot(HaveOccurred())
@@ -83,7 +83,7 @@ func Test_LoadCRDs_FixesNamespace(t *testing.T) {
 	crdPath := filepath.Join(dir, "crd.yaml")
 	g.Expect(os.WriteFile(crdPath, bytes, 0o600)).To(Succeed())

-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	loadedCRDs, err := crdManager.LoadOperatorCRDs(dir, "other-namespace")
 	g.Expect(err).ToNot(HaveOccurred())
@@ -108,7 +108,7 @@ func Test_FindMatchingCRDs_EqualCRDsCompareAsEqual(t *testing.T) {
 	goal := []apiextensions.CustomResourceDefinition{goalCRD}

 	logger := testcommon.NewTestLogger(t)
-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	matching := crdManager.FindMatchingCRDs(existing, goal, crdmanagement.SpecEqual)

@@ -124,7 +124,7 @@ func Test_FindMatchingCRDs_MissingCRD(t *testing.T) {
 	goal := []apiextensions.CustomResourceDefinition{goalCRD}

 	logger := testcommon.NewTestLogger(t)
-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	matching := crdManager.FindMatchingCRDs(existing, goal, crdmanagement.SpecEqual)

@@ -171,7 +171,7 @@ func Test_FindMatchingCRDs_CRDsWithDifferentConversionsCompareAsEqual(t *testing
 	goal := []apiextensions.CustomResourceDefinition{goalCRD}

 	logger := testcommon.NewTestLogger(t)
-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	matching := crdManager.FindMatchingCRDs(existing, goal, crdmanagement.SpecEqual)

@@ -194,7 +194,7 @@ func Test_FindNonMatchingCRDs_EqualCRDsCompareAsEqual(t *testing.T) {
 	goal := []apiextensions.CustomResourceDefinition{goalCRD}

 	logger := testcommon.NewTestLogger(t)
-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	nonMatching := crdManager.FindNonMatchingCRDs(existing, goal, crdmanagement.SpecEqual)

@@ -210,7 +210,7 @@ func Test_FindNonMatchingCRDs_MissingCRD(t *testing.T) {
 	goal := []apiextensions.CustomResourceDefinition{goalCRD}

 	logger := testcommon.NewTestLogger(t)
-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	nonMatching := crdManager.FindNonMatchingCRDs(existing, goal, crdmanagement.SpecEqual)

@@ -257,7 +257,7 @@ func Test_FindNonMatchingCRDs_CRDsWithDifferentConversionsCompareAsEqual(t *test
 	goal := []apiextensions.CustomResourceDefinition{goalCRD}

 	logger := testcommon.NewTestLogger(t)
-	crdManager := crdmanagement.NewManager(logger, nil)
+	crdManager := crdmanagement.NewManager(logger, nil, nil)

 	nonMatching := crdManager.FindNonMatchingCRDs(existing, goal, crdmanagement.SpecEqual)

@@ -383,7 +383,7 @@ func Test_DetermineCRDsToInstallOrUpgrade(t *testing.T) {
 			g := NewGomegaWithT(t)

 			logger := testcommon.NewTestLogger(t)
-			crdManager := crdmanagement.NewManager(logger, nil)
+			crdManager := crdmanagement.NewManager(logger, nil, nil)

 			instructions, err := crdManager.DetermineCRDsToInstallOrUpgrade(c.goal, c.existing, c.patterns)
 			g.Expect(err).ToNot(HaveOccurred())
@@ -413,9 +413,9 @@ func Test_ListCRDs_ListsOnlyCRDsMatchingLabel(t *testing.T) {
 	g.Expect(kubeClient.Create(ctx, &crd3)).To(Succeed())

 	logger := testcommon.NewTestLogger(t)
-	crdManager := crdmanagement.NewManager(logger, kubeClient)
+	crdManager := crdmanagement.NewManager(logger, kubeClient, nil)

-	crds, err := crdManager.ListOperatorCRDs(ctx)
+	crds, err := crdManager.ListCRDs(ctx)
 	g.Expect(err).ToNot(HaveOccurred())
 	g.Expect(crds).To(HaveLen(1))
 }
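
For readers following the new flow: the sketch below is not part of the diff; it only illustrates the double-checked locking shape that the new applyCRDs uses — a cheap check for work, acquire the leadership lease, then re-check under the lease before writing, because another pod may have applied the CRDs while this one waited. The names reconcileOnce, listWork, acquireLease and apply are placeholders for illustration, not APIs from this repository.

package main

import (
	"context"
	"fmt"
)

// reconcileOnce sketches the pattern: check, take the lease, re-check, then act.
func reconcileOnce(
	ctx context.Context,
	listWork func(context.Context) ([]string, error), // stands in for ListCRDs + DetermineCRDsToInstallOrUpgrade
	acquireLease func(context.Context) (func(), error), // stands in for Elector.Run + LeaseAcquired.Wait
	apply func(context.Context, []string) error, // stands in for the per-CRD apply loop
) error {
	work, err := listWork(ctx)
	if err != nil {
		return err
	}
	if len(work) == 0 {
		return nil // nothing to do, so the lease is never taken
	}

	release, err := acquireLease(ctx)
	if err != nil {
		return err
	}
	defer release() // mirrors cancelling the elector context and waiting for LeaseReleased

	// Double-check: the work may already have been done while waiting for the lease.
	if work, err = listWork(ctx); err != nil {
		return err
	}
	if len(work) == 0 {
		return nil
	}
	return apply(ctx, work)
}

func main() {
	// Trivial wiring just to show the call shape.
	err := reconcileOnce(
		context.Background(),
		func(context.Context) ([]string, error) { return []string{"crd-a"}, nil },
		func(context.Context) (func(), error) { return func() {}, nil },
		func(_ context.Context, items []string) error {
			fmt.Printf("applying %d CRDs\n", len(items))
			return nil
		},
	)
	if err != nil {
		fmt.Println("reconcile failed:", err)
	}
}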