From 6360fa77cdba17f862e4cc18e3980e805cf62138 Mon Sep 17 00:00:00 2001 From: Tarun Gupta Akirala Date: Fri, 20 Jan 2023 16:37:14 -0800 Subject: [PATCH] :sparkles: feat: accept resource mutators in Move operation Signed-off-by: Tarun Gupta Akirala --- api/v1beta1/cluster_types.go | 3 + api/v1beta1/clusterclass_types.go | 3 + cmd/clusterctl/client/cluster/mover.go | 128 ++++++++++++---- cmd/clusterctl/client/cluster/mover_test.go | 159 +++++++++++++++++++- cmd/clusterctl/client/move.go | 6 +- cmd/clusterctl/client/move_test.go | 2 +- 6 files changed, 265 insertions(+), 36 deletions(-) diff --git a/api/v1beta1/cluster_types.go b/api/v1beta1/cluster_types.go index 4e5adab14073..06da650359e6 100644 --- a/api/v1beta1/cluster_types.go +++ b/api/v1beta1/cluster_types.go @@ -34,6 +34,9 @@ const ( // ClusterFinalizer is the finalizer used by the cluster controller to // cleanup the cluster resources when a Cluster is being deleted. ClusterFinalizer = "cluster.cluster.x-k8s.io" + + // ClusterKind represents the Kind of Cluster. + ClusterKind = "Cluster" ) // ANCHOR: ClusterSpec diff --git a/api/v1beta1/clusterclass_types.go b/api/v1beta1/clusterclass_types.go index b26eac69539e..12fb52b5590e 100644 --- a/api/v1beta1/clusterclass_types.go +++ b/api/v1beta1/clusterclass_types.go @@ -25,6 +25,9 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) +// ClusterClassKind represents the Kind of ClusterClass. 
+const ClusterClassKind = "ClusterClass" + // +kubebuilder:object:root=true // +kubebuilder:resource:path=clusterclasses,shortName=cc,scope=Namespaced,categories=cluster-api // +kubebuilder:storageversion diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 6d88b589197d..c576979a8a50 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -27,6 +27,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -41,10 +42,13 @@ import ( "sigs.k8s.io/cluster-api/util/yaml" ) +// ResourceMutatorFunc holds the type for mutators to be applied on resources during a move operation. +type ResourceMutatorFunc func(u *unstructured.Unstructured) error + // ObjectMover defines methods for moving Cluster API objects to another management cluster. type ObjectMover interface { // Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster. - Move(namespace string, toCluster Client, dryRun bool) error + Move(namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error // ToDirectory writes all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target directory. ToDirectory(namespace string, directory string) error @@ -63,7 +67,7 @@ type objectMover struct { // ensure objectMover implements the ObjectMover interface. 
var _ ObjectMover = &objectMover{} -func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool) error { +func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error { log := logf.Log log.Info("Performing move...") o.dryRun = dryRun @@ -91,7 +95,7 @@ func (o *objectMover) Move(namespace string, toCluster Client, dryRun bool) erro proxy = toCluster.Proxy() } - return o.move(objectGraph, proxy) + return o.move(objectGraph, proxy, mutators...) } func (o *objectMover) ToDirectory(namespace string, directory string) error { @@ -308,7 +312,7 @@ func getMachineObj(proxy Proxy, machine *node, machineObj *clusterv1.Machine) er } // Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster. -func (o *objectMover) move(graph *objectGraph, toProxy Proxy) error { +func (o *objectMover) move(graph *objectGraph, toProxy Proxy, mutators ...ResourceMutatorFunc) error { log := logf.Log clusters := graph.getClusters() @@ -328,11 +332,9 @@ func (o *objectMover) move(graph *objectGraph, toProxy Proxy) error { return errors.Wrap(err, "error pausing ClusterClasses") } - // Ensure all the expected target namespaces are in place before creating objects. - log.V(1).Info("Creating target namespaces, if missing") - if err := o.ensureNamespaces(graph, toProxy); err != nil { - return err - } + // Nb. DO NOT call ensureNamespaces at this point because: + // - namespace will be ensured to exist before creating the resource. + // - If it's done here, we might create a namespace that can end up unused on target cluster (due to mutators). // Define the move sequence by processing the ownerReference chain, so we ensure that a Kubernetes object is moved only after its owners. // The sequence is bases on object graph nodes, each one representing a Kubernetes object; nodes are grouped, so bulk of nodes can be moved in parallel. e.g. 
+ // mutators affecting non-metadata fields are a no-op after this point.
-func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) error { +func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool, mutators ...ResourceMutatorFunc) error { if dryRun { return nil } @@ -553,7 +559,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err // Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions. if err := retryWithExponentialBackoff(setClusterPauseBackoff, func() error { - return patchCluster(proxy, cluster, patch) + return patchCluster(proxy, cluster, patch, mutators...) }); err != nil { return errors.Wrapf(err, "error setting Cluster.Spec.Paused=%t", value) } @@ -562,7 +568,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err } // setClusterClassPause sets the paused annotation on nodes referring to ClusterClass objects. -func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRun bool) error { +func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRun bool, mutators ...ResourceMutatorFunc) error { if dryRun { return nil } @@ -580,7 +586,7 @@ func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRu // Nb. The operation is wrapped in a retry loop to make setClusterClassPause more resilient to unexpected conditions. if err := retryWithExponentialBackoff(setClusterClassPauseBackoff, func() error { - return pauseClusterClass(proxy, clusterclass, pause) + return pauseClusterClass(proxy, clusterclass, pause, mutators...) }); err != nil { return errors.Wrapf(err, "error updating ClusterClass %s/%s", clusterclass.identity.Namespace, clusterclass.identity.Name) } @@ -589,19 +595,29 @@ func setClusterClassPause(proxy Proxy, clusterclasses []*node, pause bool, dryRu } // patchCluster applies a patch to a node referring to a Cluster object. 
+ // Since the patch has already been generated in the caller of this function, the ONLY effect that mutators can have
+ // here is on the namespace of the resource.
+ mutatedClusterClass, err := applyMutators(&clusterv1.ClusterClass{ + TypeMeta: metav1.TypeMeta{ + Kind: clusterv1.ClusterClassKind, + APIVersion: clusterv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: n.identity.Name, + Namespace: n.identity.Namespace, + }}, mutators...) + if err != nil { + return err + } + clusterClass := &clusterv1.ClusterClass{} + // Construct an object key using the mutatedClusterClass reflecting any changes to the namespace. clusterClassObjKey := client.ObjectKey{ - Name: n.identity.Name, - Namespace: n.identity.Namespace, + Name: mutatedClusterClass.GetName(), + Namespace: mutatedClusterClass.GetNamespace(), } + // Get a copy of the ClusterClass. + // This will ensure that any other changes from the mutator are ignored here as we work with a fresh copy of the cluster class. if err := cFrom.Get(ctx, clusterClassObjKey, clusterClass); err != nil { return errors.Wrapf(err, "error reading ClusterClass %s/%s", n.identity.Namespace, n.identity.Name) } @@ -735,7 +768,7 @@ func (o *objectMover) ensureNamespace(toProxy Proxy, namespace string) error { return err } - // If the namespace does not exists, create it. + // If the namespace does not exist, create it. ns = &corev1.Namespace{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -753,15 +786,18 @@ func (o *objectMover) ensureNamespace(toProxy Proxy, namespace string) error { } // createGroup creates all the Kubernetes objects into the target management cluster corresponding to the object graph nodes in a moveGroup. -func (o *objectMover) createGroup(group moveGroup, toProxy Proxy) error { +func (o *objectMover) createGroup(group moveGroup, toProxy Proxy, mutators ...ResourceMutatorFunc) error { createTargetObjectBackoff := newWriteBackoff() errList := []error{} + // Maintain a cache of namespaces that have been verified to already exist. + // Nb. 
This prevents us from making repetitive (and expensive) calls in listing all namespaces to ensure a namespace exists before creating a resource. + existingNamespaces := sets.New[string]() for _, nodeToCreate := range group { // Creates the Kubernetes object corresponding to the nodeToCreate. // Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions. err := retryWithExponentialBackoff(createTargetObjectBackoff, func() error { - return o.createTargetObject(nodeToCreate, toProxy) + return o.createTargetObject(nodeToCreate, toProxy, mutators, existingNamespaces) }) if err != nil { errList = append(errList, err) @@ -820,7 +856,7 @@ func (o *objectMover) restoreGroup(group moveGroup, toProxy Proxy) error { } // createTargetObject creates the Kubernetes object in the target Management cluster corresponding to the object graph node, taking care of restoring the OwnerReference with the owner nodes, if any. -func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) error { +func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy, mutators []ResourceMutatorFunc, existingNamespaces sets.Set[string]) error { log := logf.Log log.V(1).Info("Creating", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace) @@ -853,7 +889,7 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro // Removes current OwnerReferences obj.SetOwnerReferences(nil) - // Rebuild the owne reference chain + // Rebuild the owner reference chain o.buildOwnerChain(obj, nodeToCreate) // FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed. @@ -868,6 +904,17 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro return err } + obj, err = applyMutators(obj, mutators...) 
+ if err != nil { + return err + } + // Applying mutators MAY change the namespace, so ensure the namespace exists before creating the resource. + if !nodeToCreate.isGlobal && !existingNamespaces.Has(obj.GetNamespace()) { + if err = o.ensureNamespace(toProxy, obj.GetNamespace()); err != nil { + return err + } + existingNamespaces.Insert(obj.GetNamespace()) + } oldManagedFields := obj.GetManagedFields() if err := cTo.Create(ctx, obj); err != nil { if !apierrors.IsAlreadyExists(err) { @@ -1188,3 +1235,22 @@ func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.M } return nil } + +func applyMutators(object client.Object, mutators ...ResourceMutatorFunc) (*unstructured.Unstructured, error) { + if object == nil { + return nil, nil + } + u := &unstructured.Unstructured{} + to, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object) + if err != nil { + return nil, err + } + u.SetUnstructuredContent(to) + for _, mutator := range mutators { + if err := mutator(u); err != nil { + return nil, errors.Wrapf(err, "error applying resource mutator to %q %s/%s", + u.GroupVersionKind(), object.GetNamespace(), object.GetName()) + } + } + return u, nil +} diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index 56b10250d441..ade86d12733c 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -29,6 +29,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" @@ -49,6 +50,32 @@ var moveTests = []struct { wantMoveGroups [][]string wantErr bool }{ + { + name: "Cluster with ClusterClass", + fields: moveTestsFields{ + objs: func() []client.Object { + objs := test.NewFakeClusterClass("ns1", "class1").Objs() + objs = append(objs, 
test.NewFakeCluster("ns1", "foo").WithTopologyClass("class1").Objs()...) + return deduplicateObjects(objs) + }(), + }, + wantMoveGroups: [][]string{ + { // group 1 + "cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1", + }, + { // group 2 + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1", + "controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1", + "cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo", + }, + { // group 3 + "/v1, Kind=Secret, ns1/foo-ca", + "/v1, Kind=Secret, ns1/foo-kubeconfig", + "infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo", + }, + }, + wantErr: false, + }, { name: "Cluster", fields: moveTestsFields{ @@ -1112,7 +1139,7 @@ func Test_objectMover_move_dryRun(t *testing.T) { dryRun: true, } - err := mover.move(graph, toProxy) + err := mover.move(graph, toProxy, nil) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1183,8 +1210,8 @@ func Test_objectMover_move(t *testing.T) { mover := objectMover{ fromProxy: graph.proxy, } - err := mover.move(graph, toProxy) + if tt.wantErr { g.Expect(err).To(HaveOccurred()) return @@ -1235,6 +1262,127 @@ func Test_objectMover_move(t *testing.T) { } } +func Test_objectMover_move_with_Mutator(t *testing.T) { + // NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process + // we use same mutator function for all tests and validate outcome based on input. 
+ // Create an objectGraph bound to a source cluster with all the CRDs for the types involved in the test.
+ graph := getObjectGraphWithObjs(tt.fields.objs) + + // Get all the types to be considered for discovery + g.Expect(getFakeDiscoveryTypes(graph)).To(Succeed()) + + // trigger discovery the content of the source cluster + g.Expect(graph.Discovery("")).To(Succeed()) + + // gets a fakeProxy to an empty cluster with all the required CRDs + toProxy := getFakeProxyWithCRDs() + + // Run move with mutators + mover := objectMover{ + fromProxy: graph.proxy, + } + + err := mover.move(graph, toProxy, namespaceMutator) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + + g.Expect(err).NotTo(HaveOccurred()) + + // check that the objects are removed from the source cluster and are created in the target cluster + csFrom, err := graph.proxy.NewClient() + g.Expect(err).NotTo(HaveOccurred()) + + csTo, err := toProxy.NewClient() + g.Expect(err).NotTo(HaveOccurred()) + + for _, node := range graph.uidToNode { + key := client.ObjectKey{ + Namespace: node.identity.Namespace, + Name: node.identity.Name, + } + + // objects are deleted from the source cluster + oFrom := &unstructured.Unstructured{} + oFrom.SetAPIVersion(node.identity.APIVersion) + oFrom.SetKind(node.identity.Kind) + + err := csFrom.Get(ctx, key, oFrom) + if err == nil { + if !node.isGlobal && !node.isGlobalHierarchy { + t.Errorf("%v not deleted in source cluster", key) + continue + } + } else if !apierrors.IsNotFound(err) { + t.Errorf("error = %v when checking for %v deleted in source cluster", err, key) + continue + } + + // objects are created in the target cluster + oTo := &unstructured.Unstructured{} + oTo.SetAPIVersion(node.identity.APIVersion) + oTo.SetKind(node.identity.Kind) + if !node.isGlobal { + key.Namespace = toNamespace + } + + if err := csTo.Get(ctx, key, oTo); err != nil { + t.Errorf("error = %v when checking for %v created in target cluster", err, key) + continue + } + if fields, knownKind := updateKnownKinds[oTo.GetKind()]; knownKind { + for _, nsField := range fields { + value, exists, 
err := unstructured.NestedFieldNoCopy(oTo.Object, nsField...) + g.Expect(err).To(BeNil()) + if exists { + g.Expect(value).To(Equal(toNamespace)) + } + } + } + } + }) + } +} + func Test_objectMover_checkProvisioningCompleted(t *testing.T) { type fields struct { objs []client.Object @@ -1797,6 +1945,11 @@ func Test_createTargetObject(t *testing.T) { }, }, want: func(g *WithT, toClient client.Client) { + ns := &corev1.Namespace{} + nsKey := client.ObjectKey{ + Name: "ns1", + } + g.Expect(toClient.Get(ctx, nsKey, ns)).To(Succeed()) c := &clusterv1.Cluster{} key := client.ObjectKey{ Namespace: "ns1", @@ -1932,7 +2085,7 @@ func Test_createTargetObject(t *testing.T) { fromProxy: tt.args.fromProxy, } - err := mover.createTargetObject(tt.args.node, tt.args.toProxy) + err := mover.createTargetObject(tt.args.node, tt.args.toProxy, nil, sets.New[string]()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) return diff --git a/cmd/clusterctl/client/move.go b/cmd/clusterctl/client/move.go index 32d90c65a578..38617143237c 100644 --- a/cmd/clusterctl/client/move.go +++ b/cmd/clusterctl/client/move.go @@ -38,6 +38,10 @@ type MoveOptions struct { // namespace will be used. Namespace string + // ExperimentalResourceMutatorFn accepts any number of resource mutator functions that are applied on all resources being moved. + // This is an experimental feature and is exposed only from the library and not (yet) through the CLI. + ExperimentalResourceMutators []cluster.ResourceMutatorFunc + // FromDirectory apply configuration from directory. FromDirectory string @@ -94,7 +98,7 @@ func (c *clusterctlClient) move(options MoveOptions) error { } } - return fromCluster.ObjectMover().Move(options.Namespace, toCluster, options.DryRun) + return fromCluster.ObjectMover().Move(options.Namespace, toCluster, options.DryRun, options.ExperimentalResourceMutators...) 
} func (c *clusterctlClient) fromDirectory(options MoveOptions) error { diff --git a/cmd/clusterctl/client/move_test.go b/cmd/clusterctl/client/move_test.go index a34ba55cc31b..aeafc33e6316 100644 --- a/cmd/clusterctl/client/move_test.go +++ b/cmd/clusterctl/client/move_test.go @@ -298,7 +298,7 @@ type fakeObjectMover struct { fromDirectoryErr error } -func (f *fakeObjectMover) Move(_ string, _ cluster.Client, _ bool) error { +func (f *fakeObjectMover) Move(_ string, _ cluster.Client, _ bool, _ ...cluster.ResourceMutatorFunc) error { return f.moveErr }