From 1aede9dcc5f227b23f982a528b7fbb5cdbbbf2e9 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Mon, 26 Aug 2024 16:00:38 +0530 Subject: [PATCH 1/2] KUBESAW-170: Replace custom ksctl adm restart logic with the one from kubectl rollout Signed-off-by: Feny Mehta --- go.mod | 1 + go.sum | 1 + pkg/cmd/adm/restart.go | 52 ++--------- pkg/cmd/adm/restart_test.go | 129 --------------------------- pkg/cmd/describe.go | 4 +- pkg/cmd/get.go | 3 +- pkg/cmd/logs.go | 3 +- pkg/{cmd => kubectl}/base_kubectl.go | 8 +- 8 files changed, 20 insertions(+), 181 deletions(-) rename pkg/{cmd => kubectl}/base_kubectl.go (92%) diff --git a/go.mod b/go.mod index 7bcbe05..89cfa55 100644 --- a/go.mod +++ b/go.mod @@ -83,6 +83,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/lithammer/dedent v1.1.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-isatty v0.0.18 // indirect diff --git a/go.sum b/go.sum index d28ef87..0ace6cd 100644 --- a/go.sum +++ b/go.sum @@ -439,6 +439,7 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= diff --git a/pkg/cmd/adm/restart.go 
b/pkg/cmd/adm/restart.go index dcd65f0..589c74b 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -5,63 +5,25 @@ import ( "fmt" "time" - "github.com/kubesaw/ksctl/pkg/client" - "github.com/kubesaw/ksctl/pkg/cmd/flags" - "github.com/kubesaw/ksctl/pkg/configuration" clicontext "github.com/kubesaw/ksctl/pkg/context" "github.com/kubesaw/ksctl/pkg/ioutils" + "github.com/kubesaw/ksctl/pkg/kubectl" "github.com/spf13/cobra" appsv1 "k8s.io/api/apps/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/cli-runtime/pkg/genericclioptions" + kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout" + cmdutil "k8s.io/kubectl/pkg/cmd/util" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) func NewRestartCmd() *cobra.Command { - var targetCluster string - command := &cobra.Command{ - Use: "restart -t ", - Short: "Restarts a deployment", - Long: `Restarts the deployment with the given name in the operator namespace. -If no deployment name is provided, then it lists all existing deployments in the namespace.`, - Args: cobra.RangeArgs(0, 1), - RunE: func(cmd *cobra.Command, args []string) error { - term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) - ctx := clicontext.NewCommandContext(term, client.DefaultNewClient) - return restart(ctx, targetCluster, args...) 
- }, - } - command.Flags().StringVarP(&targetCluster, "target-cluster", "t", "", "The target cluster") - flags.MustMarkRequired(command, "target-cluster") - return command -} - -func restart(ctx *clicontext.CommandContext, clusterName string, deployments ...string) error { - cfg, err := configuration.LoadClusterConfig(ctx, clusterName) - if err != nil { - return err - } - cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI) - if err != nil { - return err - } - - if len(deployments) == 0 { - err := printExistingDeployments(ctx.Terminal, cl, cfg.OperatorNamespace) - if err != nil { - ctx.Terminal.Printlnf("\nERROR: Failed to list existing deployments\n :%s", err.Error()) - } - return fmt.Errorf("at least one deployment name is required, include one or more of the above deployments to restart") - } - deploymentName := deployments[0] - - if !ctx.AskForConfirmation( - ioutils.WithMessagef("restart the deployment '%s' in namespace '%s'", deploymentName, cfg.OperatorNamespace)) { - return nil - } - return restartDeployment(ctx, cl, cfg.OperatorNamespace, deploymentName) + return kubectl.SetUpKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + return kubectlrollout.NewCmdRolloutRestart(factory, ioStreams) + }) } func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, deploymentName string) error { diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 32197c3..f352fb8 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -2,7 +2,6 @@ package adm import ( "context" - "fmt" "testing" "github.com/codeready-toolchain/toolchain-common/pkg/test" @@ -18,134 +17,6 @@ import ( runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func TestRestartDeployment(t *testing.T) { - // given - SetFileConfig(t, Host(), Member()) - - for _, clusterName := range []string{"host", "member1"} { - clusterType := configuration.Host - if clusterName != "host" { - 
clusterType = configuration.Member - } - namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: "cool-deployment", - } - term := NewFakeTerminalWithResponse("Y") - - t.Run("restart is successful for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "cool-deployment") - - // then - require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 2, numberOfUpdateCalls) - }) - - t.Run("list deployments when no deployment name is provided for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - term := NewFakeTerminalWithResponse("Y") - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName) - - // then - require.EqualError(t, err, "at least one deployment name is required, include one or more of the above deployments to restart") - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) - assert.Contains(t, term.Output(), fmt.Sprintf("Existing deployments in toolchain-%s-operator namespace", clusterType)) - assert.Contains(t, term.Output(), "cool-deployment") - }) - - t.Run("restart fails - cannot get the deployment for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - 
numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - fakeClient.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { - return fmt.Errorf("some error") - } - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "cool-deployment") - - // then - require.Error(t, err) - fakeClient.MockGet = nil - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) - }) - - t.Run("restart fails - deployment not found for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - term := NewFakeTerminalWithResponse("Y") - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "wrong-deployment") - - // then - require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) - assert.Contains(t, term.Output(), "ERROR: The given deployment 'wrong-deployment' wasn't found.") - assert.Contains(t, term.Output(), fmt.Sprintf("Existing deployments in toolchain-%s-operator namespace", clusterType)) - assert.Contains(t, term.Output(), "cool-deployment") - }) - } -} - -func TestRestartDeploymentWithInsufficientPermissions(t *testing.T) { - // given - SetFileConfig(t, Host(NoToken()), Member(NoToken())) - for _, clusterName := range []string{"host", "member1"} { - // given - clusterType := configuration.Host - if clusterName != "host" { - clusterType = configuration.Member - } - namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) - namespacedName := types.NamespacedName{ - Namespace: 
namespace, - Name: "cool-deployment", - } - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - term := NewFakeTerminalWithResponse("Y") - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "cool-deployment") - - // then - require.Error(t, err) - assert.Equal(t, 0, numberOfUpdateCalls) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - } -} - func TestRestartHostOperator(t *testing.T) { // given SetFileConfig(t, Host()) diff --git a/pkg/cmd/describe.go b/pkg/cmd/describe.go index 41eed3f..ace9635 100644 --- a/pkg/cmd/describe.go +++ b/pkg/cmd/describe.go @@ -1,14 +1,16 @@ package cmd import ( + "github.com/kubesaw/ksctl/pkg/kubectl" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" kubectldesc "k8s.io/kubectl/pkg/cmd/describe" + cmdutil "k8s.io/kubectl/pkg/cmd/util" ) func NewDescribeCmd() *cobra.Command { - return setupKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + return kubectl.SetUpKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { return kubectldesc.NewCmdDescribe("ksctl", factory, ioStreams) }) } diff --git a/pkg/cmd/get.go b/pkg/cmd/get.go index 00bccee..0dd151d 100644 --- a/pkg/cmd/get.go +++ b/pkg/cmd/get.go @@ -1,6 +1,7 @@ package cmd import ( + "github.com/kubesaw/ksctl/pkg/kubectl" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" kubectlget "k8s.io/kubectl/pkg/cmd/get" @@ -8,7 +9,7 @@ import ( ) func NewGetCmd() *cobra.Command { - return setupKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + return kubectl.SetUpKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { return 
kubectlget.NewCmdGet("ksctl", factory, ioStreams) }) } diff --git a/pkg/cmd/logs.go b/pkg/cmd/logs.go index b884e0b..01a4e05 100644 --- a/pkg/cmd/logs.go +++ b/pkg/cmd/logs.go @@ -1,6 +1,7 @@ package cmd import ( + "github.com/kubesaw/ksctl/pkg/kubectl" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" kubectllogs "k8s.io/kubectl/pkg/cmd/logs" @@ -8,7 +9,7 @@ import ( ) func NewLogsCmd() *cobra.Command { - return setupKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { + return kubectl.SetUpKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { return kubectllogs.NewCmdLogs(factory, ioStreams) }) } diff --git a/pkg/cmd/base_kubectl.go b/pkg/kubectl/base_kubectl.go similarity index 92% rename from pkg/cmd/base_kubectl.go rename to pkg/kubectl/base_kubectl.go index c684299..61bb283 100644 --- a/pkg/cmd/base_kubectl.go +++ b/pkg/kubectl/base_kubectl.go @@ -1,4 +1,4 @@ -package cmd +package kubectl import ( "fmt" @@ -15,10 +15,10 @@ import ( cmdutil "k8s.io/kubectl/pkg/cmd/util" ) -type newCmd func(cmdutil.Factory, genericclioptions.IOStreams) *cobra.Command +type NewCmd func(cmdutil.Factory, genericclioptions.IOStreams) *cobra.Command -// setupKubectlCmd takes care of setting up the flags and PreRunE func on the given Kubectl command -func setupKubectlCmd(newCmd newCmd) *cobra.Command { +// SetUpKubectlCmd takes care of setting up the flags and PreRunE func on the given Kubectl command +func SetUpKubectlCmd(newCmd NewCmd) *cobra.Command { kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) ioStreams := genericclioptions.IOStreams{ From 4798d12fa9c631b03c7dce2ef7a3102d7983fb5b Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Mon, 26 Aug 2024 16:48:39 +0530 Subject: [PATCH 2/2] restart host operator using rollout Signed-off-by: Feny Mehta ---
pkg/cmd/adm/restart.go | 103 --------------------- pkg/cmd/adm/restart_test.go | 123 -------------------------- pkg/cmd/adm/unregister_member.go | 32 ++++++- pkg/cmd/adm/unregister_member_test.go | 108 ++++++++++++++++++++++ 4 files changed, 139 insertions(+), 227 deletions(-) delete mode 100644 pkg/cmd/adm/restart_test.go diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 589c74b..7c138e4 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -1,23 +1,12 @@ package adm import ( - "context" - "fmt" - "time" - - clicontext "github.com/kubesaw/ksctl/pkg/context" - "github.com/kubesaw/ksctl/pkg/ioutils" "github.com/kubesaw/ksctl/pkg/kubectl" "github.com/spf13/cobra" - appsv1 "k8s.io/api/apps/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/cli-runtime/pkg/genericclioptions" kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout" cmdutil "k8s.io/kubectl/pkg/cmd/util" - runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) func NewRestartCmd() *cobra.Command { @@ -25,95 +14,3 @@ func NewRestartCmd() *cobra.Command { return kubectlrollout.NewCmdRolloutRestart(factory, ioStreams) }) } - -func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, deploymentName string) error { - namespacedName := types.NamespacedName{ - Namespace: ns, - Name: deploymentName, - } - - originalReplicas, err := scaleToZero(cl, namespacedName) - if err != nil { - if apierrors.IsNotFound(err) { - ctx.Printlnf("\nERROR: The given deployment '%s' wasn't found.", deploymentName) - return printExistingDeployments(ctx, cl, ns) - } - return err - } - ctx.Println("The deployment was scaled to 0") - if err := scaleBack(ctx, cl, namespacedName, originalReplicas); err != nil { - ctx.Printlnf("Scaling the deployment '%s' in namespace '%s' back to '%d' replicas wasn't successful", originalReplicas) - ctx.Println("Please, try to contact administrators to scale the 
deployment back manually") - return err - } - - ctx.Printlnf("The deployment was scaled back to '%d'", originalReplicas) - return nil -} - -func restartHostOperator(ctx *clicontext.CommandContext, hostClient runtimeclient.Client, hostNamespace string) error { - deployments := &appsv1.DeploymentList{} - if err := hostClient.List(context.TODO(), deployments, - runtimeclient.InNamespace(hostNamespace), - runtimeclient.MatchingLabels{"olm.owner.namespace": "toolchain-host-operator"}); err != nil { - return err - } - if len(deployments.Items) != 1 { - return fmt.Errorf("there should be a single deployment matching the label olm.owner.namespace=toolchain-host-operator in %s ns, but %d was found. "+ - "It's not possible to restart the Host Operator deployment", hostNamespace, len(deployments.Items)) - } - - return restartDeployment(ctx, hostClient, hostNamespace, deployments.Items[0].Name) -} - -func printExistingDeployments(term ioutils.Terminal, cl runtimeclient.Client, ns string) error { - deployments := &appsv1.DeploymentList{} - if err := cl.List(context.TODO(), deployments, runtimeclient.InNamespace(ns)); err != nil { - return err - } - deploymentList := "\n" - for _, deployment := range deployments.Items { - deploymentList += fmt.Sprintf("%s\n", deployment.Name) - } - term.PrintContextSeparatorWithBodyf(deploymentList, "Existing deployments in %s namespace", ns) - return nil -} - -func scaleToZero(cl runtimeclient.Client, namespacedName types.NamespacedName) (int32, error) { - // get the deployment - deployment := &appsv1.Deployment{} - if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil { - return 0, err - } - // keep original number of replicas so we can bring it back - originalReplicas := *deployment.Spec.Replicas - zero := int32(0) - deployment.Spec.Replicas = &zero - - // update the deployment so it scales to zero - return originalReplicas, cl.Update(context.TODO(), deployment) -} - -func scaleBack(term ioutils.Terminal, cl 
runtimeclient.Client, namespacedName types.NamespacedName, originalReplicas int32) error { - return wait.Poll(500*time.Millisecond, 10*time.Second, func() (done bool, err error) { - term.Println("") - term.Printlnf("Trying to scale the deployment back to '%d'", originalReplicas) - // get the updated - deployment := &appsv1.Deployment{} - if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil { - return false, err - } - // check if the replicas number wasn't already reset by a controller - if *deployment.Spec.Replicas == originalReplicas { - return true, nil - } - // set the original - deployment.Spec.Replicas = &originalReplicas - // and update to scale back - if err := cl.Update(context.TODO(), deployment); err != nil { - term.Printlnf("error updating Deployment '%s': %s. Will retry again...", namespacedName.Name, err.Error()) - return false, nil - } - return true, nil - }) -} diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go deleted file mode 100644 index f352fb8..0000000 --- a/pkg/cmd/adm/restart_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package adm - -import ( - "context" - "testing" - - "github.com/codeready-toolchain/toolchain-common/pkg/test" - "github.com/kubesaw/ksctl/pkg/configuration" - clicontext "github.com/kubesaw/ksctl/pkg/context" - . 
"github.com/kubesaw/ksctl/pkg/test" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" -) - -func TestRestartHostOperator(t *testing.T) { - // given - SetFileConfig(t, Host()) - term := NewFakeTerminalWithResponse("") // it should not read the input - cfg, err := configuration.LoadClusterConfig(term, "host") - require.NoError(t, err) - namespacedName := types.NamespacedName{ - Namespace: "toolchain-host-operator", - Name: "host-operator-controller-manager", - } - - t.Run("host deployment is present and restart successful", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // then - require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - assert.Equal(t, 2, numberOfUpdateCalls) - }) - - t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // then - require.Error(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - assert.Equal(t, 0, 
numberOfUpdateCalls) - }) - - t.Run("there are more deployments with the host operator label - restart fails", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - deployment2 := deployment.DeepCopy() - deployment2.Name = "another" - newClient, fakeClient := NewFakeClients(t, deployment, deployment2) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // then - require.Error(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - assert.Equal(t, 0, numberOfUpdateCalls) - }) -} - -func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespacedName.Namespace, - Name: namespacedName.Name, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - }, - } -} - -func requireDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { - return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { - deployment, ok := obj.(*appsv1.Deployment) - require.True(t, ok) - checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment) - return fakeClient.Client.Update(ctx, obj, opts...) 
- } -} - -func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) { - // on the first call, we should have a deployment with 3 replicas ("current") and request to scale down to 0 ("requested") - // on the other calls, it's the opposite - if *numberOfUpdateCalls == 0 { - // check the current deployment's replicas field - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas) - // check the requested deployment's replicas field - assert.Equal(t, int32(0), *deployment.Spec.Replicas) - } else { - // check the current deployment's replicas field - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0) - // check the requested deployment's replicas field - assert.Equal(t, currentReplicas, *deployment.Spec.Replicas) - } - *numberOfUpdateCalls++ -} diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go index fd177b7..1b94ddd 100644 --- a/pkg/cmd/adm/unregister_member.go +++ b/pkg/cmd/adm/unregister_member.go @@ -3,6 +3,7 @@ package adm import ( "context" "fmt" + "os" toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" "github.com/kubesaw/ksctl/pkg/client" @@ -12,6 +13,9 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/types" + "k8s.io/cli-runtime/pkg/genericclioptions" + kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout" + cmdutil "k8s.io/kubectl/pkg/cmd/util" ) func NewUnregisterMemberCmd() *cobra.Command { @@ -62,5 +66,31 @@ func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string) } ctx.Printlnf("\nThe deletion of the Toolchain member cluster from the Host cluster has been triggered") - return restartHostOperator(ctx, hostClusterClient, hostClusterConfig.OperatorNamespace) + return restartHostOperator(hostClusterConfig) +} + +func restartHostOperator(hostConfig configuration.ClusterConfig) error { + kubeConfigFlags := 
genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() + hFactory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) + ioStreams := genericclioptions.IOStreams{ + In: nil, // Not to forward the Standard Input + Out: os.Stdout, + ErrOut: os.Stderr, + } + + hArgs := []string{"", "deployments", + "--namespace", hostConfig.OperatorNamespace, + "--server", hostConfig.ServerAPI, + "--token", hostConfig.Token} + + o := kubectlrollout.NewRolloutRestartOptions(ioStreams) + + if err := o.Complete(hFactory, nil, hArgs); err != nil { + panic(err) + } + o.LabelSelector = "olm.owner.namespace=toolchain-host-operator" + if err := o.Validate(); err != nil { + panic(err) + } + return o.RunRestart() } diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index 72c2392..b02e889 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -1,13 +1,19 @@ package adm import ( + "context" "testing" "github.com/codeready-toolchain/toolchain-common/pkg/test" + "github.com/kubesaw/ksctl/pkg/configuration" clicontext "github.com/kubesaw/ksctl/pkg/context" . 
"github.com/kubesaw/ksctl/pkg/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { @@ -121,3 +127,105 @@ func TestUnregisterMemberLacksPermissions(t *testing.T) { require.EqualError(t, err, "ksctl command failed: the token in your ksctl.yaml file is missing") AssertToolchainClusterSpec(t, fakeClient, toolchainCluster) } + +func TestRestartHostOperator(t *testing.T) { + // given + SetFileConfig(t, Host()) + term := NewFakeTerminalWithResponse("") // it should not read the input + cfg, err := configuration.LoadClusterConfig(term, "host") + require.NoError(t, err) + namespacedName := types.NamespacedName{ + Namespace: "toolchain-host-operator", + Name: "host-operator-controller-manager", + } + + t.Run("host deployment is present and restart successful", func(t *testing.T) { + // given + deployment := newDeployment(namespacedName, 1) + deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} + _, fakeClient := NewFakeClients(t, deployment) + numberOfUpdateCalls := 0 + fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + + // when + err := restartHostOperator(cfg) + + // then + require.NoError(t, err) + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + assert.Equal(t, 2, numberOfUpdateCalls) + }) + + t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { + // given + deployment := newDeployment(namespacedName, 1) + _, fakeClient := NewFakeClients(t, deployment) + numberOfUpdateCalls := 0 + fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + + // when + err := restartHostOperator(cfg) + + // then + require.Error(t, err) + 
AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + assert.Equal(t, 0, numberOfUpdateCalls) + }) + + t.Run("there are more deployments with the host operator label - restart fails", func(t *testing.T) { + // given + deployment := newDeployment(namespacedName, 1) + deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} + deployment2 := deployment.DeepCopy() + deployment2.Name = "another" + _, fakeClient := NewFakeClients(t, deployment, deployment2) + numberOfUpdateCalls := 0 + fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + + // when + err := restartHostOperator(cfg) + + // then + require.Error(t, err) + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + assert.Equal(t, 0, numberOfUpdateCalls) + }) +} + +func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespacedName.Namespace, + Name: namespacedName.Name, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + }, + } +} + +func requireDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { + return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { + deployment, ok := obj.(*appsv1.Deployment) + require.True(t, ok) + checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment) + return fakeClient.Client.Update(ctx, obj, opts...) 
+ } +} + +func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) { + // on the first call, we should have a deployment with 3 replicas ("current") and request to scale down to 0 ("requested") + // on the other calls, it's the opposite + if *numberOfUpdateCalls == 0 { + // check the current deployment's replicas field + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas) + // check the requested deployment's replicas field + assert.Equal(t, int32(0), *deployment.Spec.Replicas) + } else { + // check the current deployment's replicas field + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0) + // check the requested deployment's replicas field + assert.Equal(t, currentReplicas, *deployment.Spec.Replicas) + } + *numberOfUpdateCalls++ +}