KUBESAW-170: Replace custom ksctl adm restart logic with the one from kubectl rollout #70

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
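
For context on the approach this PR takes: kubectl publishes each of its subcommands as a reusable `cobra.Command` constructor, so another CLI can embed `rollout restart` by supplying a `cmdutil.Factory` and IO streams. Below is a minimal standalone sketch of that mechanism, not code from this PR; the factory wiring via `genericclioptions.NewConfigFlags` is generic kubectl plumbing (not necessarily what ksctl's `SetUpKubectlCmd` does internally), and the deployment and namespace names are placeholders:

```go
package main

import (
	"os"

	"k8s.io/cli-runtime/pkg/genericclioptions"
	kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

func main() {
	// Standard kubeconfig/context resolution, the same plumbing kubectl itself uses.
	configFlags := genericclioptions.NewConfigFlags(true)
	factory := cmdutil.NewFactory(configFlags)
	streams := genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}

	// Reuse kubectl's own `rollout restart` subcommand.
	cmd := kubectlrollout.NewCmdRolloutRestart(factory, streams)
	// Expose --namespace, --kubeconfig, etc. on the standalone subcommand.
	configFlags.AddFlags(cmd.Flags())
	// Placeholder target: deployment name and namespace are illustrative only.
	cmd.SetArgs([]string{"deployment/host-operator", "--namespace", "toolchain-host-operator"})
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```
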
1 change: 1 addition & 0 deletions go.mod
@@ -83,6 +83,7 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/lithammer/dedent v1.1.0 // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattn/go-isatty v0.0.18 // indirect
1 change: 1 addition & 0 deletions go.sum
@@ -439,6 +439,7 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
 github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
153 changes: 6 additions & 147 deletions pkg/cmd/adm/restart.go
@@ -1,157 +1,16 @@
 package adm
 
 import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/kubesaw/ksctl/pkg/client"
-	"github.com/kubesaw/ksctl/pkg/cmd/flags"
-	"github.com/kubesaw/ksctl/pkg/configuration"
-	clicontext "github.com/kubesaw/ksctl/pkg/context"
-	"github.com/kubesaw/ksctl/pkg/ioutils"
+	"github.com/kubesaw/ksctl/pkg/kubectl"
 
 	"github.com/spf13/cobra"
-	appsv1 "k8s.io/api/apps/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
-	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 )
 
 func NewRestartCmd() *cobra.Command {
-	var targetCluster string
-	command := &cobra.Command{
-		Use:   "restart -t <cluster-name> <deployment-name>",
-		Short: "Restarts a deployment",
-		Long: `Restarts the deployment with the given name in the operator namespace.
-If no deployment name is provided, then it lists all existing deployments in the namespace.`,
-		Args: cobra.RangeArgs(0, 1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout)
-			ctx := clicontext.NewCommandContext(term, client.DefaultNewClient)
-			return restart(ctx, targetCluster, args...)
-		},
-	}
-	command.Flags().StringVarP(&targetCluster, "target-cluster", "t", "", "The target cluster")
-	flags.MustMarkRequired(command, "target-cluster")
-	return command
-}
-
-func restart(ctx *clicontext.CommandContext, clusterName string, deployments ...string) error {
-	cfg, err := configuration.LoadClusterConfig(ctx, clusterName)
-	if err != nil {
-		return err
-	}
-	cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI)
-	if err != nil {
-		return err
-	}
-
-	if len(deployments) == 0 {
-		err := printExistingDeployments(ctx.Terminal, cl, cfg.OperatorNamespace)
-		if err != nil {
-			ctx.Terminal.Printlnf("\nERROR: Failed to list existing deployments\n :%s", err.Error())
-		}
-		return fmt.Errorf("at least one deployment name is required, include one or more of the above deployments to restart")
-	}
-	deploymentName := deployments[0]
-
-	if !ctx.AskForConfirmation(
-		ioutils.WithMessagef("restart the deployment '%s' in namespace '%s'", deploymentName, cfg.OperatorNamespace)) {
-		return nil
-	}
-	return restartDeployment(ctx, cl, cfg.OperatorNamespace, deploymentName)
-}
-
-func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, deploymentName string) error {
-	namespacedName := types.NamespacedName{
-		Namespace: ns,
-		Name:      deploymentName,
-	}
-
-	originalReplicas, err := scaleToZero(cl, namespacedName)
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			ctx.Printlnf("\nERROR: The given deployment '%s' wasn't found.", deploymentName)
-			return printExistingDeployments(ctx, cl, ns)
-		}
-		return err
-	}
-	ctx.Println("The deployment was scaled to 0")
-	if err := scaleBack(ctx, cl, namespacedName, originalReplicas); err != nil {
-		ctx.Printlnf("Scaling the deployment '%s' in namespace '%s' back to '%d' replicas wasn't successful", originalReplicas)
-		ctx.Println("Please, try to contact administrators to scale the deployment back manually")
-		return err
-	}
-
-	ctx.Printlnf("The deployment was scaled back to '%d'", originalReplicas)
-	return nil
-}
-
-func restartHostOperator(ctx *clicontext.CommandContext, hostClient runtimeclient.Client, hostNamespace string) error {
-	deployments := &appsv1.DeploymentList{}
-	if err := hostClient.List(context.TODO(), deployments,
-		runtimeclient.InNamespace(hostNamespace),
-		runtimeclient.MatchingLabels{"olm.owner.namespace": "toolchain-host-operator"}); err != nil {
-		return err
-	}
-	if len(deployments.Items) != 1 {
-		return fmt.Errorf("there should be a single deployment matching the label olm.owner.namespace=toolchain-host-operator in %s ns, but %d was found. "+
-			"It's not possible to restart the Host Operator deployment", hostNamespace, len(deployments.Items))
-	}
-
-	return restartDeployment(ctx, hostClient, hostNamespace, deployments.Items[0].Name)
-}
-
-func printExistingDeployments(term ioutils.Terminal, cl runtimeclient.Client, ns string) error {
-	deployments := &appsv1.DeploymentList{}
-	if err := cl.List(context.TODO(), deployments, runtimeclient.InNamespace(ns)); err != nil {
-		return err
-	}
-	deploymentList := "\n"
-	for _, deployment := range deployments.Items {
-		deploymentList += fmt.Sprintf("%s\n", deployment.Name)
-	}
-	term.PrintContextSeparatorWithBodyf(deploymentList, "Existing deployments in %s namespace", ns)
-	return nil
-}
-
-func scaleToZero(cl runtimeclient.Client, namespacedName types.NamespacedName) (int32, error) {
-	// get the deployment
-	deployment := &appsv1.Deployment{}
-	if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil {
-		return 0, err
-	}
-	// keep original number of replicas so we can bring it back
-	originalReplicas := *deployment.Spec.Replicas
-	zero := int32(0)
-	deployment.Spec.Replicas = &zero
-
-	// update the deployment so it scales to zero
-	return originalReplicas, cl.Update(context.TODO(), deployment)
-}
-
-func scaleBack(term ioutils.Terminal, cl runtimeclient.Client, namespacedName types.NamespacedName, originalReplicas int32) error {
-	return wait.Poll(500*time.Millisecond, 10*time.Second, func() (done bool, err error) {
-		term.Println("")
-		term.Printlnf("Trying to scale the deployment back to '%d'", originalReplicas)
-		// get the updated
-		deployment := &appsv1.Deployment{}
-		if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil {
-			return false, err
-		}
-		// check if the replicas number wasn't already reset by a controller
-		if *deployment.Spec.Replicas == originalReplicas {
-			return true, nil
-		}
-		// set the original
-		deployment.Spec.Replicas = &originalReplicas
-		// and update to scale back
-		if err := cl.Update(context.TODO(), deployment); err != nil {
-			term.Printlnf("error updating Deployment '%s': %s. Will retry again...", namespacedName.Name, err.Error())
-			return false, nil
-		}
-		return true, nil
+	return kubectl.SetUpKubectlCmd(func(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
+		return kubectlrollout.NewCmdRolloutRestart(factory, ioStreams)

> Contributor (review comment on the added lines above): as I mentioned in our call and also in the jira story, the command should also wait until the rollout is complete

+	})
 }
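
On the review comment above, one possible direction, sketched here for illustration rather than taken from the PR: kubectl also ships a `rollout status` subcommand with the same constructor shape, and it blocks until the rollout finishes, so the restart could be chained with it. The helper name `restartAndWait` and the chaining approach are assumptions:

```go
package adm

import (
	"k8s.io/cli-runtime/pkg/genericclioptions"
	kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

// restartAndWait (hypothetical) triggers `rollout restart` for the given
// target, e.g. "deployment/host-operator", and then blocks on `rollout status`
// until the new replica set is fully rolled out.
func restartAndWait(factory cmdutil.Factory, ioStreams genericclioptions.IOStreams, target string) error {
	restart := kubectlrollout.NewCmdRolloutRestart(factory, ioStreams)
	restart.SetArgs([]string{target})
	if err := restart.Execute(); err != nil {
		return err
	}

	// NewCmdRolloutStatus watches the resource until the rollout completes or
	// its --timeout elapses. Caveat: kubectl subcommands funnel fatal errors
	// through cmdutil.CheckErr, which can exit the process instead of returning.
	status := kubectlrollout.NewCmdRolloutStatus(factory, ioStreams)
	status.SetArgs([]string{target})
	return status.Execute()
}
```
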