From efa325b22da20b091cfb2b94588dbab4e9bb3939 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Mon, 16 Oct 2023 10:29:42 -0600 Subject: [PATCH 01/31] implement 'IsHelmVM' function based on presence of configmap if embedded-cluster-config exists in kube-system it is helmvm add node metrics add node pod capacity and list of pods to node metrics implement per-node metrics endpoint with podlist --- pkg/handlers/handlers.go | 2 + pkg/handlers/helmvm_get.go | 21 ++++++- pkg/handlers/interface.go | 1 + pkg/handlers/mock/mock.go | 12 ++++ pkg/helmvm/helmvm_node.go | 105 +++++++++++++++++++++++++++++++ pkg/helmvm/helmvm_nodes.go | 126 +++++++++++++++---------------------- pkg/helmvm/node_join.go | 7 ++- pkg/helmvm/types/types.go | 3 + pkg/helmvm/util.go | 36 ++++++++++- 9 files changed, 234 insertions(+), 79 deletions(-) create mode 100644 pkg/helmvm/helmvm_node.go diff --git a/pkg/handlers/handlers.go b/pkg/handlers/handlers.go index aa711b7b89..aca714d3b2 100644 --- a/pkg/handlers/handlers.go +++ b/pkg/handlers/handlers.go @@ -287,6 +287,8 @@ func RegisterSessionAuthRoutes(r *mux.Router, kotsStore store.Store, handler KOT HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DeleteHelmVMNode)) r.Name("GetHelmVMNodes").Path("/api/v1/helmvm/nodes").Methods("GET"). HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetHelmVMNodes)) + r.Name("GetHelmVMNode").Path("/api/v1/helmvm/node/{nodeName}").Methods("GET"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetHelmVMNode)) // Prometheus r.Name("SetPrometheusAddress").Path("/api/v1/prometheus").Methods("POST"). diff --git a/pkg/handlers/helmvm_get.go b/pkg/handlers/helmvm_get.go index cd440d116f..4f736996ed 100644 --- a/pkg/handlers/helmvm_get.go +++ b/pkg/handlers/helmvm_get.go @@ -3,6 +3,7 @@ package handlers import ( "net/http" + "github.com/gorilla/mux" "github.com/replicatedhq/kots/pkg/helmvm" "github.com/replicatedhq/kots/pkg/k8sutil" "github.com/replicatedhq/kots/pkg/logger" @@ -16,7 +17,7 @@ func (h *Handler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { return } - nodes, err := helmvm.GetNodes(client) + nodes, err := helmvm.GetNodes(r.Context(), client) if err != nil { logger.Error(err) w.WriteHeader(http.StatusInternalServerError) @@ -24,3 +25,21 @@ func (h *Handler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { } JSON(w, http.StatusOK, nodes) } + +func (h *Handler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) { + client, err := k8sutil.GetClientset() + if err != nil { + logger.Error(err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + nodeName := mux.Vars(r)["nodeName"] + node, err := helmvm.GetNode(r.Context(), client, nodeName) + if err != nil { + logger.Error(err) + w.WriteHeader(http.StatusInternalServerError) + return + } + JSON(w, http.StatusOK, node) +} diff --git a/pkg/handlers/interface.go b/pkg/handlers/interface.go index c6cb2a00db..b69ae161fd 100644 --- a/pkg/handlers/interface.go +++ b/pkg/handlers/interface.go @@ -144,6 +144,7 @@ type KOTSHandler interface { DrainHelmVMNode(w http.ResponseWriter, r *http.Request) DeleteHelmVMNode(w http.ResponseWriter, r *http.Request) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) + GetHelmVMNode(w http.ResponseWriter, r *http.Request) // Prometheus SetPrometheusAddress(w http.ResponseWriter, r *http.Request) diff --git a/pkg/handlers/mock/mock.go b/pkg/handlers/mock/mock.go index cf9fe09ede..f496224c74 100644 --- a/pkg/handlers/mock/mock.go +++ b/pkg/handlers/mock/mock.go @@ -778,6 
+778,18 @@ func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNodes(w, r interface{}) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNodes", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNodes), w, r) } +// GetHelmVMNodes mocks base method. +func (m *MockKOTSHandler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GetHelmVMNode", w, r) +} + +// GetHelmVMNode indicates an expected call of GetHelmVMNode. +func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNode(w, r interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNode", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNode), w, r) +} + // GetIdentityServiceConfig mocks base method. func (m *MockKOTSHandler) GetIdentityServiceConfig(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() diff --git a/pkg/helmvm/helmvm_node.go b/pkg/helmvm/helmvm_node.go new file mode 100644 index 0000000000..7805400c1d --- /dev/null +++ b/pkg/helmvm/helmvm_node.go @@ -0,0 +1,105 @@ +package helmvm + +import ( + "context" + "fmt" + "math" + "strconv" + + "github.com/replicatedhq/kots/pkg/helmvm/types" + "github.com/replicatedhq/kots/pkg/k8sutil" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + metricsv "k8s.io/metrics/pkg/client/clientset/versioned" +) + +// GetNode will get a node with stats and podlist +func GetNode(ctx context.Context, client kubernetes.Interface, nodeName string) (*types.Node, error) { + node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("get node %s: %w", nodeName, err) + } + + clientConfig, err := k8sutil.GetClusterConfig() + if err != nil { + return nil, fmt.Errorf("failed to get cluster config: %w", err) + } + + metricsClient, err := metricsv.NewForConfig(clientConfig) + if err != nil { + return nil, fmt.Errorf("failed to create metrics client: %w", err) + } + + nodePods, err := podsOnNode(ctx, client, nodeName) + if err != nil { + return nil, fmt.Errorf("pods on node: %w", err) + } + + cpuCapacity := types.CapacityAvailable{} + memoryCapacity := types.CapacityAvailable{} + podCapacity := types.CapacityAvailable{} + + memoryCapacity.Capacity = float64(node.Status.Capacity.Memory().Value()) / math.Pow(2, 30) // capacity in GiB + + cpuCapacity.Capacity, err = strconv.ParseFloat(node.Status.Capacity.Cpu().String(), 64) + if err != nil { + return nil, fmt.Errorf("parse CPU capacity %q for node %s: %w", node.Status.Capacity.Cpu().String(), node.Name, err) + } + + podCapacity.Capacity = float64(node.Status.Capacity.Pods().Value()) + + nodeMetrics, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("get node metrics: %w", err) + } + + if nodeMetrics.Usage.Memory() != nil { + memoryCapacity.Available = memoryCapacity.Capacity - float64(nodeMetrics.Usage.Memory().Value())/math.Pow(2, 30) + } + + if nodeMetrics.Usage.Cpu() != nil { + cpuCapacity.Available = cpuCapacity.Capacity - nodeMetrics.Usage.Cpu().AsApproximateFloat64() + } + + podCapacity.Available = podCapacity.Capacity - float64(len(nodePods)) + + nodeLabelArray := []string{} + for k, v := range node.Labels { + nodeLabelArray = append(nodeLabelArray, fmt.Sprintf("%s:%s", k, v)) + } + + return &types.Node{ + Name: node.Name, + IsConnected: isConnected(*node), + IsReady: isReady(*node), + IsPrimaryNode: isPrimary(*node), + CanDelete:
node.Spec.Unschedulable && !isConnected(*node), + KubeletVersion: node.Status.NodeInfo.KubeletVersion, + CPU: cpuCapacity, + Memory: memoryCapacity, + Pods: podCapacity, + Labels: nodeLabelArray, + Conditions: findNodeConditions(node.Status.Conditions), + PodList: nodePods, + }, nil +} + +func podsOnNode(ctx context.Context, client kubernetes.Interface, nodeName string) ([]corev1.Pod, error) { + namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("list namespaces: %w", err) + } + + toReturn := []corev1.Pod{} + + for _, ns := range namespaces.Items { + nsPods, err := client.CoreV1().Pods(ns.Name).List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName)}) + if err != nil { + return nil, fmt.Errorf("list pods on %s in namespace %s: %w", nodeName, ns.Name, err) + } + + toReturn = append(toReturn, nsPods.Items...) + } + return toReturn, nil +} diff --git a/pkg/helmvm/helmvm_nodes.go index e00dca2108..f8bfefff4b 100644 --- a/pkg/helmvm/helmvm_nodes.go +++ b/pkg/helmvm/helmvm_nodes.go @@ -2,34 +2,43 @@ package helmvm import ( "context" - "crypto/tls" - "encoding/json" "fmt" - "io" "math" - "net/http" - "os" "strconv" - "time" "github.com/pkg/errors" "github.com/replicatedhq/kots/pkg/helmvm/types" - "github.com/replicatedhq/kots/pkg/logger" + "github.com/replicatedhq/kots/pkg/k8sutil" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - statsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + metricsv "k8s.io/metrics/pkg/client/clientset/versioned" ) // GetNodes will get a list of nodes with stats -func GetNodes(client kubernetes.Interface) (*types.HelmVMNodes, error) { - nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) +func GetNodes(ctx context.Context, client kubernetes.Interface) (*types.HelmVMNodes, error) { + nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, errors.Wrap(err, "list nodes") } + clientConfig, err := k8sutil.GetClusterConfig() + if err != nil { + return nil, errors.Wrap(err, "failed to get cluster config") + } + + metricsClient, err := metricsv.NewForConfig(clientConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to create metrics client") + } + toReturn := types.HelmVMNodes{} + nodePods, err := podsPerNode(ctx, client) + if err != nil { + return nil, errors.Wrap(err, "pods per node") + } + for _, node := range nodes.Items { cpuCapacity := types.CapacityAvailable{} memoryCapacity := types.CapacityAvailable{} @@ -44,32 +53,21 @@ func GetNodes(client kubernetes.Interface) (*types.HelmVMNodes, error) { podCapacity.Capacity = float64(node.Status.Capacity.Pods().Value()) - nodeIP := "" - for _, address := range node.Status.Addresses { - if address.Type == corev1.NodeInternalIP { - nodeIP = address.Address - } + nodeMetrics, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrap(err, "get node metrics") } - if nodeIP == "" { - logger.Infof("Did not find address for node %s, %+v", node.Name, node.Status.Addresses) - } else { - nodeMetrics, err := getNodeMetrics(nodeIP) - if err != nil { - logger.Infof("Got error retrieving stats for node %q: %v", node.Name, err) - } else { - if nodeMetrics.Node.Memory != nil && nodeMetrics.Node.Memory.AvailableBytes != nil { - memoryCapacity.Available =
float64(*nodeMetrics.Node.Memory.AvailableBytes) / math.Pow(2, 30) - } - - if nodeMetrics.Node.CPU != nil && nodeMetrics.Node.CPU.UsageNanoCores != nil { - cpuCapacity.Available = cpuCapacity.Capacity - (float64(*nodeMetrics.Node.CPU.UsageNanoCores) / math.Pow(10, 9)) - } - - podCapacity.Available = podCapacity.Capacity - float64(len(nodeMetrics.Pods)) - } + if nodeMetrics.Usage.Memory() != nil { + memoryCapacity.Available = memoryCapacity.Capacity - float64(nodeMetrics.Usage.Memory().Value())/math.Pow(2, 30) } + if nodeMetrics.Usage.Cpu() != nil { + cpuCapacity.Available = cpuCapacity.Capacity - nodeMetrics.Usage.Cpu().AsApproximateFloat64() + } + + podCapacity.Available = podCapacity.Capacity - float64(nodePods[node.Name]) + nodeLabelArray := []string{} for k, v := range node.Labels { nodeLabelArray = append(nodeLabelArray, fmt.Sprintf("%s:%s", k, v)) @@ -124,49 +122,36 @@ func findNodeConditions(conditions []corev1.NodeCondition) types.NodeConditions return discoveredConditions } -// get kubelet PKI info from /etc/kubernetes/pki/kubelet, use it to hit metrics server at `http://${nodeIP}:10255/stats/summary` -func getNodeMetrics(nodeIP string) (*statsv1alpha1.Summary, error) { - client := http.Client{ - Timeout: time.Second, +// podsPerNode returns a map of node names to the number of pods, across all namespaces +func podsPerNode(ctx context.Context, client kubernetes.Interface) (map[string]int, error) { + namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrap(err, "list namespaces") } - port := 10255 - // only use mutual TLS if client cert exists - _, err := os.ReadFile("/etc/kubernetes/pki/kubelet/client.crt") - if err == nil { - cert, err := tls.LoadX509KeyPair("/etc/kubernetes/pki/kubelet/client.crt", "/etc/kubernetes/pki/kubelet/client.key") - if err != nil { - return nil, errors.Wrap(err, "get client keypair") - } + toReturn := map[string]int{} - // this will leak memory - client.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{cert}, - InsecureSkipVerify: true, - }, + for _, ns := range namespaces.Items { + nsPods, err := client.CoreV1().Pods(ns.Name).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "list pods in namespace %s", ns.Name) } - port = 10250 - } - r, err := client.Get(fmt.Sprintf("https://%s:%d/stats/summary", nodeIP, port)) - if err != nil { - return nil, errors.Wrapf(err, "get node %s stats", nodeIP) - } - defer r.Body.Close() + for _, pod := range nsPods.Items { + pod := pod + if pod.Spec.NodeName == "" { + continue + } - body, err := io.ReadAll(r.Body) - if err != nil { - return nil, errors.Wrapf(err, "read node %s stats response", nodeIP) - } + if _, ok := toReturn[pod.Spec.NodeName]; !ok { + toReturn[pod.Spec.NodeName] = 0 + } - summary := statsv1alpha1.Summary{} - err = json.Unmarshal(body, &summary) - if err != nil { - return nil, errors.Wrapf(err, "parse node %s stats response", nodeIP) + toReturn[pod.Spec.NodeName]++ + } } - return &summary, nil + return toReturn, nil } func isConnected(node corev1.Node) bool { @@ -201,12 +186,3 @@ func isPrimary(node corev1.Node) bool { return false } - -func internalIP(node corev1.Node) string { - for _, address := range node.Status.Addresses { - if address.Type == corev1.NodeInternalIP { - return address.Address - } - } - return "" -} diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go index 6aad6255a9..4f47a5128f 100644 --- a/pkg/helmvm/node_join.go +++ 
b/pkg/helmvm/node_join.go @@ -8,5 +8,10 @@ import ( // GenerateAddNodeCommand will generate the HelmVM node add command for a primary or secondary node func GenerateAddNodeCommand(client kubernetes.Interface, primary bool) ([]string, *time.Time, error) { - return nil, nil, nil + tomorrow := time.Now().Add(time.Hour * 24) + if primary { + return []string{"this is a primary join command string", "that can be multiple strings"}, &tomorrow, nil + } else { + return []string{"this is a secondary join command string", "that can be multiple strings"}, &tomorrow, nil + } } diff --git a/pkg/helmvm/types/types.go b/pkg/helmvm/types/types.go index c298dfbd93..78ea23f248 100644 --- a/pkg/helmvm/types/types.go +++ b/pkg/helmvm/types/types.go @@ -1,5 +1,7 @@ package types +import corev1 "k8s.io/api/core/v1" + type HelmVMNodes struct { Nodes []Node `json:"nodes"` HA bool `json:"ha"` @@ -18,6 +20,7 @@ type Node struct { Pods CapacityAvailable `json:"pods"` Labels []string `json:"labels"` Conditions NodeConditions `json:"conditions"` + PodList []corev1.Pod `json:"podList"` } type CapacityAvailable struct { diff --git a/pkg/helmvm/util.go b/pkg/helmvm/util.go index 7d2817f93e..5dd2cdc11f 100644 --- a/pkg/helmvm/util.go +++ b/pkg/helmvm/util.go @@ -1,13 +1,45 @@ package helmvm import ( + "context" + "fmt" + corev1 "k8s.io/api/core/v1" + + kuberneteserrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) +const configMapName = "embedded-cluster-config" +const configMapNamespace = "kube-system" + +// ReadConfigMap will read the embedded cluster config from a configmap +func ReadConfigMap(client kubernetes.Interface) (*corev1.ConfigMap, error) { + return client.CoreV1().ConfigMaps(configMapNamespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) +} + func IsHelmVM(clientset kubernetes.Interface) (bool, error) { - return false, nil + if clientset == nil { + return false, fmt.Errorf("clientset is nil") + } + + configMapExists := false + _, err := ReadConfigMap(clientset) + if err == nil { + configMapExists = true + } else if kuberneteserrors.IsNotFound(err) { + configMapExists = false + } else if kuberneteserrors.IsUnauthorized(err) { + configMapExists = false + } else if kuberneteserrors.IsForbidden(err) { + configMapExists = false + } else if err != nil { + return false, fmt.Errorf("failed to get embedded cluster configmap: %w", err) + } + + return configMapExists, nil } func IsHA(clientset kubernetes.Interface) (bool, error) { - return false, nil + return true, nil } From 4740d8467374d2afd8740385131974021e3bfc15 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Wed, 18 Oct 2023 11:21:02 -0600 Subject: [PATCH 02/31] generate a node join token (#4072) * generate a node join token * two mutexes, and do not restart successful pods * generate the full node join command * all controllers are also workers * allow arbitrary node roles * role is controller not controller+worker --- pkg/handlers/helmvm_node_join_command.go | 4 +- pkg/helmvm/jointoken.go | 25 +++ pkg/helmvm/node_join.go | 239 ++++++++++++++++++++++- pkg/helmvm/util.go | 2 +- 4 files changed, 261 insertions(+), 9 deletions(-) create mode 100644 pkg/helmvm/jointoken.go diff --git a/pkg/handlers/helmvm_node_join_command.go b/pkg/handlers/helmvm_node_join_command.go index 6604b659d9..a17c39c0ef 100644 --- a/pkg/handlers/helmvm_node_join_command.go +++ b/pkg/handlers/helmvm_node_join_command.go @@ -22,7 +22,7 @@ func (h *Handler) GenerateHelmVMNodeJoinCommandSecondary(w http.ResponseWriter,
return } - command, expiry, err := helmvm.GenerateAddNodeCommand(client, false) + command, expiry, err := helmvm.GenerateAddNodeCommand(r.Context(), client, "worker") if err != nil { logger.Error(err) w.WriteHeader(http.StatusInternalServerError) @@ -42,7 +42,7 @@ func (h *Handler) GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r return } - command, expiry, err := helmvm.GenerateAddNodeCommand(client, true) + command, expiry, err := helmvm.GenerateAddNodeCommand(r.Context(), client, "controller") if err != nil { logger.Error(err) w.WriteHeader(http.StatusInternalServerError) diff --git a/pkg/helmvm/jointoken.go b/pkg/helmvm/jointoken.go new file mode 100644 index 0000000000..d723f6f015 --- /dev/null +++ b/pkg/helmvm/jointoken.go @@ -0,0 +1,25 @@ +package helmvm + +import ( + "encoding/base64" + "encoding/json" + + "github.com/google/uuid" +) + +// joinToken is a struct that holds the actual token, the cluster id, and the node role. This is marshaled +// and base64 encoded and used as an argument to the join command on the other nodes. +type joinToken struct { + ClusterID uuid.UUID `json:"clusterID"` + Token string `json:"token"` + Role string `json:"role"` +} + +// Encode encodes a joinToken to base64. +func (j *joinToken) Encode() (string, error) { + b, err := json.Marshal(j) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(b), nil +} diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go index 4f47a5128f..4bbf1e197c 100644 --- a/pkg/helmvm/node_join.go +++ b/pkg/helmvm/node_join.go @@ -1,17 +1,244 @@ package helmvm import ( + "context" + "fmt" + "strings" + "sync" "time" + "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + kuberneteserrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) +type joinCommandEntry struct { + Command []string + Creation *time.Time + Mut sync.Mutex +} + +var joinCommandMapMut = sync.Mutex{} +var joinCommandMap = map[string]*joinCommandEntry{} + // GenerateAddNodeCommand will generate the HelmVM node add command for a primary or secondary node -func GenerateAddNodeCommand(client kubernetes.Interface, primary bool) ([]string, *time.Time, error) { - tomorrow := time.Now().Add(time.Hour * 24) - if primary { - return []string{"this is a primary join command string", "that can be multiple strings"}, &tomorrow, nil - } else { - return []string{"this is a secondary join command string", "that can be multiple strings"}, &tomorrow, nil - } +// join commands will last for 24 hours, and will be cached for 1 hour after first generation +func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, nodeRole string) ([]string, *time.Time, error) { + // get the joinCommand struct entry for this node role + joinCommandMapMut.Lock() + if _, ok := joinCommandMap[nodeRole]; !ok { + joinCommandMap[nodeRole] = &joinCommandEntry{} + } + joinCommand := joinCommandMap[nodeRole] + joinCommandMapMut.Unlock() + + // lock the joinCommand struct entry + joinCommand.Mut.Lock() + defer joinCommand.Mut.Unlock() + + // if the joinCommand has been generated in the past hour, return it + if joinCommand.Creation != nil && time.Now().Before(joinCommand.Creation.Add(time.Hour)) { + expiry := joinCommand.Creation.Add(time.Hour * 24) + return joinCommand.Command, &expiry, nil + } + + newToken, err := runAddNodeCommandPod(ctx, client, nodeRole) + if err != nil { + return nil, nil, fmt.Errorf("failed to run add node command pod: %w", err) + } + + newCmd, err :=
generateAddNodeCommand(ctx, client, nodeRole, newToken) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate add node command: %w", err) } + + now := time.Now() + joinCommand.Command = newCmd + joinCommand.Creation = &now + + expiry := now.Add(time.Hour * 24) + return newCmd, &expiry, nil +} + +// run a pod that will generate the add node token +func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, nodeRole string) (string, error) { + podName := "k0s-token-generator-" + suffix := strings.Replace(nodeRole, "+", "-", -1) + podName += suffix + + // cleanup the pod if it already exists + err := client.CoreV1().Pods("kube-system").Delete(ctx, podName, metav1.DeleteOptions{}) + if err != nil { + if !kuberneteserrors.IsNotFound(err) { + return "", fmt.Errorf("failed to delete pod: %w", err) + } + } + + hostPathFile := corev1.HostPathFile + hostPathDir := corev1.HostPathDirectory + _, err = client.CoreV1().Pods("kube-system").Create(ctx, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: "kube-system", + Labels: map[string]string{ + "replicated.app/embedded-cluster": "true", + }, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + HostNetwork: true, + Volumes: []corev1.Volume{ + { + Name: "bin", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/usr/local/bin/k0s", + Type: &hostPathFile, + }, + }, + }, + { + Name: "lib", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/lib/k0s", + Type: &hostPathDir, + }, + }, + }, + { + Name: "etc", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/etc/k0s", + Type: &hostPathDir, + }, + }, + }, + { + Name: "run", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/run/k0s", + Type: &hostPathDir, + }, + }, + }, + }, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node.k0sproject.io/role", + Operator: corev1.NodeSelectorOpIn, + Values: []string{ + "control-plane", + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "k0s-token-generator", + Image: "ubuntu:latest", // TODO use the kotsadm image here as we'll know it exists + Command: []string{"/mnt/k0s"}, + Args: []string{ + "token", + "create", + "--expiry", + "24h", // match the advertised 24 hour join command expiry + "--role", + nodeRole, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "bin", + MountPath: "/mnt/k0s", + }, + { + Name: "lib", + MountPath: "/var/lib/k0s", + }, + { + Name: "etc", + MountPath: "/etc/k0s", + }, + { + Name: "run", + MountPath: "/run/k0s", + }, + }, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return "", fmt.Errorf("failed to create pod: %w", err) + } + + // wait for the pod to complete + for { + pod, err := client.CoreV1().Pods("kube-system").Get(ctx, podName, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed to get pod: %w", err) + } + + if pod.Status.Phase == corev1.PodSucceeded { + break + } + + if pod.Status.Phase == corev1.PodFailed { + return "", fmt.Errorf("pod failed") + } + + time.Sleep(time.Second) + } + + // get the logs from the completed pod + podLogs, err := client.CoreV1().Pods("kube-system").GetLogs(podName, &corev1.PodLogOptions{}).DoRaw(ctx) + if err != nil { + return "", fmt.Errorf("failed
to get pod logs: %w", err) + } + + // the logs are just a join token, which needs to be added to other things to get a join command + return string(podLogs), nil +} + +// generate the add node command from the join token, the node roles, and info from the embedded-cluster-config configmap +func generateAddNodeCommand(ctx context.Context, client kubernetes.Interface, nodeRole string, token string) ([]string, error) { + cm, err := ReadConfigMap(client) + if err != nil { + return nil, fmt.Errorf("failed to read configmap: %w", err) + } + + clusterID := cm.Data["embedded-cluster-id"] + binaryName := cm.Data["embedded-binary-name"] + + clusterUUID := uuid.UUID{} + err = clusterUUID.UnmarshalText([]byte(clusterID)) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal cluster id %s: %w", clusterID, err) + } + + fullToken := joinToken{ + ClusterID: clusterUUID, + Token: token, + Role: nodeRole, + } + + b64token, err := fullToken.Encode() + if err != nil { + return nil, fmt.Errorf("unable to encode token: %w", err) + } + + return []string{binaryName + " node join", b64token}, nil } diff --git a/pkg/helmvm/util.go b/pkg/helmvm/util.go index 5dd2cdc11f..ce358abab0 100644 --- a/pkg/helmvm/util.go +++ b/pkg/helmvm/util.go @@ -3,8 +3,8 @@ package helmvm import ( "context" "fmt" - corev1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" kuberneteserrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" From b4ea073b548b51b45ba8138b9a2603a0b61ab45e Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Wed, 18 Oct 2023 11:26:53 -0600 Subject: [PATCH 03/31] change node join command API to accept a list of roles --- pkg/handlers/handlers.go | 6 ++-- pkg/handlers/helmvm_node_join_command.go | 34 +++++++++--------- pkg/handlers/interface.go | 3 +- pkg/handlers/mock/mock.go | 46 +++++++++--------------- 4 files changed, 37 insertions(+), 52 deletions(-) diff --git a/pkg/handlers/handlers.go b/pkg/handlers/handlers.go index aca714d3b2..1ddde1d543 100644 --- a/pkg/handlers/handlers.go +++ b/pkg/handlers/handlers.go @@ -277,10 +277,8 @@ func RegisterSessionAuthRoutes(r *mux.Router, kotsStore store.Store, handler KOT // HelmVM r.Name("HelmVM").Path("/api/v1/helmvm").HandlerFunc(NotImplemented) - r.Name("GenerateHelmVMNodeJoinCommandSecondary").Path("/api/v1/helmvm/generate-node-join-command-secondary").Methods("POST"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateHelmVMNodeJoinCommandSecondary)) - r.Name("GenerateHelmVMNodeJoinCommandPrimary").Path("/api/v1/helmvm/generate-node-join-command-primary").Methods("POST"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateHelmVMNodeJoinCommandPrimary)) + r.Name("GenerateHelmVMNodeJoinCommand").Path("/api/v1/helmvm/generate-node-join-command").Methods("POST"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateHelmVMNodeJoinCommand)) r.Name("DrainHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}/drain").Methods("POST"). HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DrainHelmVMNode)) r.Name("DeleteHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}").Methods("DELETE"). 
diff --git a/pkg/handlers/helmvm_node_join_command.go b/pkg/handlers/helmvm_node_join_command.go index a17c39c0ef..6b8abe4654 100644 --- a/pkg/handlers/helmvm_node_join_command.go +++ b/pkg/handlers/helmvm_node_join_command.go @@ -1,6 +1,7 @@ package handlers import ( + "encoding/json" "net/http" "time" @@ -14,27 +15,18 @@ type GenerateHelmVMNodeJoinCommandResponse struct { Expiry string `json:"expiry"` } -func (h *Handler) GenerateHelmVMNodeJoinCommandSecondary(w http.ResponseWriter, r *http.Request) { - client, err := k8sutil.GetClientset() - if err != nil { - logger.Error(err) - w.WriteHeader(http.StatusInternalServerError) - return - } +type GenerateHelmVMNodeJoinCommandRequest struct { + Roles []string `json:"roles"` +} - command, expiry, err := helmvm.GenerateAddNodeCommand(r.Context(), client, "worker") - if err != nil { +func (h *Handler) GenerateHelmVMNodeJoinCommand(w http.ResponseWriter, r *http.Request) { + generateHelmVMNodeJoinCommandRequest := GenerateHelmVMNodeJoinCommandRequest{} + if err := json.NewDecoder(r.Body).Decode(&generateHelmVMNodeJoinCommandRequest); err != nil { logger.Error(err) - w.WriteHeader(http.StatusInternalServerError) + w.WriteHeader(http.StatusBadRequest) return } - JSON(w, http.StatusOK, GenerateHelmVMNodeJoinCommandResponse{ - Command: command, - Expiry: expiry.Format(time.RFC3339), - }) -} -func (h *Handler) GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r *http.Request) { client, err := k8sutil.GetClientset() if err != nil { logger.Error(err) @@ -42,7 +34,15 @@ func (h *Handler) GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r return } - command, expiry, err := helmvm.GenerateAddNodeCommand(r.Context(), client, "controller") + k0sRole := "worker" + for _, role := range generateHelmVMNodeJoinCommandRequest.Roles { + if role == "controller" { + k0sRole = "controller" + break + } + } + + command, expiry, err := helmvm.GenerateAddNodeCommand(r.Context(), client, k0sRole) if err != nil { logger.Error(err) w.WriteHeader(http.StatusInternalServerError) diff --git a/pkg/handlers/interface.go b/pkg/handlers/interface.go index b69ae161fd..d98dd07e18 100644 --- a/pkg/handlers/interface.go +++ b/pkg/handlers/interface.go @@ -139,8 +139,7 @@ type KOTSHandler interface { GetKurlNodes(w http.ResponseWriter, r *http.Request) // HelmVM - GenerateHelmVMNodeJoinCommandSecondary(w http.ResponseWriter, r *http.Request) - GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r *http.Request) + GenerateHelmVMNodeJoinCommand(w http.ResponseWriter, r *http.Request) DrainHelmVMNode(w http.ResponseWriter, r *http.Request) DeleteHelmVMNode(w http.ResponseWriter, r *http.Request) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) diff --git a/pkg/handlers/mock/mock.go b/pkg/handlers/mock/mock.go index f496224c74..c186b8a9ac 100644 --- a/pkg/handlers/mock/mock.go +++ b/pkg/handlers/mock/mock.go @@ -442,28 +442,16 @@ func (mr *MockKOTSHandlerMockRecorder) GarbageCollectImages(w, r interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollectImages", reflect.TypeOf((*MockKOTSHandler)(nil).GarbageCollectImages), w, r) } -// GenerateHelmVMNodeJoinCommandPrimary mocks base method. -func (m *MockKOTSHandler) GenerateHelmVMNodeJoinCommandPrimary(w http.ResponseWriter, r *http.Request) { +// GenerateHelmVMNodeJoinCommand mocks base method. 
+func (m *MockKOTSHandler) GenerateHelmVMNodeJoinCommand(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() - m.ctrl.Call(m, "GenerateHelmVMNodeJoinCommandPrimary", w, r) + m.ctrl.Call(m, "GenerateHelmVMNodeJoinCommand", w, r) } -// GenerateHelmVMNodeJoinCommandPrimary indicates an expected call of GenerateHelmVMNodeJoinCommandPrimary. -func (mr *MockKOTSHandlerMockRecorder) GenerateHelmVMNodeJoinCommandPrimary(w, r interface{}) *gomock.Call { +// GenerateHelmVMNodeJoinCommand indicates an expected call of GenerateHelmVMNodeJoinCommand. +func (mr *MockKOTSHandlerMockRecorder) GenerateHelmVMNodeJoinCommand(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateHelmVMNodeJoinCommandPrimary", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateHelmVMNodeJoinCommandPrimary), w, r) -} - -// GenerateHelmVMNodeJoinCommandSecondary mocks base method. -func (m *MockKOTSHandler) GenerateHelmVMNodeJoinCommandSecondary(w http.ResponseWriter, r *http.Request) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "GenerateHelmVMNodeJoinCommandSecondary", w, r) -} - -// GenerateHelmVMNodeJoinCommandSecondary indicates an expected call of GenerateHelmVMNodeJoinCommandSecondary. -func (mr *MockKOTSHandlerMockRecorder) GenerateHelmVMNodeJoinCommandSecondary(w, r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateHelmVMNodeJoinCommandSecondary", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateHelmVMNodeJoinCommandSecondary), w, r) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateHelmVMNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateHelmVMNodeJoinCommand), w, r) } // GenerateKurlNodeJoinCommandMaster mocks base method. @@ -766,28 +754,28 @@ func (mr *MockKOTSHandlerMockRecorder) GetGlobalSnapshotSettings(w, r interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGlobalSnapshotSettings", reflect.TypeOf((*MockKOTSHandler)(nil).GetGlobalSnapshotSettings), w, r) } -// GetHelmVMNodes mocks base method. -func (m *MockKOTSHandler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { +// GetHelmVMNode mocks base method. +func (m *MockKOTSHandler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() - m.ctrl.Call(m, "GetHelmVMNodes", w, r) + m.ctrl.Call(m, "GetHelmVMNode", w, r) } -// GetHelmVMNodes indicates an expected call of GetHelmVMNodes. -func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNodes(w, r interface{}) *gomock.Call { +// GetHelmVMNode indicates an expected call of GetHelmVMNode. +func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNode(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNodes", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNodes), w, r) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNode", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNode), w, r) } // GetHelmVMNodes mocks base method. -func (m *MockKOTSHandler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) { +func (m *MockKOTSHandler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() - m.ctrl.Call(m, "GetHelmVMNode", w, r) + m.ctrl.Call(m, "GetHelmVMNodes", w, r) } -// GetHelmVMNode indicates an expected call of GetHelmVMNode. -func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNode(w, r interface{}) *gomock.Call { +// GetHelmVMNodes indicates an expected call of GetHelmVMNodes. 
+func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNodes(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNode", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNode), w, r) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNodes", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNodes), w, r) } // GetIdentityServiceConfig mocks base method. From bc55238277dc307eabcad4853c38701f87400f0c Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Wed, 18 Oct 2023 15:53:27 -0600 Subject: [PATCH 04/31] Add view node page, add new add node modal (#4065) * wip: refactor helm cluster page, add view node page * add new add node modal * protect routes * move parenthesis * use test data for now * add material react table * start connecting additional api calls, add test pod data * add material react table to display pods * revert change * uncomment real queries * fix useparams import * fix params/routing, update api route * fix loading/refetch state * update generate add node request * add error handling, add mui react table to cluster manage page * move ts-ignore line * remove delete functionality for now --- web/package.json | 5 + web/src/Root.tsx | 40 +- web/src/components/UploadLicenseFile.tsx | 48 +- .../apps/HelmVMClusterManagement.jsx | 544 ----------------- .../apps/HelmVMClusterManagement.tsx | 577 ++++++++++++++++++ web/src/components/apps/HelmVMNodeRow.jsx | 278 --------- web/src/components/apps/HelmVMViewNode.jsx | 462 ++++++++++++++ web/tailwind.config.js | 65 +- web/yarn.lock | 360 ++++++++++- 9 files changed, 1514 insertions(+), 865 deletions(-) delete mode 100644 web/src/components/apps/HelmVMClusterManagement.jsx create mode 100644 web/src/components/apps/HelmVMClusterManagement.tsx delete mode 100644 web/src/components/apps/HelmVMNodeRow.jsx create mode 100644 web/src/components/apps/HelmVMViewNode.jsx diff --git a/web/package.json b/web/package.json index 21caa76d0c..f58cc7c981 100644 --- a/web/package.json +++ b/web/package.json @@ -121,9 +121,13 @@ "webpack-merge": "5.8.0" }, "dependencies": { + "@emotion/react": "^11.11.1", + "@emotion/styled": "^11.11.0", "@grafana/data": "^8.5.16", "@maji/react-prism": "^1.0.1", "@monaco-editor/react": "^4.4.5", + "@mui/icons-material": "^5.14.14", + "@mui/material": "^5.14.14", "@storybook/addon-storysource": "^6.5.16", "@tanstack/react-query": "^4.36.1", "@tanstack/react-query-devtools": "^4.36.1", @@ -144,6 +148,7 @@ "js-yaml": "3.14.0", "lodash": "4.17.21", "markdown-it": "^12.3.2", + "material-react-table": "^1.15.1", "monaco-editor": "^0.33.0", "monaco-editor-webpack-plugin": "^7.0.1", "node-polyfill-webpack-plugin": "^1.1.4", diff --git a/web/src/Root.tsx b/web/src/Root.tsx index 1260d96b8c..05855d5abc 100644 --- a/web/src/Root.tsx +++ b/web/src/Root.tsx @@ -58,6 +58,7 @@ import SnapshotDetails from "@components/snapshots/SnapshotDetails"; import SnapshotRestore from "@components/snapshots/SnapshotRestore"; import AppSnapshots from "@components/snapshots/AppSnapshots"; import AppSnapshotRestore from "@components/snapshots/AppSnapshotRestore"; +import HelmVMViewNode from "@components/apps/HelmVMViewNode"; // react-query client const queryClient = new QueryClient(); @@ -100,6 +101,8 @@ type State = { selectedAppName: string | null; snapshotInProgressApps: string[]; themeState: ThemeState; + isKurl: boolean | null; + isHelmVM: boolean | null; }; let interval: ReturnType | undefined; @@ -131,6 +134,8 @@ const Root 
= () => { navbarLogo: null, }, app: null, + isKurl: null, + isHelmVM: null, } ); @@ -302,6 +307,8 @@ const Root = () => { adminConsoleMetadata: data.adminConsoleMetadata, featureFlags: data.consoleFeatureFlags, fetchingMetadata: false, + isKurl: data.isKurl, + isHelmVM: data.isHelmVM, }); }) .catch((err) => { @@ -531,6 +538,8 @@ const Root = () => { appSlugFromMetadata={state.appSlugFromMetadata || ""} fetchingMetadata={state.fetchingMetadata} onUploadSuccess={getAppsList} + isKurl={!!state.isKurl} + isHelmVM={!!state.isHelmVM} /> } /> @@ -573,16 +582,36 @@ const Root = () => { } /> } /> + {/* {state.adminConsoleMetadata?.isHelmVM && ( */} + + } + /> + {/* )} */} + {/* {(state.adminConsoleMetadata?.isKurl || + state.adminConsoleMetadata?.isHelmVM) && ( */} + ) : ( - + ) } /> + {/* )} */} + {/* {state.adminConsoleMetadata?.isHelmVM && ( */} + } + /> + {/* )} */} } @@ -761,12 +790,7 @@ const Root = () => { } /> - } + element={} /> {/* WHERE IS SELECTEDAPP */} {state.app?.isAppIdentityServiceSupported && ( diff --git a/web/src/components/UploadLicenseFile.tsx b/web/src/components/UploadLicenseFile.tsx index 19bc1cf95b..2fe1ed8a12 100644 --- a/web/src/components/UploadLicenseFile.tsx +++ b/web/src/components/UploadLicenseFile.tsx @@ -1,23 +1,23 @@ import React, { useEffect, useReducer } from "react"; -import { useNavigate } from "react-router-dom"; -import { Link } from "react-router-dom"; -import { KotsPageTitle } from "@components/Head"; -// TODO: upgrade this dependency -// @ts-ignore -import Dropzone from "react-dropzone"; +import { Link, useNavigate } from "react-router-dom"; import yaml from "js-yaml"; -import size from "lodash/size"; import isEmpty from "lodash/isEmpty"; import keyBy from "lodash/keyBy"; +import size from "lodash/size"; +// TODO: upgrade this dependency +// @ts-ignore +import Dropzone from "react-dropzone"; import Modal from "react-modal"; import Select from "react-select"; + +import { KotsPageTitle } from "@components/Head"; import { getFileContent } from "../utilities/utilities"; -import CodeSnippet from "./shared/CodeSnippet"; +import Icon from "./Icon"; import LicenseUploadProgress from "./LicenseUploadProgress"; +import CodeSnippet from "./shared/CodeSnippet"; import "../scss/components/troubleshoot/UploadSupportBundleModal.scss"; import "../scss/components/UploadLicenseFile.scss"; -import Icon from "./Icon"; type LicenseYaml = { spec: { @@ -26,17 +26,6 @@ type LicenseYaml = { }; }; -type Props = { - appsListLength: number; - appName: string; - appSlugFromMetadata: string; - fetchingMetadata: boolean; - isBackupRestore?: boolean; - onUploadSuccess: () => Promise; - logo: string | null; - snapshot?: { name: string }; -}; - type SelectedAppToInstall = { label: string; value: string; @@ -68,6 +57,20 @@ type UploadLicenseResponse = { slug: string; success?: boolean; }; + +type Props = { + appsListLength: number; + appName: string; + appSlugFromMetadata: string; + fetchingMetadata: boolean; + isBackupRestore?: boolean; + onUploadSuccess: () => Promise; + logo: string | null; + snapshot?: { name: string }; + isHelmVM: boolean; + isKurl: boolean; +}; + const UploadLicenseFile = (props: Props) => { const [state, setState] = useReducer( (currentState: State, newState: Partial) => ({ @@ -264,6 +267,11 @@ const UploadLicenseFile = (props: Props) => { return; } + if (props.isHelmVM && !props.isKurl) { + navigate(`/${data.slug}/cluster/manage`, { replace: true }); + return; + } + if (data.hasPreflight) { navigate(`/${data.slug}/preflight`, { replace: true }); return; diff --git 
a/web/src/components/apps/HelmVMClusterManagement.jsx b/web/src/components/apps/HelmVMClusterManagement.jsx deleted file mode 100644 index 4d1712f258..0000000000 --- a/web/src/components/apps/HelmVMClusterManagement.jsx +++ /dev/null @@ -1,544 +0,0 @@ -import React, { Component, Fragment } from "react"; -import classNames from "classnames"; -import dayjs from "dayjs"; -import { KotsPageTitle } from "@components/Head"; -import CodeSnippet from "../shared/CodeSnippet"; -import HelmVMNodeRow from "./HelmVMNodeRow"; -import Loader from "../shared/Loader"; -import { rbacRoles } from "../../constants/rbac"; -import { Utilities } from "../../utilities/utilities"; -import { Repeater } from "../../utilities/repeater"; -import ErrorModal from "../modals/ErrorModal"; -import Modal from "react-modal"; - -import "@src/scss/components/apps/HelmVMClusterManagement.scss"; -import Icon from "../Icon"; - -export class HelmVMClusterManagement extends Component { - state = { - generating: false, - command: "", - expiry: null, - displayAddNode: false, - selectedNodeType: "primary", - generateCommandErrMsg: "", - helmvm: null, - getNodeStatusJob: new Repeater(), - deletNodeError: "", - confirmDeleteNode: "", - showConfirmDrainModal: false, - nodeNameToDrain: "", - drainingNodeName: null, - drainNodeSuccessful: false, - }; - - componentDidMount() { - this.getNodeStatus(); - this.state.getNodeStatusJob.start(this.getNodeStatus, 1000); - } - - componentWillUnmount() { - this.state.getNodeStatusJob.stop(); - } - - getNodeStatus = async () => { - try { - const res = await fetch(`${process.env.API_ENDPOINT}/helmvm/nodes`, { - headers: { - Accept: "application/json", - }, - credentials: "include", - method: "GET", - }); - if (!res.ok) { - if (res.status === 401) { - Utilities.logoutUser(); - return; - } - console.log( - "failed to get node status list, unexpected status code", - res.status - ); - return; - } - const response = await res.json(); - this.setState({ - helmvm: response, - // if cluster doesn't support ha, then primary will be disabled. Force into secondary - selectedNodeType: !response.ha - ? 
"secondary" - : this.state.selectedNodeType, - }); - return response; - } catch (err) { - console.log(err); - throw err; - } - }; - - deleteNode = (name) => { - this.setState({ - confirmDeleteNode: name, - }); - }; - - cancelDeleteNode = () => { - this.setState({ - confirmDeleteNode: "", - }); - }; - - reallyDeleteNode = () => { - const name = this.state.confirmDeleteNode; - this.cancelDeleteNode(); - - fetch(`${process.env.API_ENDPOINT}/helmvm/nodes/${name}`, { - headers: { - "Content-Type": "application/json", - Accept: "application/json", - }, - credentials: "include", - method: "DELETE", - }) - .then(async (res) => { - if (!res.ok) { - if (res.status === 401) { - Utilities.logoutUser(); - return; - } - this.setState({ - deleteNodeError: `Delete failed with status ${res.status}`, - }); - } - }) - .catch((err) => { - console.log(err); - }); - }; - - generateWorkerAddNodeCommand = async () => { - this.setState({ - generating: true, - command: "", - expiry: null, - generateCommandErrMsg: "", - }); - - fetch( - `${process.env.API_ENDPOINT}/helmvm/generate-node-join-command-secondary`, - { - headers: { - "Content-Type": "application/json", - Accept: "application/json", - }, - credentials: "include", - method: "POST", - } - ) - .then(async (res) => { - if (!res.ok) { - this.setState({ - generating: false, - generateCommandErrMsg: `Failed to generate command with status ${res.status}`, - }); - } else { - const data = await res.json(); - this.setState({ - generating: false, - command: data.command, - expiry: data.expiry, - }); - } - }) - .catch((err) => { - console.log(err); - this.setState({ - generating: false, - generateCommandErrMsg: err ? err.message : "Something went wrong", - }); - }); - }; - - onDrainNodeClick = (name) => { - this.setState({ - showConfirmDrainModal: true, - nodeNameToDrain: name, - }); - }; - - drainNode = async (name) => { - this.setState({ showConfirmDrainModal: false, drainingNodeName: name }); - fetch(`${process.env.API_ENDPOINT}/helmvm/nodes/${name}/drain`, { - headers: { - "Content-Type": "application/json", - Accept: "application/json", - }, - credentials: "include", - method: "POST", - }) - .then(async (res) => { - this.setState({ drainNodeSuccessful: true }); - setTimeout(() => { - this.setState({ - drainingNodeName: null, - drainNodeSuccessful: false, - }); - }, 3000); - }) - .catch((err) => { - console.log(err); - this.setState({ - drainingNodeName: null, - drainNodeSuccessful: false, - }); - }); - }; - - generatePrimaryAddNodeCommand = async () => { - this.setState({ - generating: true, - command: "", - expiry: null, - generateCommandErrMsg: "", - }); - - fetch( - `${process.env.API_ENDPOINT}/helmvm/generate-node-join-command-primary`, - { - headers: { - "Content-Type": "application/json", - Accept: "application/json", - }, - credentials: "include", - method: "POST", - } - ) - .then(async (res) => { - if (!res.ok) { - this.setState({ - generating: false, - generateCommandErrMsg: `Failed to generate command with status ${res.status}`, - }); - } else { - const data = await res.json(); - this.setState({ - generating: false, - command: data.command, - expiry: data.expiry, - }); - } - }) - .catch((err) => { - console.log(err); - this.setState({ - generating: false, - generateCommandErrMsg: err ? 
err.message : "Something went wrong", - }); - }); - }; - - onAddNodeClick = () => { - this.setState( - { - displayAddNode: true, - }, - async () => { - await this.generateWorkerAddNodeCommand(); - } - ); - }; - - onSelectNodeType = (event) => { - const value = event.currentTarget.value; - this.setState( - { - selectedNodeType: value, - }, - async () => { - if (this.state.selectedNodeType === "secondary") { - await this.generateWorkerAddNodeCommand(); - } else { - await this.generatePrimaryAddNodeCommand(); - } - } - ); - }; - - ackDeleteNodeError = () => { - this.setState({ deleteNodeError: "" }); - }; - - render() { - const { helmvm } = this.state; - const { displayAddNode, generateCommandErrMsg } = this.state; - - if (!helmvm) { - return ( -
- -
- ); - } - - return ( -
- -
-
-
-

- Your nodes -

-
- {helmvm?.nodes && - helmvm?.nodes.map((node, i) => ( - - ))} -
-
- {helmvm?.isHelmVMEnabled && - Utilities.sessionRolesHasOneOf([rbacRoles.CLUSTER_ADMIN]) ? ( - !displayAddNode ? ( -
- -
- ) : ( -
-
-

- Add a Node -

-
-
-
- - -
-
- - -
-
- {this.state.generating && ( -
- -
- )} - {!this.state.generating && this.state.command?.length > 0 ? ( - -

- Run this command on the node you wish to join the - cluster -

- - Command has been copied to your clipboard - - } - > - {[this.state.command.join(" \\\n ")]} - - {this.state.expiry && ( - - {`Expires on ${dayjs(this.state.expiry).format( - "MMM Do YYYY, h:mm:ss a z" - )} UTC${(-1 * new Date().getTimezoneOffset()) / 60}`} - - )} -
- ) : ( - - {generateCommandErrMsg && ( -
- - {generateCommandErrMsg} - -
- )} -
- )} -
- ) - ) : null} -
-
- {this.state.deleteNodeError && ( - - )} - -
-

- Deleting this node may cause data loss. Are you sure you want to - proceed? -

-
- - -
-
-
- {this.state.showConfirmDrainModal && ( - - this.setState({ - showConfirmDrainModal: false, - nodeNameToDrain: "", - }) - } - shouldReturnFocusAfterClose={false} - contentLabel="Confirm Drain Node" - ariaHideApp={false} - className="Modal MediumSize" - > -
-

- Are you sure you want to drain {this.state.nodeNameToDrain}? -

-

- Draining this node may cause data loss. If you want to delete{" "} - {this.state.nodeNameToDrain} you must disconnect it after it has - been drained. -

-
- - -
-
-
- )} -
- ); - } -} - -export default HelmVMClusterManagement; diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx new file mode 100644 index 0000000000..ed0fcfa3f2 --- /dev/null +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -0,0 +1,577 @@ +import classNames from "classnames"; +import MaterialReactTable from "material-react-table"; +import React, { ChangeEvent, useMemo, useReducer, useState } from "react"; +import Modal from "react-modal"; +import { useQuery } from "react-query"; +import { Link, useParams } from "react-router-dom"; + +import { KotsPageTitle } from "@components/Head"; +import { useApps } from "@features/App"; +import { rbacRoles } from "../../constants/rbac"; +import { Utilities } from "../../utilities/utilities"; +import Icon from "../Icon"; +import CodeSnippet from "../shared/CodeSnippet"; + +import "@src/scss/components/apps/HelmVMClusterManagement.scss"; + +const testData = { + isHelmVMEnabled: true, + ha: false, + nodes: [ + { + name: "test-helmvm-node", + isConnected: true, + isReady: true, + isPrimaryNode: true, + canDelete: false, + kubeletVersion: "v1.28.2", + cpu: { + capacity: 8, + available: 7.466876775, + }, + memory: { + capacity: 31.33294677734375, + available: 24.23790740966797, + }, + pods: { + capacity: 110, + available: 77, + }, + labels: [ + "beta.kubernetes.io/arch:amd64", + "beta.kubernetes.io/os:linux", + "node-role.kubernetes.io/master:", + "node.kubernetes.io/exclude-from-external-load-balancers:", + "kubernetes.io/arch:amd64", + "kubernetes.io/hostname:laverya-kurl", + "kubernetes.io/os:linux", + "node-role.kubernetes.io/control-plane:", + ], + conditions: { + memoryPressure: false, + diskPressure: false, + pidPressure: false, + ready: true, + }, + }, + { + name: "test-helmvm-worker", + isConnected: true, + isReady: true, + isPrimaryNode: false, + canDelete: false, + kubeletVersion: "v1.28.2", + cpu: { + capacity: 4, + available: 3.761070507, + }, + memory: { + capacity: 15.50936508178711, + available: 11.742542266845703, + }, + pods: { + capacity: 110, + available: 94, + }, + labels: [ + "beta.kubernetes.io/arch:amd64", + "beta.kubernetes.io/os:linux", + "kubernetes.io/arch:amd64", + "kubernetes.io/os:linux", + "kurl.sh/cluster:true", + ], + conditions: { + memoryPressure: false, + diskPressure: false, + pidPressure: false, + ready: true, + }, + }, + ], +}; + +type State = { + displayAddNode: boolean; + confirmDeleteNode: string; + deleteNodeError: string; + showConfirmDrainModal: boolean; + nodeNameToDrain: string; + drainingNodeName: string | null; + drainNodeSuccessful: boolean; +}; + +const HelmVMClusterManagement = ({ + fromLicenseFlow = false, + appName, +}: { + fromLicenseFlow?: boolean; + appName?: string; +}) => { + const [state, setState] = useReducer( + (prevState: State, newState: Partial) => ({ + ...prevState, + ...newState, + }), + { + displayAddNode: false, + confirmDeleteNode: "", + deleteNodeError: "", + showConfirmDrainModal: false, + nodeNameToDrain: "", + drainingNodeName: null, + drainNodeSuccessful: false, + } + ); + const [selectedNodeTypes, setSelectedNodeTypes] = useState([]); + + const { data: appsData } = useApps(); + const app = appsData?.apps?.find((a) => a.name === appName); + const { slug } = useParams(); + + // #region queries + type NodesResponse = { + ha: boolean; + isHelmVMEnabled: boolean; + nodes: { + name: string; + isConnected: boolean; + isReady: boolean; + isPrimaryNode: boolean; + canDelete: boolean; + kubeletVersion: string; + cpu: 
{ + capacity: number; + available: number; + }; + memory: { + capacity: number; + available: number; + }; + pods: { + capacity: number; + available: number; + }; + labels: string[]; + conditions: { + memoryPressure: boolean; + diskPressure: boolean; + pidPressure: boolean; + ready: boolean; + }; + }[]; + }; + + const { + data: nodesData, + isInitialLoading: nodesLoading, + error: nodesError, + } = useQuery({ + queryKey: "helmVmNodes", + queryFn: async () => { + const res = await fetch(`${process.env.API_ENDPOINT}/helmvm/nodes`, { + headers: { + Accept: "application/json", + }, + credentials: "include", + method: "GET", + }); + if (!res.ok) { + if (res.status === 401) { + Utilities.logoutUser(); + } + console.log( + "failed to get node status list, unexpected status code", + res.status + ); + try { + const error = await res.json(); + throw new Error( + error?.error?.message || error?.error || error?.message + ); + } catch (err) { + throw new Error("Unable to fetch nodes, please try again later."); + } + } + return res.json(); + }, + refetchInterval: (data) => (data ? 1000 : 0), + retry: false, + }); + + type AddNodeCommandResponse = { + command: string; + expiry: string; + }; + + const { + data: generateAddNodeCommand, + isLoading: generateAddNodeCommandLoading, + error: generateAddNodeCommandError, + } = useQuery({ + queryKey: ["generateAddNodeCommand", selectedNodeTypes], + queryFn: async ({ queryKey }) => { + const [, nodeTypes] = queryKey; + const res = await fetch( + `${process.env.API_ENDPOINT}/helmvm/generate-node-join-command`, + { + headers: { + "Content-Type": "application/json", + Accept: "application/json", + }, + credentials: "include", + method: "POST", + body: JSON.stringify({ + roles: nodeTypes, + }), + } + ); + if (!res.ok) { + if (res.status === 401) { + Utilities.logoutUser(); + } + console.log( + "failed to get generate node command, unexpected status code", + res.status + ); + try { + const error = await res.json(); + throw new Error( + error?.error?.message || error?.error || error?.message + ); + } catch (err) { + throw new Error( + "Unable to generate node join command, please try again later." 
+ ); + } + } + return res.json(); + }, + enabled: selectedNodeTypes.length > 0, + }); + + // TODO: import useMutation + // const { + // mutate: addNodeType, + // isLoading: addNodeTypeLoading, + // error: addNodeTypeError, + // } = useMutation({ + // mutationFn: async () => { + // return ( + // await fetch(`${process.env.API_ENDPOINT}/helmvm/nodes`, { + // headers: { + // "Content-Type": "application/json", + // Accept: "application/json", + // }, + // credentials: "include", + // method: "POST", + // }) + // ).json(); + // }, + // }); + // #endregion + + const onAddNodeClick = () => { + setState({ + displayAddNode: true, + }); + }; + + // #region node type logic + const NODE_TYPES = ["controller"]; + + const determineDisabledState = () => { + // if (nodeType === "controller") { + // const numControllers = testData.nodes.reduce((acc, node) => { + // if (node.labels.includes("controller")) { + // acc += 1; + // } + // return acc; + // }, 0); + // return numControllers === 3; + // } + return false; + }; + + const handleSelectNodeType = (e: ChangeEvent) => { + let nodeType = e.currentTarget.value; + let types = selectedNodeTypes; + + if (selectedNodeTypes.includes(nodeType)) { + setSelectedNodeTypes(types.filter((type) => type !== nodeType)); + } else { + setSelectedNodeTypes([...types, nodeType]); + } + }; + // #endregion + + const columns = useMemo( + () => [ + { + accessorKey: "name", + header: "Name", + enableHiding: false, + enableColumnDragging: false, + size: 150, + }, + { + accessorKey: "roles", + header: "Role(s)", + size: 404, + }, + { + accessorKey: "status", + header: "Status", + size: 150, + }, + { + accessorKey: "disk", + header: "Disk", + size: 150, + }, + { + accessorKey: "cpu", + header: "CPU", + size: 150, + }, + { + accessorKey: "memory", + header: "Memory", + size: 150, + }, + { + accessorKey: "pause", + header: "Pause", + size: 100, + }, + { + accessorKey: "delete", + header: "Delete", + size: 100, + }, + ], + [] + ); + + const mappedNodes = useMemo(() => { + return (nodesData?.nodes || testData.nodes).map((n) => ({ + name: slug ? ( + + ) : ( + n.name + ), + roles: ( +
+ {n.labels.map((l) => ( + + {l} + + ))} +
+ ), + status: n.isReady ? "Ready" : "Not Ready", + disk: n.conditions.diskPressure ? "Disk Pressure" : "No Disk Pressure", + cpu: n.conditions.pidPressure ? "CPU Pressure" : "No CPU Pressure", + memory: n.conditions.memoryPressure + ? "Memory Pressure" + : "No Memory Pressure", + pause: ( + <> + + + ), + delete: ( + <> + + + ), + })); + }, [nodesData?.nodes?.toString()]); + // #endregion + + return ( +
+ +
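+          {/* page layout: title row, description, and (for cluster admins) the add-node button, followed by the nodes table */}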
+
+

+ Cluster Nodes +

+
+

+ This page lists the nodes that are configured and shows the + status/health of each. +

+ {Utilities.sessionRolesHasOneOf([rbacRoles.CLUSTER_ADMIN]) && ( + + )} +
+
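+        {/* nodes table states: loading, error, then data; the table falls back to the hard-coded testData fixture while no API data is available */}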
+ {nodesLoading && ( +

+ Loading nodes... +

+ )} + {!nodesData && nodesError && ( +

+ {nodesError?.message} +

+ )} + {(nodesData?.nodes || testData?.nodes) && ( + + )} +
+ {fromLicenseFlow && ( + + Continue + + )} +
+
+ {/* MODALS */} + setState({ displayAddNode: false })} + contentLabel="Add Node" + className="Modal" + ariaHideApp={false} + > +
+
+

+ Add a Node +

+ setState({ displayAddNode: false })} + /> +
+

+ To add a node to this cluster, select the type of node you'd like to + add. Once you've selected a node type, we will generate a node join + command for you to use in the CLI. When the node successfully joins + the cluster, you will see it appear in the list of nodes on this + page. +

+
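+            {/* one checkbox per joinable role; NODE_TYPES currently offers only "controller" */}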
+ {NODE_TYPES.map((nodeType) => ( +
+ + +
+ ))} +
+
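+            {/* join-command states: the query runs only once a role is selected (enabled: selectedNodeTypes.length > 0) */}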
+ {generateAddNodeCommandLoading && ( +

+ Generating command... +

+ )} + {!generateAddNodeCommand && generateAddNodeCommandError && ( +

+ {generateAddNodeCommandError?.message} +

+ )} + {!generateAddNodeCommandLoading && generateAddNodeCommand?.command && ( + Copied! + } + > + {generateAddNodeCommand?.command || ""} + + )} +
+ {/* buttons */} +
+ +
+
+
+
+ ); +}; + +export default HelmVMClusterManagement; diff --git a/web/src/components/apps/HelmVMNodeRow.jsx b/web/src/components/apps/HelmVMNodeRow.jsx deleted file mode 100644 index 93f2c5489c..0000000000 --- a/web/src/components/apps/HelmVMNodeRow.jsx +++ /dev/null @@ -1,278 +0,0 @@ -import React from "react"; -import classNames from "classnames"; -import Loader from "../shared/Loader"; -import { rbacRoles } from "../../constants/rbac"; -import { getPercentageStatus, Utilities } from "../../utilities/utilities"; -import Icon from "../Icon"; - -export default function HelmVMNodeRow(props) { - const { node } = props; - - const DrainDeleteNode = () => { - const { drainNode, drainNodeSuccessful, drainingNodeName } = props; - if (drainNode && Utilities.sessionRolesHasOneOf(rbacRoles.DRAIN_NODE)) { - if ( - !drainNodeSuccessful && - drainingNodeName && - drainingNodeName === node?.name - ) { - return ( -
- - - - - Draining Node - -
- ); - } else if (drainNodeSuccessful && drainingNodeName === node?.name) { - return ( -
- - - Node successfully drained - -
- ); - } else { - return ( -
- -
- ); - } - } - }; - - return ( -
-
-
-

- {node?.name} -

- {node?.isPrimaryNode && ( - - Primary node - - )} -
-
-
-

- - {node?.isConnected ? "Connected" : "Disconnected"} -

-

-   -

-
-
-

- - {node?.pods?.available === -1 - ? `${node?.pods?.capacity} pods` - : `${ - node?.pods?.available === 0 - ? "0" - : node?.pods?.capacity - node?.pods?.available - } pods used`} -

- {node?.pods?.available !== -1 && ( -

- of {node?.pods?.capacity} pods total -

- )} -
-
-

- - {node?.cpu?.available === -1 - ? `${node?.cpu?.capacity} ${ - node?.cpu?.available === "1" ? "core" : "cores" - }` - : `${ - node?.cpu?.available === 0 - ? "0" - : (node?.cpu?.capacity - node?.cpu?.available).toFixed(1) - } ${ - node?.cpu?.available === "1" ? "core used" : "cores used" - }`} -

- {node?.pods?.available !== -1 && ( -

- of {node?.cpu?.capacity}{" "} - {node?.cpu?.available === "1" ? "core total" : "cores total"} -

- )} -
-
-

- - {node?.memory?.available === -1 - ? `${node?.memory?.capacity?.toFixed(1)} GB` - : `${ - node?.memory?.available === 0 - ? "0" - : ( - node?.memory?.capacity - node?.memory?.available - ).toFixed(1) - } GB used`} -

- {node?.pods?.available !== -1 && ( -

- of {node?.memory?.capacity?.toFixed(1)} GB total -

- )} -
-
-
-
-

- - {node?.kubeletVersion} -

-
-
-

- - {node?.conditions?.diskPressure - ? "No Space on Device" - : "No Disk Pressure"} -

-
-
-

- - {node?.conditions?.pidPressure - ? "Pressure on CPU" - : "No CPU Pressure"} -

-
-
-

- - {node?.conditions?.memoryPressure - ? "No Space on Memory" - : "No Memory Pressure"} -

-
-
- {/* LABELS */} -
- {node?.labels.length > 0 - ? node.labels.sort().map((label, i) => { - let labelToShow = label.replace(":", "="); - return ( -
- {labelToShow} -
- ); - }) - : null} -
-
-

- For more details run{" "} - - kubectl describe node {node?.name} - -

-
-
- -
- ); -} diff --git a/web/src/components/apps/HelmVMViewNode.jsx b/web/src/components/apps/HelmVMViewNode.jsx new file mode 100644 index 0000000000..a9b8bae245 --- /dev/null +++ b/web/src/components/apps/HelmVMViewNode.jsx @@ -0,0 +1,462 @@ +import { MaterialReactTable } from "material-react-table"; +import React, { useMemo } from "react"; +import { useQuery } from "react-query"; +import { Link, useParams } from "react-router-dom"; + +const testData = { + isHelmVMEnabled: true, + ha: false, + nodes: [ + { + name: "test-helmvm-node", + isConnected: true, + isReady: true, + isPrimaryNode: true, + canDelete: false, + kubeletVersion: "v1.28.2", + cpu: { + capacity: 8, + available: 7.466876775, + }, + memory: { + capacity: 31.33294677734375, + available: 24.23790740966797, + }, + pods: { + capacity: 110, + available: 77, + }, + labels: [ + "beta.kubernetes.io/arch:amd64", + "beta.kubernetes.io/os:linux", + "node-role.kubernetes.io/master:", + "node.kubernetes.io/exclude-from-external-load-balancers:", + "kubernetes.io/arch:amd64", + "kubernetes.io/hostname:laverya-kurl", + "kubernetes.io/os:linux", + "node-role.kubernetes.io/control-plane:", + ], + conditions: { + memoryPressure: false, + diskPressure: false, + pidPressure: false, + ready: true, + }, + podList: [ + { + metadata: { + name: "example-es-85fc9df74-g9jbn", + generateName: "example-es-85fc9df74-", + namespace: "helmvm", + uid: "1caba3fb-bd52-430a-9cff-0eb0939317fa", + resourceVersion: "40284", + creationTimestamp: "2023-10-17T16:22:37Z", + labels: { + app: "example", + component: "es", + "kots.io/app-slug": "laverya-minimal-kots", + "kots.io/backup": "velero", + "pod-template-hash": "85fc9df74", + }, + annotations: { + "cni.projectcalico.org/containerID": + "c3fa12aad2ed6f726ecda31f7f94d1224c9f50a805a9efc67aaf4959e464434c", + "cni.projectcalico.org/podIP": "10.244.45.141/32", + "cni.projectcalico.org/podIPs": "10.244.45.141/32", + "kots.io/app-slug": "laverya-minimal-kots", + }, + ownerReferences: [ + { + apiVersion: "apps/v1", + kind: "ReplicaSet", + name: "example-es-85fc9df74", + uid: "b5008bca-1ad0-4107-8603-397fc3be74f8", + controller: true, + blockOwnerDeletion: true, + }, + ], + }, + spec: { + volumes: [ + { + name: "kube-api-access-fhfc4", + projected: { + sources: [ + { + serviceAccountToken: { + expirationSeconds: 3607, + path: "token", + }, + }, + { + configMap: { + name: "kube-root-ca.crt", + items: [{ key: "ca.crt", path: "ca.crt" }], + }, + }, + { + downwardAPI: { + items: [ + { + path: "namespace", + fieldRef: { + apiVersion: "v1", + fieldPath: "metadata.namespace", + }, + }, + ], + }, + }, + ], + defaultMode: 420, + }, + }, + ], + containers: [ + { + name: "es", + image: + "docker.elastic.co/elasticsearch/elasticsearch-oss:6.8.21", + envFrom: [{ configMapRef: { name: "example-config" } }], + resources: { + limits: { cpu: "500m", memory: "256Mi" }, + requests: { cpu: "50m", memory: "16Mi" }, + }, + volumeMounts: [ + { + name: "kube-api-access-fhfc4", + readOnly: true, + mountPath: "/var/run/secrets/kubernetes.io/serviceaccount", + }, + ], + terminationMessagePath: "/dev/termination-log", + terminationMessagePolicy: "File", + imagePullPolicy: "IfNotPresent", + }, + ], + restartPolicy: "Always", + terminationGracePeriodSeconds: 30, + dnsPolicy: "ClusterFirst", + serviceAccountName: "default", + serviceAccount: "default", + nodeName: "laverya-helmvm", + securityContext: {}, + imagePullSecrets: [{ name: "laverya-minimal-kots-registry" }], + schedulerName: "default-scheduler", + tolerations: [ + { + key: 
"node.kubernetes.io/not-ready", + operator: "Exists", + effect: "NoExecute", + tolerationSeconds: 300, + }, + { + key: "node.kubernetes.io/unreachable", + operator: "Exists", + effect: "NoExecute", + tolerationSeconds: 300, + }, + ], + priority: 0, + enableServiceLinks: true, + preemptionPolicy: "PreemptLowerPriority", + }, + status: { + phase: "Running", + conditions: [ + { + type: "Initialized", + status: "True", + lastProbeTime: null, + lastTransitionTime: "2023-10-17T16:22:37Z", + }, + { + type: "Ready", + status: "False", + lastProbeTime: null, + lastTransitionTime: "2023-10-17T19:55:16Z", + reason: "ContainersNotReady", + message: "containers with unready status: [es]", + }, + { + type: "ContainersReady", + status: "False", + lastProbeTime: null, + lastTransitionTime: "2023-10-17T19:55:16Z", + reason: "ContainersNotReady", + message: "containers with unready status: [es]", + }, + { + type: "PodScheduled", + status: "True", + lastProbeTime: null, + lastTransitionTime: "2023-10-17T16:22:37Z", + }, + ], + hostIP: "10.128.0.44", + podIP: "10.244.45.141", + podIPs: [{ ip: "10.244.45.141" }], + startTime: "2023-10-17T16:22:37Z", + containerStatuses: [ + { + name: "es", + state: { + waiting: { + reason: "CrashLoopBackOff", + message: + "back-off 5m0s restarting failed container=es pod=example-es-85fc9df74-g9jbn_helmvm(1caba3fb-bd52-430a-9cff-0eb0939317fa)", + }, + }, + lastState: { + terminated: { + exitCode: 137, + reason: "OOMKilled", + startedAt: "2023-10-17T19:55:11Z", + finishedAt: "2023-10-17T19:55:13Z", + containerID: + "containerd://9cce5c792b7ad61d040f7b8aca042d13a714100c75ebc40e71eb5444bbb65e83", + }, + }, + ready: false, + restartCount: 46, + image: + "docker.elastic.co/elasticsearch/elasticsearch-oss:6.8.21", + imageID: + "docker.elastic.co/elasticsearch/elasticsearch-oss@sha256:86e7750c4d896d41bd638b6e510e0610b98fd9fa48f8caeeed8ccd8424b1dc9f", + containerID: + "containerd://9cce5c792b7ad61d040f7b8aca042d13a714100c75ebc40e71eb5444bbb65e83", + started: false, + }, + ], + qosClass: "Burstable", + }, + }, + ], + }, + { + name: "test-helmvm-worker", + isConnected: true, + isReady: true, + isPrimaryNode: false, + canDelete: false, + kubeletVersion: "v1.28.2", + cpu: { + capacity: 4, + available: 3.761070507, + }, + memory: { + capacity: 15.50936508178711, + available: 11.742542266845703, + }, + pods: { + capacity: 110, + available: 94, + }, + labels: [ + "beta.kubernetes.io/arch:amd64", + "beta.kubernetes.io/os:linux", + "kubernetes.io/arch:amd64", + "kubernetes.io/os:linux", + "kurl.sh/cluster:true", + ], + conditions: { + memoryPressure: false, + diskPressure: false, + pidPressure: false, + ready: true, + }, + }, + ], +}; + +const HelmVMViewNode = () => { + const { slug, nodeName } = useParams(); + const { data: nodeData } = useQuery({ + queryKey: ["helmVmNode", nodeName], + queryFn: async ({ queryKey }) => { + const [, nodeName] = queryKey; + return ( + await fetch(`${process.env.API_ENDPOINT}/helmvm/node/${nodeName}`, { + headers: { + Accept: "application/json", + }, + credentials: "include", + method: "GET", + }) + ).json(); + }, + onError: (err) => { + if (err.status === 401) { + Utilities.logoutUser(); + return; + } + console.log( + "failed to get node status list, unexpected status code", + err.status + ); + }, + onSuccess: (data) => { + setState({ + // if cluster doesn't support ha, then primary will be disabled. Force into secondary + selectedNodeType: !data.ha ? 
"secondary" : state.selectedNodeType, + }); + }, + config: { + retry: false, + }, + }); + + const node = nodeData || testData.nodes[0]; + + // #region table data + const columns = useMemo( + () => [ + { + accessorKey: "name", + header: "Name", + enableHiding: false, + enableColumnDragging: false, + size: 150, + }, + { + accessorKey: "status", + header: "Status", + size: 150, + }, + { + accessorKey: "disk", + header: "Disk", + size: 150, + }, + { + accessorKey: "cpu", + header: "CPU", + size: 150, + }, + { + accessorKey: "memory", + header: "Memory", + size: 150, + }, + { + accessorKey: "canDelete", + header: "Delete Pod", + size: 150, + }, + ], + [] + ); + + const mappedPods = useMemo(() => { + return node?.podList?.map((p) => ({ + name: p.metadata.name, + status: p.status.phase, + disk: null, + cpu: null, + memory: null, + canDelete: ( + <> + + + ), + })); + }, [node?.podList?.toString()]); + // #endregion + + return ( +
+ {/* Breadcrumbs */} +

+ + Cluster Nodes + {" "} + / {node?.name} +

+ {/* Node Info */} +
+

+ Node Info +

+
+

Name

+

{node?.name}

+
+
+ {/* Pods table */} +
+

Pods

+ +
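+        {/* per-pod disk/cpu/memory are left null in mappedPods until pod metrics are plumbed through */}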
+ {/* Troubleshooting */} +
+

+ Troubleshooting +

+
+ {/* Danger Zone */} +
+

+ Danger Zone +

+ +
+
+ ); +}; + +export default HelmVMViewNode; diff --git a/web/tailwind.config.js b/web/tailwind.config.js index 1ea5c980de..d89f3af2a5 100644 --- a/web/tailwind.config.js +++ b/web/tailwind.config.js @@ -13,17 +13,60 @@ module.exports = { "teal-muted-dark": "#577981", "teal-medium": "#097992", gray: { - 100: "#dfdfdf", + 100: "#dedede", 200: "#c4c8ca", 300: "#b3b3b3", + 410: "#9b9b9b", 400: "#959595", 500: "#717171", 600: "#585858", 700: "#4f4f4f", - 800: "#323232" + 800: "#323232", + 900: "#2c2c2c", + }, + blue: { + 50: "#ecf4fe", + 75: "#b3d2fc", + 200: "#65a4f8", + 300: "#4591f7", + 400: "#3066ad", + }, + green: { + 50: "#e7f7f3", + 75: "#9cdfcf", + 100: "#73d2bb", + 200: "#37bf9e", + 300: "#0eb28a", + 400: "#0a7d61", + 500: "#096d54", + }, + indigo: { + 100: "#f0f1ff", + 200: "#c2c7fd", + 300: "#a9b0fd", + 400: "#838efc", + 500: "#6a77fb", + 600: "#4a53b0", + 700: "#414999", }, neutral: { - 700: "#4A4A4A" + 700: "#4A4A4A", + }, + teal: { + 300: "#4db9c0", + 400: "#38a3a8", + }, + pink: { + 50: "#fff0f3", + 100: "#ffc1cf", + 200: "#fea7bc", + 300: "#fe819f", + 400: "#fe678b", + 500: "#b24861", + 600: "#9b3f55", + }, + purple: { + 400: "#7242b0", }, error: "#bc4752", "error-xlight": "#fbedeb", @@ -34,26 +77,26 @@ module.exports = { "warning-bright": "#ec8f39", "info-bright": "#76bbca", "disabled-teal": "#76a6cf", - "dark-neon-green": "#38cc97" + "dark-neon-green": "#38cc97", }, extend: { borderRadius: { xs: "0.125rem", sm: "0.187rem", - variants: ["first", "last"] + variants: ["first", "last"], }, fontFamily: { - sans: ["Open Sans", ...defaultTheme.fontFamily.sans] - } - } + sans: ["Open Sans", ...defaultTheme.fontFamily.sans], + }, + }, }, corePlugins: { - preflight: false + preflight: false, }, plugins: [ plugin(function ({ addVariant }) { addVariant("is-enabled", "&:not([disabled])"); addVariant("is-disabled", "&[disabled]"); - }) - ] + }), + ], }; diff --git a/web/yarn.lock b/web/yarn.lock index d8d7d15bd8..e0d72198b0 100644 --- a/web/yarn.lock +++ b/web/yarn.lock @@ -350,6 +350,13 @@ dependencies: "@babel/types" "^7.18.6" +"@babel/helper-module-imports@^7.16.7": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz#16146307acdc40cc00c3b2c647713076464bdbf0" + integrity sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w== + dependencies: + "@babel/types" "^7.22.15" + "@babel/helper-module-transforms@^7.12.1", "@babel/helper-module-transforms@^7.18.6", "@babel/helper-module-transforms@^7.20.11", "@babel/helper-module-transforms@^7.20.2", "@babel/helper-module-transforms@^7.21.0": version "7.21.0" resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.21.0.tgz#89a8f86ad748870e3d024e470b2e8405e869db67" @@ -447,11 +454,21 @@ resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz#38d3acb654b4701a9b77fb0615a96f775c3a9e63" integrity sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw== +"@babel/helper-string-parser@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz#533f36457a25814cf1df6488523ad547d784a99f" + integrity sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw== + "@babel/helper-validator-identifier@^7.16.7", "@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1": version "7.19.1" 
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz#7eea834cf32901ffdc1a7ee555e2f9c27e249ca2" integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w== +"@babel/helper-validator-identifier@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0" + integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A== + "@babel/helper-validator-option@^7.18.6", "@babel/helper-validator-option@^7.21.0": version "7.21.0" resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz#8224c7e13ace4bafdc4004da2cf064ef42673180" @@ -1390,6 +1407,13 @@ dependencies: regenerator-runtime "^0.13.4" +"@babel/runtime@^7.23.1", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.7": + version "7.23.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.2.tgz#062b0ac103261d68a966c4c7baf2ae3e62ec3885" + integrity sha512-mM8eg4yl5D6i3lu2QKPuPH4FArvJ8KhTofbE7jwMUv9KX5mBvwPAqnV3MlyBNqdp9RyRKP6Yck8TrfYrPvX3bg== + dependencies: + regenerator-runtime "^0.14.0" + "@babel/runtime@^7.3.1": version "7.20.13" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.20.13.tgz#7055ab8a7cff2b8f6058bf6ae45ff84ad2aded4b" @@ -1535,6 +1559,15 @@ "@babel/helper-validator-identifier" "^7.19.1" to-fast-properties "^2.0.0" +"@babel/types@^7.22.15": + version "7.23.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.0.tgz#8c1f020c9df0e737e4e247c0619f58c68458aaeb" + integrity sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg== + dependencies: + "@babel/helper-string-parser" "^7.22.5" + "@babel/helper-validator-identifier" "^7.22.20" + to-fast-properties "^2.0.0" + "@base2/pretty-print-object@1.0.1": version "1.0.1" resolved "https://registry.yarnpkg.com/@base2/pretty-print-object/-/pretty-print-object-1.0.1.tgz#371ba8be66d556812dc7fb169ebc3c08378f69d4" @@ -1614,6 +1647,23 @@ resolved "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz" integrity sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw== +"@emotion/babel-plugin@^11.11.0": + version "11.11.0" + resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.11.0.tgz#c2d872b6a7767a9d176d007f5b31f7d504bb5d6c" + integrity sha512-m4HEDZleaaCH+XgDDsPF15Ht6wTLsgDTeR3WYj9Q/k76JtWhrJjcP4+/XlG8LGT/Rol9qUfOIztXeA84ATpqPQ== + dependencies: + "@babel/helper-module-imports" "^7.16.7" + "@babel/runtime" "^7.18.3" + "@emotion/hash" "^0.9.1" + "@emotion/memoize" "^0.8.1" + "@emotion/serialize" "^1.1.2" + babel-plugin-macros "^3.1.0" + convert-source-map "^1.5.0" + escape-string-regexp "^4.0.0" + find-root "^1.1.0" + source-map "^0.5.7" + stylis "4.2.0" + "@emotion/babel-utils@^0.6.4": version "0.6.10" resolved "https://registry.npmjs.org/@emotion/babel-utils/-/babel-utils-0.6.10.tgz" @@ -1626,11 +1676,27 @@ find-root "^1.1.0" source-map "^0.7.2" +"@emotion/cache@^11.11.0": + version "11.11.0" + resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.11.0.tgz#809b33ee6b1cb1a625fef7a45bc568ccd9b8f3ff" + integrity sha512-P34z9ssTCBi3e9EI1ZsWpNHcfY1r09ZO0rZbRO2ob3ZQMnFI35jB536qoXbkdesr5EUhYi22anuEJuyxifaqAQ== + dependencies: + "@emotion/memoize" "^0.8.1" + "@emotion/sheet" "^1.2.2" + "@emotion/utils" 
"^1.2.1" + "@emotion/weak-memoize" "^0.3.1" + stylis "4.2.0" + "@emotion/hash@^0.6.2", "@emotion/hash@^0.6.6": version "0.6.6" resolved "https://registry.npmjs.org/@emotion/hash/-/hash-0.6.6.tgz" integrity sha512-ojhgxzUHZ7am3D2jHkMzPpsBAiB005GF5YU4ea+8DNPybMk01JJUM9V9YRlF/GE95tcOm8DxQvWA2jq19bGalQ== +"@emotion/hash@^0.9.1": + version "0.9.1" + resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.9.1.tgz#4ffb0055f7ef676ebc3a5a91fb621393294e2f43" + integrity sha512-gJB6HLm5rYwSLI6PQa+X1t5CFGrv1J1TWG+sOyMCeKz2ojaj6Fnl/rZEspogG+cvqbt4AE/2eIyD2QfLKTBNlQ== + "@emotion/is-prop-valid@^1.1.0": version "1.1.3" resolved "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.1.3.tgz" @@ -1638,6 +1704,13 @@ dependencies: "@emotion/memoize" "^0.7.4" +"@emotion/is-prop-valid@^1.2.1": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@emotion/is-prop-valid/-/is-prop-valid-1.2.1.tgz#23116cf1ed18bfeac910ec6436561ecb1a3885cc" + integrity sha512-61Mf7Ufx4aDxx1xlDeOm8aFFigGHE4z+0sKCa+IHCeZKiyP9RLD0Mmx7m8b9/Cf37f7NAvQOOJAbQQGVr5uERw== + dependencies: + "@emotion/memoize" "^0.8.1" + "@emotion/memoize@^0.6.1", "@emotion/memoize@^0.6.6": version "0.6.6" resolved "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.6.6.tgz" @@ -1648,6 +1721,25 @@ resolved "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.7.5.tgz" integrity sha512-igX9a37DR2ZPGYtV6suZ6whr8pTFtyHL3K/oLUotxpSVO2ASaprmAe2Dkq7tBo7CRY7MMDrAa9nuQP9/YG8FxQ== +"@emotion/memoize@^0.8.1": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.8.1.tgz#c1ddb040429c6d21d38cc945fe75c818cfb68e17" + integrity sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA== + +"@emotion/react@^11.11.1": + version "11.11.1" + resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.11.1.tgz#b2c36afac95b184f73b08da8c214fdf861fa4157" + integrity sha512-5mlW1DquU5HaxjLkfkGN1GA/fvVGdyHURRiX/0FHl2cfIfRxSOfmxEH5YS43edp0OldZrZ+dkBKbngxcNCdZvA== + dependencies: + "@babel/runtime" "^7.18.3" + "@emotion/babel-plugin" "^11.11.0" + "@emotion/cache" "^11.11.0" + "@emotion/serialize" "^1.1.2" + "@emotion/use-insertion-effect-with-fallbacks" "^1.0.1" + "@emotion/utils" "^1.2.1" + "@emotion/weak-memoize" "^0.3.1" + hoist-non-react-statics "^3.3.1" + "@emotion/serialize@^0.9.1": version "0.9.1" resolved "https://registry.npmjs.org/@emotion/serialize/-/serialize-0.9.1.tgz" @@ -1658,6 +1750,34 @@ "@emotion/unitless" "^0.6.7" "@emotion/utils" "^0.8.2" +"@emotion/serialize@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.1.2.tgz#017a6e4c9b8a803bd576ff3d52a0ea6fa5a62b51" + integrity sha512-zR6a/fkFP4EAcCMQtLOhIgpprZOwNmCldtpaISpvz348+DP4Mz8ZoKaGGCQpbzepNIUWbq4w6hNZkwDyKoS+HA== + dependencies: + "@emotion/hash" "^0.9.1" + "@emotion/memoize" "^0.8.1" + "@emotion/unitless" "^0.8.1" + "@emotion/utils" "^1.2.1" + csstype "^3.0.2" + +"@emotion/sheet@^1.2.2": + version "1.2.2" + resolved "https://registry.yarnpkg.com/@emotion/sheet/-/sheet-1.2.2.tgz#d58e788ee27267a14342303e1abb3d508b6d0fec" + integrity sha512-0QBtGvaqtWi+nx6doRwDdBIzhNdZrXUppvTM4dtZZWEGTXL/XE/yJxLMGlDT1Gt+UHH5IX1n+jkXyytE/av7OA== + +"@emotion/styled@^11.11.0": + version "11.11.0" + resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.11.0.tgz#26b75e1b5a1b7a629d7c0a8b708fbf5a9cdce346" + integrity sha512-hM5Nnvu9P3midq5aaXj4I+lnSfNi7Pmd4EWk1fOZ3pxookaQTNew6bp4JaCBYM4HVFZF9g7UjJmsUmC2JlxOng== + dependencies: + "@babel/runtime" "^7.18.3" + 
"@emotion/babel-plugin" "^11.11.0" + "@emotion/is-prop-valid" "^1.2.1" + "@emotion/serialize" "^1.1.2" + "@emotion/use-insertion-effect-with-fallbacks" "^1.0.1" + "@emotion/utils" "^1.2.1" + "@emotion/stylis@^0.7.0": version "0.7.1" resolved "https://registry.npmjs.org/@emotion/stylis/-/stylis-0.7.1.tgz" @@ -1678,11 +1798,31 @@ resolved "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.7.5.tgz" integrity sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg== +"@emotion/unitless@^0.8.1": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.8.1.tgz#182b5a4704ef8ad91bde93f7a860a88fd92c79a3" + integrity sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ== + +"@emotion/use-insertion-effect-with-fallbacks@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.0.1.tgz#08de79f54eb3406f9daaf77c76e35313da963963" + integrity sha512-jT/qyKZ9rzLErtrjGgdkMBn2OP8wl0G3sQlBb3YPryvKHsjvINUhVaPFfP+fpBcOkmrVOVEEHQFJ7nbj2TH2gw== + "@emotion/utils@^0.8.2": version "0.8.2" resolved "https://registry.npmjs.org/@emotion/utils/-/utils-0.8.2.tgz" integrity sha512-rLu3wcBWH4P5q1CGoSSH/i9hrXs7SlbRLkoq9IGuoPYNGQvDJ3pt/wmOM+XgYjIDRMVIdkUWt0RsfzF50JfnCw== +"@emotion/utils@^1.2.1": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.2.1.tgz#bbab58465738d31ae4cb3dbb6fc00a5991f755e4" + integrity sha512-Y2tGf3I+XVnajdItskUCn6LX+VUDmP6lTL4fcqsXAv43dnlbZiuW4MWQW38rW/BVWSE7Q/7+XQocmpnRYILUmg== + +"@emotion/weak-memoize@^0.3.1": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.3.1.tgz#d0fce5d07b0620caa282b5131c297bb60f9d87e6" + integrity sha512-EsBwpc7hBUJWAsNPBmJy4hxWx12v6bshQsldrVmjxJoc3isbxhOrF2IcCpaXxfvq03NwkI7sbsOLXbYuqF/8Ww== + "@eslint/eslintrc@^1.3.0": version "1.3.0" resolved "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-1.3.0.tgz" @@ -1698,6 +1838,33 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" +"@floating-ui/core@^1.4.2": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.5.0.tgz#5c05c60d5ae2d05101c3021c1a2a350ddc027f8c" + integrity sha512-kK1h4m36DQ0UHGj5Ah4db7R0rHemTqqO0QLvUqi1/mUUp3LuAWbWxdxSIf/XsnH9VS6rRVPLJCncjRzUvyCLXg== + dependencies: + "@floating-ui/utils" "^0.1.3" + +"@floating-ui/dom@^1.5.1": + version "1.5.3" + resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.5.3.tgz#54e50efcb432c06c23cd33de2b575102005436fa" + integrity sha512-ClAbQnEqJAKCJOEbbLo5IUlZHkNszqhuxS4fHAVxRPXPya6Ysf2G8KypnYcOTpx6I8xcgF9bbHb6g/2KpbV8qA== + dependencies: + "@floating-ui/core" "^1.4.2" + "@floating-ui/utils" "^0.1.3" + +"@floating-ui/react-dom@^2.0.2": + version "2.0.2" + resolved "https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-2.0.2.tgz#fab244d64db08e6bed7be4b5fcce65315ef44d20" + integrity sha512-5qhlDvjaLmAst/rKb3VdlCinwTF4EYMiVxuuc/HVUjs46W0zgtbMmAZ1UTsDrRTxRmUEzl92mOtWbeeXL26lSQ== + dependencies: + "@floating-ui/dom" "^1.5.1" + +"@floating-ui/utils@^0.1.3": + version "0.1.6" + resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.1.6.tgz#22958c042e10b67463997bd6ea7115fe28cbcaf9" + integrity sha512-OfX7E2oUDYxtBvsuS4e/jSn4Q9Qb6DzgeYtsAdkPZ47znpoNsMgZw0+tVijiv3uGNR6dgNlty6r9rzIzHjtd/A== + "@gar/promisify@^1.0.1": version "1.1.3" resolved 
"https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.3.tgz#555193ab2e3bb3b6adc3d551c9c030d9e860daf6" @@ -2204,6 +2371,97 @@ call-me-maybe "^1.0.1" glob-to-regexp "^0.3.0" +"@mui/base@5.0.0-beta.20": + version "5.0.0-beta.20" + resolved "https://registry.yarnpkg.com/@mui/base/-/base-5.0.0-beta.20.tgz#14fcdfe0350f2aad06ab6c37c4c91dacaab8f600" + integrity sha512-CS2pUuqxST7ch9VNDCklRYDbJ3rru20Tx7na92QvVVKfu3RL4z/QLuVIc8jYGsdCnauMaeUSlFNLAJNb0yXe6w== + dependencies: + "@babel/runtime" "^7.23.1" + "@floating-ui/react-dom" "^2.0.2" + "@mui/types" "^7.2.6" + "@mui/utils" "^5.14.13" + "@popperjs/core" "^2.11.8" + clsx "^2.0.0" + prop-types "^15.8.1" + +"@mui/core-downloads-tracker@^5.14.14": + version "5.14.14" + resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-5.14.14.tgz#a54894e9b4dc908ab2d59eac543219d9018448e6" + integrity sha512-Rw/xKiTOUgXD8hdKqj60aC6QcGprMipG7ne2giK6Mz7b4PlhL/xog9xLeclY3BxsRLkZQ05egFnIEY1CSibTbw== + +"@mui/icons-material@^5.14.14": + version "5.14.14" + resolved "https://registry.yarnpkg.com/@mui/icons-material/-/icons-material-5.14.14.tgz#02d33f51f0b9de238d5c47b0a31ff330144393c4" + integrity sha512-vwuaMsKvI7AWTeYqR8wYbpXijuU8PzMAJWRAq2DDIuOZPxjKyHlr8WQ25+azZYkIXtJ7AqnVb1ZmHdEyB4/kug== + dependencies: + "@babel/runtime" "^7.23.1" + +"@mui/material@^5.14.14": + version "5.14.14" + resolved "https://registry.yarnpkg.com/@mui/material/-/material-5.14.14.tgz#e47f3992b609002cd57a71f70e829dc2d286028c" + integrity sha512-cAmCwAHFQXxb44kWbVFkhKATN8tACgMsFwrXo8ro6WzYW73U/qsR5AcCiJIhCyYYg+gcftfkmNcpRaV3JjhHCg== + dependencies: + "@babel/runtime" "^7.23.1" + "@mui/base" "5.0.0-beta.20" + "@mui/core-downloads-tracker" "^5.14.14" + "@mui/system" "^5.14.14" + "@mui/types" "^7.2.6" + "@mui/utils" "^5.14.13" + "@types/react-transition-group" "^4.4.7" + clsx "^2.0.0" + csstype "^3.1.2" + prop-types "^15.8.1" + react-is "^18.2.0" + react-transition-group "^4.4.5" + +"@mui/private-theming@^5.14.14": + version "5.14.14" + resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-5.14.14.tgz#035dde1eb30c896c69a12b7dee1dce3a323c66e9" + integrity sha512-n77au3CQj9uu16hak2Y+rvbGSBaJKxziG/gEbOLVGrAuqZ+ycVSkorCfN6Y/4XgYOpG/xvmuiY3JwhAEOzY3iA== + dependencies: + "@babel/runtime" "^7.23.1" + "@mui/utils" "^5.14.13" + prop-types "^15.8.1" + +"@mui/styled-engine@^5.14.13": + version "5.14.14" + resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-5.14.14.tgz#b0ededf531fff1ef110f7b263c2d3d95a0b8ec9a" + integrity sha512-sF3DS2PVG+cFWvkVHQQaGFpL1h6gSwOW3L91pdxPLQDHDZ5mZ/X0SlXU5XA+WjypoysG4urdAQC7CH/BRvUiqg== + dependencies: + "@babel/runtime" "^7.23.1" + "@emotion/cache" "^11.11.0" + csstype "^3.1.2" + prop-types "^15.8.1" + +"@mui/system@^5.14.14": + version "5.14.14" + resolved "https://registry.yarnpkg.com/@mui/system/-/system-5.14.14.tgz#f33327e74230523169107ace960e8bb51cbdbab7" + integrity sha512-y4InFmCgGGWXnz+iK4jRTWVikY0HgYnABjz4wgiUgEa2W1H8M4ow+27BegExUWPkj4TWthQ2qG9FOGSMtI+PKA== + dependencies: + "@babel/runtime" "^7.23.1" + "@mui/private-theming" "^5.14.14" + "@mui/styled-engine" "^5.14.13" + "@mui/types" "^7.2.6" + "@mui/utils" "^5.14.13" + clsx "^2.0.0" + csstype "^3.1.2" + prop-types "^15.8.1" + +"@mui/types@^7.2.6": + version "7.2.6" + resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.6.tgz#d72b9e9eb0032e107e76033932d65c3f731d2608" + integrity sha512-7sjLQrUmBwufm/M7jw/quNiPK/oor2+pGUQP2CULRcFCArYTq78oJ3D5esTaL0UMkXKJvDqXn6Ike69yAOBQng== + +"@mui/utils@^5.14.13": + version "5.14.14" + 
resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.14.14.tgz#7b2a0bcfb44c3376fc81f85500f9bd01706682ac" + integrity sha512-3AKp8uksje5sRfVrtgG9Q/2TBsHWVBUtA0NaXliZqGcXo8J+A+Agp0qUW2rJ+ivgPWTCCubz9FZVT2IQZ3bGsw== + dependencies: + "@babel/runtime" "^7.23.1" + "@types/prop-types" "^15.7.7" + prop-types "^15.8.1" + react-is "^18.2.0" + "@nicolo-ribaudo/chokidar-2@2.1.8-no-fsevents.3": version "2.1.8-no-fsevents.3" resolved "https://registry.npmjs.org/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz" @@ -2345,6 +2603,11 @@ resolved "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.21.tgz" integrity sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g== +"@popperjs/core@^2.11.8": + version "2.11.8" + resolved "https://registry.yarnpkg.com/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f" + integrity sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A== + "@remix-run/router@1.5.0": version "1.5.0" resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.5.0.tgz#57618e57942a5f0131374a9fdb0167e25a117fdc" @@ -3661,7 +3924,7 @@ regenerator-runtime "^0.13.7" resolve-from "^5.0.0" -"@tanstack/match-sorter-utils@^8.7.0": +"@tanstack/match-sorter-utils@8.8.4", "@tanstack/match-sorter-utils@^8.7.0": version "8.8.4" resolved "https://registry.yarnpkg.com/@tanstack/match-sorter-utils/-/match-sorter-utils-8.8.4.tgz#0b2864d8b7bac06a9f84cb903d405852cc40a457" integrity sha512-rKH8LjZiszWEvmi01NR72QWZ8m4xmXre0OOwlRGnjU01Eqz/QnN+cqpty2PJ0efHblq09+KilvyR7lsbzmXVEw== @@ -3690,6 +3953,30 @@ "@tanstack/query-core" "4.36.1" use-sync-external-store "^1.2.0" +"@tanstack/react-table@8.10.7": + version "8.10.7" + resolved "https://registry.yarnpkg.com/@tanstack/react-table/-/react-table-8.10.7.tgz#733f4bee8cf5aa19582f944dd0fd3224b21e8c94" + integrity sha512-bXhjA7xsTcsW8JPTTYlUg/FuBpn8MNjiEPhkNhIGCUR6iRQM2+WEco4OBpvDeVcR9SE+bmWLzdfiY7bCbCSVuA== + dependencies: + "@tanstack/table-core" "8.10.7" + +"@tanstack/react-virtual@3.0.0-beta.65": + version "3.0.0-beta.65" + resolved "https://registry.yarnpkg.com/@tanstack/react-virtual/-/react-virtual-3.0.0-beta.65.tgz#a29a10c761afd00c8000dc38adf60088656e0e62" + integrity sha512-Q21cUoE0C8Oyzy3RAMV+u4BuB+RwIf2/oQRCWksmIBp1PqLEtvXhAldh7v/wUt7WKEkislKDICZAvbYYs7EAyQ== + dependencies: + "@tanstack/virtual-core" "3.0.0-beta.65" + +"@tanstack/table-core@8.10.7": + version "8.10.7" + resolved "https://registry.yarnpkg.com/@tanstack/table-core/-/table-core-8.10.7.tgz#577e8a635048875de4c9d6d6a3c21d26ff9f9d08" + integrity sha512-KQk5OMg5OH6rmbHZxuNROvdI+hKDIUxANaHlV+dPlNN7ED3qYQ/WkpY2qlXww1SIdeMlkIhpN/2L00rof0fXFw== + +"@tanstack/virtual-core@3.0.0-beta.65": + version "3.0.0-beta.65" + resolved "https://registry.yarnpkg.com/@tanstack/virtual-core/-/virtual-core-3.0.0-beta.65.tgz#fac199321db7787db9463082903dca23c0850c5c" + integrity sha512-ObP2pvXBdbivinr7BWDbGqYt4TK8wNzYsOWio+qBkDx5AJFuvqcdJxcCCYnv4dzVTe5ELA1MT4tkt8NB/tnEdA== + "@testing-library/dom@^8.3.0": version "8.19.0" resolved "https://registry.yarnpkg.com/@testing-library/dom/-/dom-8.19.0.tgz#bd3f83c217ebac16694329e413d9ad5fdcfd785f" @@ -4183,6 +4470,11 @@ resolved "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz" integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w== +"@types/prop-types@^15.7.7": + version "15.7.8" + resolved 
"https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.8.tgz#805eae6e8f41bd19e88917d2ea200dc992f405d3" + integrity sha512-kMpQpfZKSCBqltAJwskgePRaYRFukDkm1oItcAbC3gNELR20XIBcN9VRgg4+m8DKsTfkWeA4m4Imp4DDuWy7FQ== + "@types/q@1.0.7": version "1.0.7" resolved "https://registry.npmjs.org/@types/q/-/q-1.0.7.tgz" @@ -4252,6 +4544,13 @@ dependencies: "@types/react" "*" +"@types/react-transition-group@^4.4.7": + version "4.4.7" + resolved "https://registry.yarnpkg.com/@types/react-transition-group/-/react-transition-group-4.4.7.tgz#bf69f269d74aa78b99097673ca6dd6824a68ef1c" + integrity sha512-ICCyBl5mvyqYp8Qeq9B5G/fyBSRC0zx3XM3sCC6KkcMsNeAHqXBKkmat4GqdJET5jtYUpZXrxI5flve5qhi2Eg== + dependencies: + "@types/react" "*" + "@types/react@*": version "18.0.20" resolved "https://registry.yarnpkg.com/@types/react/-/react-18.0.20.tgz#e4c36be3a55eb5b456ecf501bd4a00fd4fd0c9ab" @@ -5657,7 +5956,7 @@ babel-plugin-macros@^2.0.0: cosmiconfig "^6.0.0" resolve "^1.12.0" -babel-plugin-macros@^3.0.1: +babel-plugin-macros@^3.0.1, babel-plugin-macros@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz#9ef6dc74deb934b4db344dc973ee851d148c50c1" integrity sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg== @@ -6801,6 +7100,11 @@ clsx@^1.0.4: resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.2.1.tgz#0ddc4a20a549b59c93a4116bb26f5294ca17dc12" integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg== +clsx@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-2.0.0.tgz#12658f3fd98fafe62075595a5c30e43d18f3d00b" + integrity sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q== + co@^4.6.0: version "4.6.0" resolved "https://registry.npmjs.org/co/-/co-4.6.0.tgz" @@ -7361,6 +7665,11 @@ csstype@^3.0.2: resolved "https://registry.npmjs.org/csstype/-/csstype-3.0.11.tgz" integrity sha512-sa6P2wJ+CAbgyy4KFssIb/JNMLxFvKF1pCYCSXS8ZMuqZnMsrxqI2E5sPyoTpxoPU/gVZMzr2zjOfg8GIZOMsw== +csstype@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.2.tgz#1d4bf9d572f11c14031f0436e1c10bc1f571f50b" + integrity sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ== + currently-unhandled@^0.4.1: version "0.4.1" resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea" @@ -7816,6 +8125,14 @@ dom-helpers@^3.4.0: dependencies: "@babel/runtime" "^7.1.2" +dom-helpers@^5.0.1: + version "5.2.1" + resolved "https://registry.yarnpkg.com/dom-helpers/-/dom-helpers-5.2.1.tgz#d9400536b2bf8225ad98fe052e029451ac40e902" + integrity sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA== + dependencies: + "@babel/runtime" "^7.8.7" + csstype "^3.0.2" + dom-serializer@^1.0.1: version "1.4.1" resolved "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz" @@ -9768,6 +10085,11 @@ he@^1.2.0: resolved "https://registry.npmjs.org/he/-/he-1.2.0.tgz" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== +highlight-words@1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/highlight-words/-/highlight-words-1.2.2.tgz#9875b75d11814d7356b24f23feeb7d77761fa867" + integrity sha512-Mf4xfPXYm8Ay1wTibCrHpNWeR2nUMynMVFkXCi4mbl+TEgmNOe+I4hV7W3OCZcSvzGL6kupaqpfHOemliMTGxQ== + 
highlight.js@^10.4.1, highlight.js@~10.7.0: version "10.7.3" resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.7.3.tgz#697272e3991356e40c3cac566a74eef681756531" @@ -9799,7 +10121,7 @@ hoek@4.2.1: resolved "https://registry.npmjs.org/hoek/-/hoek-4.2.1.tgz" integrity sha512-QLg82fGkfnJ/4iy1xZ81/9SIJiq1NGFUMGs6ParyjBZr6jW2Ufj/snDqTHixNlHdPNwN2RLVD0Pi3igeK9+JfA== -hoist-non-react-statics@^3.0.0, hoist-non-react-statics@^3.1.0, hoist-non-react-statics@^3.3.0: +hoist-non-react-statics@^3.0.0, hoist-non-react-statics@^3.1.0, hoist-non-react-statics@^3.3.0, hoist-non-react-statics@^3.3.1: version "3.3.2" resolved "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz" integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== @@ -11756,6 +12078,16 @@ marked@4.0.12: resolved "https://registry.npmjs.org/marked/-/marked-4.0.12.tgz" integrity sha512-hgibXWrEDNBWgGiK18j/4lkS6ihTe9sxtV4Q1OQppb/0zzyPSzoFANBa5MfsG/zgsWklmNnhm0XACZOH/0HBiQ== +material-react-table@^1.15.1: + version "1.15.1" + resolved "https://registry.yarnpkg.com/material-react-table/-/material-react-table-1.15.1.tgz#c2bdfdd9c9636acbb2e8ffd5553a82395a2d9f4a" + integrity sha512-TXidRV7lGtCV5G/ON9Y38TztRcmpKFodFmyTCjvlKXCl5/9X+KY4waP8U0l16FFslg1f7HGWhfkqV5OfUfEIoA== + dependencies: + "@tanstack/match-sorter-utils" "8.8.4" + "@tanstack/react-table" "8.10.7" + "@tanstack/react-virtual" "3.0.0-beta.65" + highlight-words "1.2.2" + md5.js@^1.3.4: version "1.3.5" resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f" @@ -13869,7 +14201,7 @@ react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0: resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== -react-is@^18.0.0: +react-is@^18.0.0, react-is@^18.2.0: version "18.2.0" resolved "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz" integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w== @@ -13997,6 +14329,16 @@ react-transition-group@^2.2.1: prop-types "^15.6.2" react-lifecycles-compat "^3.0.4" +react-transition-group@^4.4.5: + version "4.4.5" + resolved "https://registry.yarnpkg.com/react-transition-group/-/react-transition-group-4.4.5.tgz#e53d4e3f3344da8521489fbef8f2581d42becdd1" + integrity sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g== + dependencies: + "@babel/runtime" "^7.5.5" + dom-helpers "^5.0.1" + loose-envify "^1.4.0" + prop-types "^15.6.2" + react-vis@^1.11.7: version "1.11.7" resolved "https://registry.npmjs.org/react-vis/-/react-vis-1.11.7.tgz" @@ -14193,6 +14535,11 @@ regenerator-runtime@^0.13.11, regenerator-runtime@^0.13.2, regenerator-runtime@^ resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== +regenerator-runtime@^0.14.0: + version "0.14.0" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45" + integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA== + regenerator-transform@^0.15.1: version "0.15.1" resolved 
"https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.1.tgz#f6c4e99fc1b4591f780db2586328e4d9a9d8dc56" @@ -15485,6 +15832,11 @@ stylis-rule-sheet@^0.0.10: resolved "https://registry.npmjs.org/stylis-rule-sheet/-/stylis-rule-sheet-0.0.10.tgz" integrity sha512-nTbZoaqoBnmK+ptANthb10ZRZOGC+EmTLLUxeYIuHNkEKcmKgXX1XWKkUBT2Ac4es3NybooPe0SmvKdhKJZAuw== +stylis@4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.2.0.tgz#79daee0208964c8fe695a42fcffcac633a211a51" + integrity sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw== + stylis@^3.5.0: version "3.5.4" resolved "https://registry.npmjs.org/stylis/-/stylis-3.5.4.tgz" From ccfeafb8d4ef1b8299d75bfd06a478843b685abf Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Wed, 18 Oct 2023 16:36:35 -0600 Subject: [PATCH 05/31] update tanstack query imports --- web/src/components/apps/HelmVMClusterManagement.tsx | 6 +++--- web/src/components/apps/HelmVMViewNode.jsx | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index ed0fcfa3f2..45971a563f 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -2,7 +2,7 @@ import classNames from "classnames"; import MaterialReactTable from "material-react-table"; import React, { ChangeEvent, useMemo, useReducer, useState } from "react"; import Modal from "react-modal"; -import { useQuery } from "react-query"; +import { useQuery } from "@tanstack/react-query"; import { Link, useParams } from "react-router-dom"; import { KotsPageTitle } from "@components/Head"; @@ -165,8 +165,8 @@ const HelmVMClusterManagement = ({ data: nodesData, isInitialLoading: nodesLoading, error: nodesError, - } = useQuery({ - queryKey: "helmVmNodes", + } = useQuery({ + queryKey: ["helmVmNodes"], queryFn: async () => { const res = await fetch(`${process.env.API_ENDPOINT}/helmvm/nodes`, { headers: { diff --git a/web/src/components/apps/HelmVMViewNode.jsx b/web/src/components/apps/HelmVMViewNode.jsx index a9b8bae245..15fa3ee4d7 100644 --- a/web/src/components/apps/HelmVMViewNode.jsx +++ b/web/src/components/apps/HelmVMViewNode.jsx @@ -1,6 +1,6 @@ import { MaterialReactTable } from "material-react-table"; import React, { useMemo } from "react"; -import { useQuery } from "react-query"; +import { useQuery } from "@tanstack/react-query"; import { Link, useParams } from "react-router-dom"; const testData = { From 7a9f91436025f75519623d7478411f3c5128a513 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Wed, 18 Oct 2023 16:52:40 -0600 Subject: [PATCH 06/31] shorter embedded-cluster join commands (#4075) * shorter commands wip * actually return the token * actually return token * return entire commands * handle error * fix lolgic * imports --- migrations/tables/k0s_tokens.yaml | 21 ++++ pkg/handlers/handlers.go | 7 +- pkg/handlers/helmvm_node_join_command.go | 85 ++++++++++++-- pkg/handlers/interface.go | 2 +- pkg/handlers/mock/mock.go | 6 +- pkg/helmvm/node_join.go | 136 +++++++++++++++-------- pkg/helmvm/util.go | 9 ++ pkg/store/kotsstore/k0s_store.go | 68 ++++++++++++ 8 files changed, 272 insertions(+), 62 deletions(-) create mode 100644 migrations/tables/k0s_tokens.yaml create mode 100644 pkg/store/kotsstore/k0s_store.go diff --git a/migrations/tables/k0s_tokens.yaml b/migrations/tables/k0s_tokens.yaml new file 
mode 100644 index 0000000000..1ae6760972 --- /dev/null +++ b/migrations/tables/k0s_tokens.yaml @@ -0,0 +1,21 @@ +apiVersion: schemas.schemahero.io/v1alpha4 +kind: Table +metadata: + name: k0s-tokens +spec: + name: k0s_tokens + requires: [] + schema: + rqlite: + strict: true + primaryKey: + - token + columns: + - name: token + type: text + constraints: + notNull: true + - name: roles + type: text + constraints: + notNull: true diff --git a/pkg/handlers/handlers.go b/pkg/handlers/handlers.go index 1ddde1d543..8ee55ea376 100644 --- a/pkg/handlers/handlers.go +++ b/pkg/handlers/handlers.go @@ -277,8 +277,8 @@ func RegisterSessionAuthRoutes(r *mux.Router, kotsStore store.Store, handler KOT // HelmVM r.Name("HelmVM").Path("/api/v1/helmvm").HandlerFunc(NotImplemented) - r.Name("GenerateHelmVMNodeJoinCommand").Path("/api/v1/helmvm/generate-node-join-command").Methods("POST"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateHelmVMNodeJoinCommand)) + r.Name("GenerateK0sNodeJoinCommand").Path("/api/v1/helmvm/generate-node-join-command").Methods("POST"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateK0sNodeJoinCommand)) r.Name("DrainHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}/drain").Methods("POST"). HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DrainHelmVMNode)) r.Name("DeleteHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}").Methods("DELETE"). @@ -355,6 +355,9 @@ func RegisterUnauthenticatedRoutes(handler *Handler, kotsStore store.Store, debu // These handlers should be called by the application only. loggingRouter.Path("/license/v1/license").Methods("GET").HandlerFunc(handler.GetPlatformLicenseCompatibility) loggingRouter.Path("/api/v1/app/custom-metrics").Methods("POST").HandlerFunc(handler.GetSendCustomAppMetricsHandler(kotsStore)) + + // This handler requires a valid token in the query + loggingRouter.Path("/api/v1/embedded-cluster/join").Methods("GET").HandlerFunc(handler.GetK0sNodeJoinCommand) } func RegisterLicenseIDAuthRoutes(r *mux.Router, kotsStore store.Store, handler KOTSHandler) { diff --git a/pkg/handlers/helmvm_node_join_command.go b/pkg/handlers/helmvm_node_join_command.go index 6b8abe4654..b4d6a0da4f 100644 --- a/pkg/handlers/helmvm_node_join_command.go +++ b/pkg/handlers/helmvm_node_join_command.go @@ -2,54 +2,115 @@ package handlers import ( "encoding/json" + "fmt" "net/http" - "time" "github.com/replicatedhq/kots/pkg/helmvm" "github.com/replicatedhq/kots/pkg/k8sutil" "github.com/replicatedhq/kots/pkg/logger" + "github.com/replicatedhq/kots/pkg/store/kotsstore" ) -type GenerateHelmVMNodeJoinCommandResponse struct { +type GenerateK0sNodeJoinCommandResponse struct { Command []string `json:"command"` - Expiry string `json:"expiry"` +} + +type GetK0sNodeJoinCommandResponse struct { + ClusterID string `json:"clusterID"` + K0sJoinCommand string `json:"k0sJoinCommand"` + K0sToken string `json:"k0sToken"` } type GenerateHelmVMNodeJoinCommandRequest struct { Roles []string `json:"roles"` } -func (h *Handler) GenerateHelmVMNodeJoinCommand(w http.ResponseWriter, r *http.Request) { +func (h *Handler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { generateHelmVMNodeJoinCommandRequest := GenerateHelmVMNodeJoinCommandRequest{} if err := json.NewDecoder(r.Body).Decode(&generateHelmVMNodeJoinCommandRequest); err != nil { - logger.Error(err) + logger.Error(fmt.Errorf("failed to decode request body: %w", err)) w.WriteHeader(http.StatusBadRequest) return } + store := kotsstore.StoreFromEnv() + 
token, err := store.SetK0sInstallCommandRoles(generateHelmVMNodeJoinCommandRequest.Roles) + if err != nil { + logger.Error(fmt.Errorf("failed to set k0s install command roles: %w", err)) + w.WriteHeader(http.StatusInternalServerError) + return + } + + client, err := k8sutil.GetClientset() + if err != nil { + logger.Error(fmt.Errorf("failed to get clientset: %w", err)) + w.WriteHeader(http.StatusInternalServerError) + return + } + nodeJoinCommand, err := helmvm.GenerateAddNodeCommand(r.Context(), client, token) + if err != nil { + logger.Error(fmt.Errorf("failed to generate add node command: %w", err)) + w.WriteHeader(http.StatusInternalServerError) + return + } + + JSON(w, http.StatusOK, GenerateK0sNodeJoinCommandResponse{ + Command: []string{nodeJoinCommand}, + }) +} + +// this function relies on the token being valid for authentication +func (h *Handler) GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { + // read query string, ensure that the token is valid + token := r.URL.Query().Get("token") + store := kotsstore.StoreFromEnv() + roles, err := store.GetK0sInstallCommandRoles(token) + if err != nil { + logger.Error(fmt.Errorf("failed to get k0s install command roles: %w", err)) + w.WriteHeader(http.StatusInternalServerError) + return + } + + // use roles to generate join token etc client, err := k8sutil.GetClientset() if err != nil { - logger.Error(err) + logger.Error(fmt.Errorf("failed to get clientset: %w", err)) w.WriteHeader(http.StatusInternalServerError) return } k0sRole := "worker" - for _, role := range generateHelmVMNodeJoinCommandRequest.Roles { + for _, role := range roles { if role == "controller" { k0sRole = "controller" break } } - command, expiry, err := helmvm.GenerateAddNodeCommand(r.Context(), client, k0sRole) + k0sToken, err := helmvm.GenerateAddNodeToken(r.Context(), client, k0sRole) + if err != nil { + logger.Error(fmt.Errorf("failed to generate add node token: %w", err)) + w.WriteHeader(http.StatusInternalServerError) + return + } + + k0sJoinCommand, err := helmvm.GenerateK0sJoinCommand(r.Context(), client, roles) if err != nil { - logger.Error(err) + logger.Error(fmt.Errorf("failed to generate k0s join command: %w", err)) w.WriteHeader(http.StatusInternalServerError) return } - JSON(w, http.StatusOK, GenerateHelmVMNodeJoinCommandResponse{ - Command: command, - Expiry: expiry.Format(time.RFC3339), + + clusterID, err := helmvm.ClusterID(client) + if err != nil { + logger.Error(fmt.Errorf("failed to get cluster id: %w", err)) + w.WriteHeader(http.StatusInternalServerError) + return + } + + JSON(w, http.StatusOK, GetK0sNodeJoinCommandResponse{ + ClusterID: clusterID, + K0sJoinCommand: k0sJoinCommand, + K0sToken: k0sToken, }) } diff --git a/pkg/handlers/interface.go b/pkg/handlers/interface.go index d98dd07e18..0a1fa800fe 100644 --- a/pkg/handlers/interface.go +++ b/pkg/handlers/interface.go @@ -139,7 +139,7 @@ type KOTSHandler interface { GetKurlNodes(w http.ResponseWriter, r *http.Request) // HelmVM - GenerateHelmVMNodeJoinCommand(w http.ResponseWriter, r *http.Request) + GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) DrainHelmVMNode(w http.ResponseWriter, r *http.Request) DeleteHelmVMNode(w http.ResponseWriter, r *http.Request) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) diff --git a/pkg/handlers/mock/mock.go b/pkg/handlers/mock/mock.go index c186b8a9ac..0898775f70 100644 --- a/pkg/handlers/mock/mock.go +++ b/pkg/handlers/mock/mock.go @@ -443,15 +443,15 @@ func (mr *MockKOTSHandlerMockRecorder) GarbageCollectImages(w, r 
interface{}) *g } // GenerateHelmVMNodeJoinCommand mocks base method. -func (m *MockKOTSHandler) GenerateHelmVMNodeJoinCommand(w http.ResponseWriter, r *http.Request) { +func (m *MockKOTSHandler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() - m.ctrl.Call(m, "GenerateHelmVMNodeJoinCommand", w, r) + m.ctrl.Call(m, "GenerateK0sNodeJoinCommand", w, r) } // GenerateHelmVMNodeJoinCommand indicates an expected call of GenerateHelmVMNodeJoinCommand. func (mr *MockKOTSHandlerMockRecorder) GenerateHelmVMNodeJoinCommand(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateHelmVMNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateHelmVMNodeJoinCommand), w, r) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateK0sNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateK0sNodeJoinCommand), w, r) } // GenerateKurlNodeJoinCommandMaster mocks base method. diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go index 4bbf1e197c..aa6544f044 100644 --- a/pkg/helmvm/node_join.go +++ b/pkg/helmvm/node_join.go @@ -3,63 +3,56 @@ package helmvm import ( "context" "fmt" + "os" "strings" "sync" "time" - "github.com/google/uuid" corev1 "k8s.io/api/core/v1" kuberneteserrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) -type joinCommandEntry struct { - Command []string +type joinTokenEntry struct { + Token string Creation *time.Time Mut sync.Mutex } -var joinCommandMapMut = sync.Mutex{} -var joinCommandMap = map[string]*joinCommandEntry{} +var joinTokenMapMut = sync.Mutex{} +var joinTokenMap = map[string]*joinTokenEntry{} -// GenerateAddNodeCommand will generate the HelmVM node add command for a primary or secondary node +// GenerateAddNodeToken will generate the HelmVM node add command for a primary or secondary node // join commands will last for 24 hours, and will be cached for 1 hour after first generation -func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, nodeRole string) ([]string, *time.Time, error) { - // get the joinCommand struct entry for this node role - joinCommandMapMut.Lock() - if _, ok := joinCommandMap[nodeRole]; !ok { - joinCommandMap[nodeRole] = &joinCommandEntry{} +func GenerateAddNodeToken(ctx context.Context, client kubernetes.Interface, nodeRole string) (string, error) { + // get the joinToken struct entry for this node role + joinTokenMapMut.Lock() + if _, ok := joinTokenMap[nodeRole]; !ok { + joinTokenMap[nodeRole] = &joinTokenEntry{} } - joinCommand := joinCommandMap[nodeRole] - joinCommandMapMut.Unlock() + joinToken := joinTokenMap[nodeRole] + joinTokenMapMut.Unlock() - // lock the joinCommand struct entry - joinCommand.Mut.Lock() - defer joinCommand.Mut.Unlock() + // lock the joinToken struct entry + joinToken.Mut.Lock() + defer joinToken.Mut.Unlock() - // if the joinCommand has been generated in the past hour, return it - if joinCommand.Creation != nil && time.Now().Before(joinCommand.Creation.Add(time.Hour)) { - expiry := joinCommand.Creation.Add(time.Hour * 24) - return joinCommand.Command, &expiry, nil + // if the joinToken has been generated in the past hour, return it + if joinToken.Creation != nil && time.Now().Before(joinToken.Creation.Add(time.Hour)) { + return joinToken.Token, nil } newToken, err := runAddNodeCommandPod(ctx, client, nodeRole) if err != nil { - return nil, nil, fmt.Errorf("failed to run add node command pod: %w", err) 
- } - - newCmd, err := generateAddNodeCommand(ctx, client, nodeRole, newToken) - if err != nil { - return nil, nil, fmt.Errorf("failed to generate add node command: %w", err) + return "", fmt.Errorf("failed to run add node command pod: %w", err) } now := time.Now() - joinCommand.Command = newCmd - joinCommand.Creation = &now + joinToken.Token = newToken + joinToken.Creation = &now - expiry := now.Add(time.Hour * 24) - return newCmd, &expiry, nil + return newToken, nil } // run a pod that will generate the add node token @@ -213,32 +206,87 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node return string(podLogs), nil } -// generate the add node command from the join token, the node roles, and info from the embedded-cluster-config configmap -func generateAddNodeCommand(ctx context.Context, client kubernetes.Interface, nodeRole string, token string) ([]string, error) { +// GenerateAddNodeCommand returns the command a user should run to add a node with the provided token +// the command will be of the form 'helmvm node join ip:port UUID' +func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, token string) (string, error) { cm, err := ReadConfigMap(client) if err != nil { - return nil, fmt.Errorf("failed to read configmap: %w", err) + return "", fmt.Errorf("failed to read configmap: %w", err) } - clusterID := cm.Data["embedded-cluster-id"] binaryName := cm.Data["embedded-binary-name"] - clusterUUID := uuid.UUID{} - err = clusterUUID.UnmarshalText([]byte(clusterID)) + // get the IP of a controller node + nodeIP, err := getControllerNodeIP(ctx, client) + if err != nil { + return "", fmt.Errorf("failed to get controller node IP: %w", err) + } + + // get the port of the 'admin-console' service + port, err := getAdminConsolePort(ctx, client) + if err != nil { + return "", fmt.Errorf("failed to get admin console port: %w", err) + } + + return fmt.Sprintf("%s node join %s:%d %s", binaryName, nodeIP, port, token), nil +} + +// GenerateK0sJoinCommand returns the k0s node join command, without the token but with all other required flags +// (including node labels generated from the roles etc) +func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, roles []string) (string, error) { + k0sRole := "worker" + for _, role := range roles { + if role == "controller" { + k0sRole = "controller" + } + } + + cmd := []string{"/usr/local/bin/k0s", "install", k0sRole, "--force"} + if k0sRole == "controller" { + cmd = append(cmd, "--enable-worker") + } + + return strings.Join(cmd, " "), nil +} + +// gets the port of the 'admin-console' service +func getAdminConsolePort(ctx context.Context, client kubernetes.Interface) (int32, error) { + svc, err := client.CoreV1().Services(os.Getenv("POD_NAMESPACE")).Get(ctx, "admin-console", metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("failed to unmarshal cluster id %s: %w", clusterID, err) + return -1, fmt.Errorf("failed to get admin-console service: %w", err) } - fullToken := joinToken{ - ClusterID: clusterUUID, - Token: token, - Role: nodeRole, + for _, port := range svc.Spec.Ports { + if port.Name == "http" { + return port.NodePort, nil + } } + return -1, fmt.Errorf("did not find port 'http' in service 'admin-console'") +} - b64token, err := fullToken.Encode() +// getControllerNodeIP gets the IP of a healthy controller node +func getControllerNodeIP(ctx context.Context, client kubernetes.Interface) (string, error) { + nodes, err := client.CoreV1().Nodes().List(ctx, 
metav1.ListOptions{}) if err != nil { - return nil, fmt.Errorf("unable to encode token: %w", err) + return "", fmt.Errorf("failed to list nodes: %w", err) + } + + for _, node := range nodes.Items { + if cp, ok := node.Labels["node-role.kubernetes.io/control-plane"]; !ok || cp != "true" { + continue + } + + for _, condition := range node.Status.Conditions { + if condition.Type == "Ready" && condition.Status == "True" { + for _, address := range node.Status.Addresses { + if address.Type == "InternalIP" { + return address.Address, nil + } + } + } + } + } - return []string{binaryName + " node join", b64token}, nil + return "", fmt.Errorf("failed to find healthy controller node") } diff --git a/pkg/helmvm/util.go b/pkg/helmvm/util.go index ce358abab0..65d93f0c9f 100644 --- a/pkg/helmvm/util.go +++ b/pkg/helmvm/util.go @@ -43,3 +43,12 @@ func IsHelmVM(clientset kubernetes.Interface) (bool, error) { func IsHA(clientset kubernetes.Interface) (bool, error) { return true, nil } + +func ClusterID(client kubernetes.Interface) (string, error) { + configMap, err := ReadConfigMap(client) + if err != nil { + return "", fmt.Errorf("failed to read configmap: %w", err) + } + + return configMap.Data["embedded-cluster-id"], nil +} diff --git a/pkg/store/kotsstore/k0s_store.go b/pkg/store/kotsstore/k0s_store.go new file mode 100644 index 0000000000..61c717dd0b --- /dev/null +++ b/pkg/store/kotsstore/k0s_store.go @@ -0,0 +1,68 @@ +package kotsstore + +import ( + "encoding/json" + "fmt" + "github.com/google/uuid" + "github.com/replicatedhq/kots/pkg/persistence" + "github.com/rqlite/gorqlite" +) + +func (s *KOTSStore) SetK0sInstallCommandRoles(roles []string) (string, error) { + db := persistence.MustGetDBSession() + + installID := uuid.New().String() + + query := `delete from k0s_tokens where token = ?` + wr, err := db.WriteOneParameterized(gorqlite.ParameterizedStatement{ + Query: query, + Arguments: []interface{}{installID}, + }) + if err != nil { + return "", fmt.Errorf("delete k0s join token: %v: %v", err, wr.Err) + } + + jsonRoles, err := json.Marshal(roles) + if err != nil { + return "", fmt.Errorf("failed to marshal roles: %w", err) + } + + query = `insert into k0s_tokens (token, roles) values (?, ?)` + wr, err = db.WriteOneParameterized(gorqlite.ParameterizedStatement{ + Query: query, + Arguments: []interface{}{installID, string(jsonRoles)}, + }) + if err != nil { + return "", fmt.Errorf("insert k0s join token: %v: %v", err, wr.Err) + } + + return installID, nil +} + +func (s *KOTSStore) GetK0sInstallCommandRoles(token string) ([]string, error) { + db := persistence.MustGetDBSession() + query := `select roles from k0s_tokens where token = ?` + rows, err := db.QueryOneParameterized(gorqlite.ParameterizedStatement{ + Query: query, + Arguments: []interface{}{token}, + }) + if err != nil { + return nil, fmt.Errorf("failed to query: %v: %v", err, rows.Err) + } + if !rows.Next() { + return nil, ErrNotFound + } + + rolesStr := "" + if err = rows.Scan(&rolesStr); err != nil { + return nil, fmt.Errorf("failed to scan roles: %w", err) + } + + rolesArr := []string{} + err = json.Unmarshal([]byte(rolesStr), &rolesArr) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal roles: %w", err) + } + + return rolesArr, nil +} From e9f65d52e69e382fff6dfd4ad010a8dadca87e03 Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Thu, 19 Oct 2023 09:31:55 -0600 Subject: [PATCH 07/31] update percentages, add pods, fix link, show expiry (#4077) --- 
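Note: the "show expiry" part of this change displays a timestamp that has to be produced server-side, because the 24-hour token TTL lives in the helmvm package. Below is a minimal sketch of how a handler could compose the helpers above into the response this UI reads. The handler name and response shape are illustrative, not the actual kots handler, but the helmvm call signatures and the "command"/"expiry" field names match what is shown elsewhere in this series:

    package handlers

    import (
        "encoding/json"
        "net/http"
        "time"

        "github.com/replicatedhq/kots/pkg/helmvm"
        "k8s.io/client-go/kubernetes"
    )

    // writeAddNodeCommand is a hypothetical helper: mint (or reuse) a cached
    // join token, wrap it in the user-facing command, and report an upper
    // bound on when the token lapses.
    func writeAddNodeCommand(w http.ResponseWriter, r *http.Request, client kubernetes.Interface, nodeRole string) {
        token, err := helmvm.GenerateAddNodeToken(r.Context(), client, nodeRole)
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        command, err := helmvm.GenerateAddNodeCommand(r.Context(), client, token)
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        // tokens last 24 hours from generation; a token served from the
        // one-hour cache expires sooner than now+24h, so this is a bound
        resp := map[string]string{
            "command": command,
            "expiry":  time.Now().Add(24 * time.Hour).UTC().Format(time.RFC3339),
        }
        w.Header().Set("Content-Type", "application/json")
        _ = json.NewEncoder(w).Encode(resp)
    }
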
.../apps/HelmVMClusterManagement.tsx | 79 +++++++++---------- 1 file changed, 36 insertions(+), 43 deletions(-) diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index 45971a563f..8b5228d93c 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -248,26 +248,6 @@ const HelmVMClusterManagement = ({ }, enabled: selectedNodeTypes.length > 0, }); - - // TODO: import useMutation - // const { - // mutate: addNodeType, - // isLoading: addNodeTypeLoading, - // error: addNodeTypeError, - // } = useMutation({ - // mutationFn: async () => { - // return ( - // await fetch(`${process.env.API_ENDPOINT}/helmvm/nodes`, { - // headers: { - // "Content-Type": "application/json", - // Accept: "application/json", - // }, - // credentials: "include", - // method: "POST", - // }) - // ).json(); - // }, - // }); // #endregion const onAddNodeClick = () => { @@ -323,11 +303,6 @@ const HelmVMClusterManagement = ({ header: "Status", size: 150, }, - { - accessorKey: "disk", - header: "Disk", - size: 150, - }, { accessorKey: "cpu", header: "CPU", @@ -338,6 +313,11 @@ const HelmVMClusterManagement = ({ header: "Memory", size: 150, }, + { + accessorKey: "pods", + header: "Pods", + size: 150, + }, { accessorKey: "pause", header: "Pause", @@ -352,31 +332,39 @@ const HelmVMClusterManagement = ({ [] ); + const calculateUtilization = (capacity: number, available: number) => { + const used = capacity - available; + return Math.round((used / capacity) * 100); + }; + const mappedNodes = useMemo(() => { return (nodesData?.nodes || testData.nodes).map((n) => ({ name: slug ? ( + > + {n.name} + ) : ( n.name ), roles: (
{n.labels.map((l) => ( - + {l} ))}
), status: n.isReady ? "Ready" : "Not Ready", - disk: n.conditions.diskPressure ? "Disk Pressure" : "No Disk Pressure", - cpu: n.conditions.pidPressure ? "CPU Pressure" : "No CPU Pressure", - memory: n.conditions.memoryPressure - ? "Memory Pressure" - : "No Memory Pressure", + cpu: `${calculateUtilization(n.cpu.capacity, n.cpu.available)}%`, + memory: `${calculateUtilization(n.memory.capacity, n.memory.available)}%`, + pods: `${n.pods.capacity - n.pods.available} / ${n.pods.capacity}`, pause: ( <> @@ -536,7 +524,7 @@ const HelmVMClusterManagement = ({ ))}
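+          {/* the join-command query above is enabled only once a node type is
+              selected, so the spinner is gated on the same condition to avoid
+              flashing before any request is actually in flight */}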
- {generateAddNodeCommandLoading && ( + {selectedNodeTypes.length > 0 && generateAddNodeCommandLoading && (

Generating command...

@@ -547,16 +535,21 @@ const HelmVMClusterManagement = ({

)} {!generateAddNodeCommandLoading && generateAddNodeCommand?.command && ( - Copied! - } - > - {generateAddNodeCommand?.command || ""} - + <> + Copied! + } + > + {generateAddNodeCommand?.command} + +

+ Command expires: {generateAddNodeCommand?.expiry} +

+ )}
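+          {/* the expiry string is computed server-side from the join token
+              TTL and is display-only in this view */}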
{/* buttons */} From 92106b981d90f2ab88ab199c46e8bfd1ddadfaef Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Thu, 19 Oct 2023 09:41:05 -0600 Subject: [PATCH 08/31] fix routing, add missing slash (#4079) --- web/src/Root.tsx | 5 +---- web/src/components/apps/HelmVMClusterManagement.tsx | 6 +++--- web/src/components/apps/HelmVMViewNode.jsx | 4 ++-- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/web/src/Root.tsx b/web/src/Root.tsx index 05855d5abc..866fb6fd44 100644 --- a/web/src/Root.tsx +++ b/web/src/Root.tsx @@ -607,10 +607,7 @@ const Root = () => { /> {/* )} */} {/* {state.adminConsoleMetadata?.isHelmVM && ( */} - } - /> + } /> {/* )} */} { return (nodesData?.nodes || testData.nodes).map((n) => ({ name: slug ? ( + n.name + ) : ( {n.name} - ) : ( - n.name ), roles: (
diff --git a/web/src/components/apps/HelmVMViewNode.jsx b/web/src/components/apps/HelmVMViewNode.jsx index 15fa3ee4d7..e30b0a9148 100644 --- a/web/src/components/apps/HelmVMViewNode.jsx +++ b/web/src/components/apps/HelmVMViewNode.jsx @@ -270,7 +270,7 @@ const testData = { }; const HelmVMViewNode = () => { - const { slug, nodeName } = useParams(); + const { nodeName } = useParams(); const { data: nodeData } = useQuery({ queryKey: ["helmVmNode", nodeName], queryFn: async ({ queryKey }) => { @@ -368,7 +368,7 @@ const HelmVMViewNode = () => { {/* Breadcrumbs */}
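+          {/* use an absolute path here; without the leading slash the link
+              resolves relative to the current route (the missing slash this
+              patch's subject refers to) */}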

Cluster Nodes From 0bfbb3806c60449521a39ba3e7b1c5b2ee47d2b7 Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Thu, 19 Oct 2023 10:51:01 -0600 Subject: [PATCH 09/31] remove test data, uncomment route protection, fix redirect after license upload (#4081) --- web/src/Root.tsx | 63 ++-- web/src/components/UploadLicenseFile.tsx | 3 +- .../apps/HelmVMClusterManagement.tsx | 170 +++-------- web/src/components/apps/HelmVMViewNode.jsx | 268 +----------------- 4 files changed, 75 insertions(+), 429 deletions(-) diff --git a/web/src/Root.tsx b/web/src/Root.tsx index 866fb6fd44..86bef0aa94 100644 --- a/web/src/Root.tsx +++ b/web/src/Root.tsx @@ -101,8 +101,6 @@ type State = { selectedAppName: string | null; snapshotInProgressApps: string[]; themeState: ThemeState; - isKurl: boolean | null; - isHelmVM: boolean | null; }; let interval: ReturnType | undefined; @@ -134,8 +132,6 @@ const Root = () => { navbarLogo: null, }, app: null, - isKurl: null, - isHelmVM: null, } ); @@ -307,8 +303,6 @@ const Root = () => { adminConsoleMetadata: data.adminConsoleMetadata, featureFlags: data.consoleFeatureFlags, fetchingMetadata: false, - isKurl: data.isKurl, - isHelmVM: data.isHelmVM, }); }) .catch((err) => { @@ -538,8 +532,7 @@ const Root = () => { appSlugFromMetadata={state.appSlugFromMetadata || ""} fetchingMetadata={state.fetchingMetadata} onUploadSuccess={getAppsList} - isKurl={!!state.isKurl} - isHelmVM={!!state.isHelmVM} + isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)} /> } /> @@ -582,33 +575,33 @@ const Root = () => { } /> } /> - {/* {state.adminConsoleMetadata?.isHelmVM && ( */} - - } - /> - {/* )} */} - {/* {(state.adminConsoleMetadata?.isKurl || - state.adminConsoleMetadata?.isHelmVM) && ( */} - - ) : ( - - ) - } - /> - {/* )} */} - {/* {state.adminConsoleMetadata?.isHelmVM && ( */} - } /> - {/* )} */} + {state.adminConsoleMetadata?.isHelmVM && ( + + } + /> + )} + {(state.adminConsoleMetadata?.isKurl || + state.adminConsoleMetadata?.isHelmVM) && ( + + ) : ( + + ) + } + /> + )} + {state.adminConsoleMetadata?.isHelmVM && ( + } /> + )} } diff --git a/web/src/components/UploadLicenseFile.tsx b/web/src/components/UploadLicenseFile.tsx index 2fe1ed8a12..8992f063b2 100644 --- a/web/src/components/UploadLicenseFile.tsx +++ b/web/src/components/UploadLicenseFile.tsx @@ -68,7 +68,6 @@ type Props = { logo: string | null; snapshot?: { name: string }; isHelmVM: boolean; - isKurl: boolean; }; const UploadLicenseFile = (props: Props) => { @@ -267,7 +266,7 @@ const UploadLicenseFile = (props: Props) => { return; } - if (props.isHelmVM && !props.isKurl) { + if (props.isHelmVM) { navigate(`/${data.slug}/cluster/manage`, { replace: true }); return; } diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index 1101c11352..aa38833731 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -1,8 +1,8 @@ +import { useQuery } from "@tanstack/react-query"; import classNames from "classnames"; import MaterialReactTable from "material-react-table"; import React, { ChangeEvent, useMemo, useReducer, useState } from "react"; import Modal from "react-modal"; -import { useQuery } from "@tanstack/react-query"; import { Link, useParams } from "react-router-dom"; import { KotsPageTitle } from "@components/Head"; @@ -14,82 +14,6 @@ import CodeSnippet from "../shared/CodeSnippet"; import "@src/scss/components/apps/HelmVMClusterManagement.scss"; -const 
testData = { - isHelmVMEnabled: true, - ha: false, - nodes: [ - { - name: "test-helmvm-node", - isConnected: true, - isReady: true, - isPrimaryNode: true, - canDelete: false, - kubeletVersion: "v1.28.2", - cpu: { - capacity: 8, - available: 7.466876775, - }, - memory: { - capacity: 31.33294677734375, - available: 24.23790740966797, - }, - pods: { - capacity: 110, - available: 77, - }, - labels: [ - "beta.kubernetes.io/arch:amd64", - "beta.kubernetes.io/os:linux", - "node-role.kubernetes.io/master:", - "node.kubernetes.io/exclude-from-external-load-balancers:", - "kubernetes.io/arch:amd64", - "kubernetes.io/hostname:laverya-kurl", - "kubernetes.io/os:linux", - "node-role.kubernetes.io/control-plane:", - ], - conditions: { - memoryPressure: false, - diskPressure: false, - pidPressure: false, - ready: true, - }, - }, - { - name: "test-helmvm-worker", - isConnected: true, - isReady: true, - isPrimaryNode: false, - canDelete: false, - kubeletVersion: "v1.28.2", - cpu: { - capacity: 4, - available: 3.761070507, - }, - memory: { - capacity: 15.50936508178711, - available: 11.742542266845703, - }, - pods: { - capacity: 110, - available: 94, - }, - labels: [ - "beta.kubernetes.io/arch:amd64", - "beta.kubernetes.io/os:linux", - "kubernetes.io/arch:amd64", - "kubernetes.io/os:linux", - "kurl.sh/cluster:true", - ], - conditions: { - memoryPressure: false, - diskPressure: false, - pidPressure: false, - ready: true, - }, - }, - ], -}; - type State = { displayAddNode: boolean; confirmDeleteNode: string; @@ -260,15 +184,6 @@ const HelmVMClusterManagement = ({ const NODE_TYPES = ["controller"]; const determineDisabledState = () => { - // if (nodeType === "controller") { - // const numControllers = testData.nodes.reduce((acc, node) => { - // if (node.labels.includes("controller")) { - // acc += 1; - // } - // return acc; - // }, 0); - // return numControllers === 3; - // } return false; }; @@ -338,44 +253,49 @@ const HelmVMClusterManagement = ({ }; const mappedNodes = useMemo(() => { - return (nodesData?.nodes || testData.nodes).map((n) => ({ - name: slug ? ( - n.name - ) : ( - - {n.name} - - ), - roles: ( -

- {n.labels.map((l) => ( - - {l} - - ))} -
- ), - status: n.isReady ? "Ready" : "Not Ready", - cpu: `${calculateUtilization(n.cpu.capacity, n.cpu.available)}%`, - memory: `${calculateUtilization(n.memory.capacity, n.memory.available)}%`, - pods: `${n.pods.capacity - n.pods.available} / ${n.pods.capacity}`, - pause: ( - <> - - - ), - delete: ( - <> - - - ), - })); + return ( + nodesData?.nodes?.map((n) => ({ + name: slug ? ( + n.name + ) : ( + + {n.name} + + ), + roles: ( +
+ {n.labels.map((l) => ( + + {l} + + ))} +
+ ), + status: n.isReady ? "Ready" : "Not Ready", + cpu: `${calculateUtilization(n.cpu.capacity, n.cpu.available)}%`, + memory: `${calculateUtilization( + n.memory.capacity, + n.memory.available + )}%`, + pods: `${n.pods.capacity - n.pods.available} / ${n.pods.capacity}`, + pause: ( + <> + + + ), + delete: ( + <> + + + ), + })) || [] + ); }, [nodesData?.nodes?.toString()]); // #endregion @@ -412,7 +332,7 @@ const HelmVMClusterManagement = ({ {nodesError?.message}

)} - {(nodesData?.nodes || testData?.nodes) && ( + {nodesData?.nodes && ( { const { nodeName } = useParams(); const { data: nodeData } = useQuery({ @@ -306,7 +40,7 @@ const HelmVMViewNode = () => { }, }); - const node = nodeData || testData.nodes[0]; + const node = nodeData; // #region table data const columns = useMemo( From 8f2427a31157a0dc77c9ff902de8adb42c9d43a8 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Thu, 19 Oct 2023 11:02:23 -0600 Subject: [PATCH 10/31] 24, not 60-something, character join tokens (#4080) --- pkg/store/kotsstore/k0s_store.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/store/kotsstore/k0s_store.go b/pkg/store/kotsstore/k0s_store.go index 61c717dd0b..4eb7a9bd7b 100644 --- a/pkg/store/kotsstore/k0s_store.go +++ b/pkg/store/kotsstore/k0s_store.go @@ -3,15 +3,16 @@ package kotsstore import ( "encoding/json" "fmt" - "github.com/google/uuid" - "github.com/replicatedhq/kots/pkg/persistence" "github.com/rqlite/gorqlite" + + "github.com/replicatedhq/kots/pkg/persistence" + "github.com/replicatedhq/kots/pkg/rand" ) func (s *KOTSStore) SetK0sInstallCommandRoles(roles []string) (string, error) { db := persistence.MustGetDBSession() - installID := uuid.New().String() + installID := rand.StringWithCharset(24, rand.LOWER_CASE+rand.UPPER_CASE) query := `delete from k0s_tokens where token = ?` wr, err := db.WriteOneParameterized(gorqlite.ParameterizedStatement{ From cac19526cdf4d2985f6d8c5b7e48ec00354a9a81 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Thu, 19 Oct 2023 11:02:33 -0600 Subject: [PATCH 11/31] node usage metrics not being collected is not a fatal error (#4082) --- pkg/helmvm/helmvm_node.go | 74 +++++++++++++++++--------------- pkg/helmvm/helmvm_nodes.go | 87 ++------------------------------------ 2 files changed, 43 insertions(+), 118 deletions(-) diff --git a/pkg/helmvm/helmvm_node.go b/pkg/helmvm/helmvm_node.go index 7805400c1d..7054e0cf26 100644 --- a/pkg/helmvm/helmvm_node.go +++ b/pkg/helmvm/helmvm_node.go @@ -31,7 +31,31 @@ func GetNode(ctx context.Context, client kubernetes.Interface, nodeName string) return nil, fmt.Errorf("failed to create metrics client: %w", err) } - nodePods, err := podsOnNode(ctx, client, nodeName) + return nodeMetrics(ctx, client, metricsClient, *node) +} + +func podsOnNode(ctx context.Context, client kubernetes.Interface, nodeName string) ([]corev1.Pod, error) { + namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("list namespaces: %w", err) + } + + toReturn := []corev1.Pod{} + + for _, ns := range namespaces.Items { + nsPods, err := client.CoreV1().Pods(ns.Name).List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName)}) + if err != nil { + return nil, fmt.Errorf("list pods on %s in namespace %s: %w", nodeName, ns.Name, err) + } + + toReturn = append(toReturn, nsPods.Items...) 
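+		// note: the spec.nodeName field selector filters server-side, and
+		// listing per namespace (rather than one cluster-wide list) also
+		// works for clients that only hold namespace-scoped list rights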
+ } + return toReturn, nil +} + +// nodeMetrics takes a corev1.Node and gets metrics + status for that node +func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient *metricsv.Clientset, node corev1.Node) (*types.Node, error) { + nodePods, err := podsOnNode(ctx, client, node.Name) if err != nil { return nil, fmt.Errorf("pods per node: %w", err) } @@ -49,17 +73,18 @@ func GetNode(ctx context.Context, client kubernetes.Interface, nodeName string) podCapacity.Capacity = float64(node.Status.Capacity.Pods().Value()) - nodeMetrics, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("list pod metrics: %w", err) - } - - if nodeMetrics.Usage.Memory() != nil { - memoryCapacity.Available = memoryCapacity.Capacity - float64(nodeMetrics.Usage.Memory().Value())/math.Pow(2, 30) - } + nodeUsageMetrics, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{}) + if err == nil { + if nodeUsageMetrics.Usage.Memory() != nil { + memoryCapacity.Available = memoryCapacity.Capacity - float64(nodeUsageMetrics.Usage.Memory().Value())/math.Pow(2, 30) + } - if nodeMetrics.Usage.Cpu() != nil { - cpuCapacity.Available = cpuCapacity.Capacity - nodeMetrics.Usage.Cpu().AsApproximateFloat64() + if nodeUsageMetrics.Usage.Cpu() != nil { + cpuCapacity.Available = cpuCapacity.Capacity - nodeUsageMetrics.Usage.Cpu().AsApproximateFloat64() + } + } else { + // if we can't get metrics, we'll do nothing for now + // in the future we may decide to retry or log a warning } podCapacity.Available = podCapacity.Capacity - float64(len(nodePods)) @@ -71,10 +96,10 @@ func GetNode(ctx context.Context, client kubernetes.Interface, nodeName string) return &types.Node{ Name: node.Name, - IsConnected: isConnected(*node), - IsReady: isReady(*node), - IsPrimaryNode: isPrimary(*node), - CanDelete: node.Spec.Unschedulable && !isConnected(*node), + IsConnected: isConnected(node), + IsReady: isReady(node), + IsPrimaryNode: isPrimary(node), + CanDelete: node.Spec.Unschedulable && !isConnected(node), KubeletVersion: node.Status.NodeInfo.KubeletVersion, CPU: cpuCapacity, Memory: memoryCapacity, @@ -84,22 +109,3 @@ func GetNode(ctx context.Context, client kubernetes.Interface, nodeName string) PodList: nodePods, }, nil } - -func podsOnNode(ctx context.Context, client kubernetes.Interface, nodeName string) ([]corev1.Pod, error) { - namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("list namespaces: %w", err) - } - - toReturn := []corev1.Pod{} - - for _, ns := range namespaces.Items { - nsPods, err := client.CoreV1().Pods(ns.Name).List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName)}) - if err != nil { - return nil, fmt.Errorf("list pods on %s in namespace %s: %w", nodeName, ns.Name, err) - } - - toReturn = append(toReturn, nsPods.Items...) 
- } - return toReturn, nil -} diff --git a/pkg/helmvm/helmvm_nodes.go b/pkg/helmvm/helmvm_nodes.go index f8bfefff4b..9396e6508c 100644 --- a/pkg/helmvm/helmvm_nodes.go +++ b/pkg/helmvm/helmvm_nodes.go @@ -2,10 +2,6 @@ package helmvm import ( "context" - "fmt" - "math" - "strconv" - "github.com/pkg/errors" "github.com/replicatedhq/kots/pkg/helmvm/types" "github.com/replicatedhq/kots/pkg/k8sutil" @@ -34,58 +30,13 @@ func GetNodes(ctx context.Context, client kubernetes.Interface) (*types.HelmVMNo toReturn := types.HelmVMNodes{} - nodePods, err := podsPerNode(ctx, client) - if err != nil { - return nil, errors.Wrap(err, "pods per node") - } - for _, node := range nodes.Items { - cpuCapacity := types.CapacityAvailable{} - memoryCapacity := types.CapacityAvailable{} - podCapacity := types.CapacityAvailable{} - - memoryCapacity.Capacity = float64(node.Status.Capacity.Memory().Value()) / math.Pow(2, 30) // capacity in GB - - cpuCapacity.Capacity, err = strconv.ParseFloat(node.Status.Capacity.Cpu().String(), 64) - if err != nil { - return nil, errors.Wrapf(err, "parse CPU capacity %q for node %s", node.Status.Capacity.Cpu().String(), node.Name) - } - - podCapacity.Capacity = float64(node.Status.Capacity.Pods().Value()) - - nodeMetrics, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{}) + nodeMet, err := nodeMetrics(ctx, client, metricsClient, node) if err != nil { - return nil, errors.Wrap(err, "list pod metrics") - } - - if nodeMetrics.Usage.Memory() != nil { - memoryCapacity.Available = memoryCapacity.Capacity - float64(nodeMetrics.Usage.Memory().Value())/math.Pow(2, 30) + return nil, errors.Wrap(err, "node metrics") } - if nodeMetrics.Usage.Cpu() != nil { - cpuCapacity.Available = cpuCapacity.Capacity - nodeMetrics.Usage.Cpu().AsApproximateFloat64() - } - - podCapacity.Available = podCapacity.Capacity - float64(nodePods[node.Name]) - - nodeLabelArray := []string{} - for k, v := range node.Labels { - nodeLabelArray = append(nodeLabelArray, fmt.Sprintf("%s:%s", k, v)) - } - - toReturn.Nodes = append(toReturn.Nodes, types.Node{ - Name: node.Name, - IsConnected: isConnected(node), - IsReady: isReady(node), - IsPrimaryNode: isPrimary(node), - CanDelete: node.Spec.Unschedulable && !isConnected(node), - KubeletVersion: node.Status.NodeInfo.KubeletVersion, - CPU: cpuCapacity, - Memory: memoryCapacity, - Pods: podCapacity, - Labels: nodeLabelArray, - Conditions: findNodeConditions(node.Status.Conditions), - }) + toReturn.Nodes = append(toReturn.Nodes, *nodeMet) } isHelmVM, err := IsHelmVM(client) @@ -122,38 +73,6 @@ func findNodeConditions(conditions []corev1.NodeCondition) types.NodeConditions return discoveredConditions } -// podsPerNode returns a map of node names to the number of pods, across all namespaces -func podsPerNode(ctx context.Context, client kubernetes.Interface) (map[string]int, error) { - namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, errors.Wrap(err, "list namespaces") - } - - toReturn := map[string]int{} - - for _, ns := range namespaces.Items { - nsPods, err := client.CoreV1().Pods(ns.Name).List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "list pods in namespace %s", ns.Name) - } - - for _, pod := range nsPods.Items { - pod := pod - if pod.Spec.NodeName == "" { - continue - } - - if _, ok := toReturn[pod.Spec.NodeName]; !ok { - toReturn[pod.Spec.NodeName] = 0 - } - - toReturn[pod.Spec.NodeName]++ - } - } - - return toReturn, nil -} - func 
isConnected(node corev1.Node) bool { for _, taint := range node.Spec.Taints { if taint.Key == "node.kubernetes.io/unreachable" { From 32bc2eb102ab9927cf1227fb12531446f51d7dd7 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Thu, 19 Oct 2023 11:43:12 -0600 Subject: [PATCH 12/31] include pod usage metrics (#4083) add kube-proxy/os/kernel to node metrics return 'used' not 'available' --- pkg/helmvm/helmvm_node.go | 67 ++++++++++++++++++++++++++++----------- pkg/helmvm/types/types.go | 43 +++++++++++++++---------- 2 files changed, 75 insertions(+), 35 deletions(-) diff --git a/pkg/helmvm/helmvm_node.go b/pkg/helmvm/helmvm_node.go index 7054e0cf26..cf576b8aa2 100644 --- a/pkg/helmvm/helmvm_node.go +++ b/pkg/helmvm/helmvm_node.go @@ -60,9 +60,9 @@ func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient return nil, fmt.Errorf("pods per node: %w", err) } - cpuCapacity := types.CapacityAvailable{} - memoryCapacity := types.CapacityAvailable{} - podCapacity := types.CapacityAvailable{} + cpuCapacity := types.CapacityUsed{} + memoryCapacity := types.CapacityUsed{} + podCapacity := types.CapacityUsed{} memoryCapacity.Capacity = float64(node.Status.Capacity.Memory().Value()) / math.Pow(2, 30) // capacity in GB @@ -76,36 +76,67 @@ func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient nodeUsageMetrics, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{}) if err == nil { if nodeUsageMetrics.Usage.Memory() != nil { - memoryCapacity.Available = memoryCapacity.Capacity - float64(nodeUsageMetrics.Usage.Memory().Value())/math.Pow(2, 30) + memoryCapacity.Used = float64(nodeUsageMetrics.Usage.Memory().Value()) / math.Pow(2, 30) } if nodeUsageMetrics.Usage.Cpu() != nil { - cpuCapacity.Available = cpuCapacity.Capacity - nodeUsageMetrics.Usage.Cpu().AsApproximateFloat64() + cpuCapacity.Used = nodeUsageMetrics.Usage.Cpu().AsApproximateFloat64() } } else { // if we can't get metrics, we'll do nothing for now // in the future we may decide to retry or log a warning } - podCapacity.Available = podCapacity.Capacity - float64(len(nodePods)) + podCapacity.Used = float64(len(nodePods)) nodeLabelArray := []string{} for k, v := range node.Labels { nodeLabelArray = append(nodeLabelArray, fmt.Sprintf("%s:%s", k, v)) } + podInfo := []types.PodInfo{} + + for _, pod := range nodePods { + newInfo := types.PodInfo{ + Name: pod.Name, + Namespace: pod.Namespace, + Status: string(pod.Status.Phase), + } + + podMetrics, err := metricsClient.MetricsV1beta1().PodMetricses(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + if err == nil { + podTotalMemory := 0.0 + podTotalCPU := 0.0 + for _, container := range podMetrics.Containers { + if container.Usage.Memory() != nil { + podTotalMemory += float64(container.Usage.Memory().Value()) / math.Pow(2, 30) + } + if container.Usage.Cpu() != nil { + podTotalCPU += container.Usage.Cpu().AsApproximateFloat64() + } + } + newInfo.Memory = podTotalMemory + newInfo.CPU = podTotalCPU + } + + podInfo = append(podInfo, newInfo) + } + return &types.Node{ - Name: node.Name, - IsConnected: isConnected(node), - IsReady: isReady(node), - IsPrimaryNode: isPrimary(node), - CanDelete: node.Spec.Unschedulable && !isConnected(node), - KubeletVersion: node.Status.NodeInfo.KubeletVersion, - CPU: cpuCapacity, - Memory: memoryCapacity, - Pods: podCapacity, - Labels: nodeLabelArray, - Conditions: findNodeConditions(node.Status.Conditions), - PodList: nodePods, + Name: node.Name, + IsConnected: isConnected(node), + 
IsReady: isReady(node), + IsPrimaryNode: isPrimary(node), + CanDelete: node.Spec.Unschedulable && !isConnected(node), + KubeletVersion: node.Status.NodeInfo.KubeletVersion, + KubeProxyVersion: node.Status.NodeInfo.KubeProxyVersion, + OperatingSystem: node.Status.NodeInfo.OperatingSystem, + KernelVersion: node.Status.NodeInfo.KernelVersion, + CPU: cpuCapacity, + Memory: memoryCapacity, + Pods: podCapacity, + Labels: nodeLabelArray, + Conditions: findNodeConditions(node.Status.Conditions), + PodList: podInfo, }, nil } diff --git a/pkg/helmvm/types/types.go b/pkg/helmvm/types/types.go index 78ea23f248..b94f254dc0 100644 --- a/pkg/helmvm/types/types.go +++ b/pkg/helmvm/types/types.go @@ -1,7 +1,5 @@ package types -import corev1 "k8s.io/api/core/v1" - type HelmVMNodes struct { Nodes []Node `json:"nodes"` HA bool `json:"ha"` @@ -9,23 +7,26 @@ type HelmVMNodes struct { } type Node struct { - Name string `json:"name"` - IsConnected bool `json:"isConnected"` - IsReady bool `json:"isReady"` - IsPrimaryNode bool `json:"isPrimaryNode"` - CanDelete bool `json:"canDelete"` - KubeletVersion string `json:"kubeletVersion"` - CPU CapacityAvailable `json:"cpu"` - Memory CapacityAvailable `json:"memory"` - Pods CapacityAvailable `json:"pods"` - Labels []string `json:"labels"` - Conditions NodeConditions `json:"conditions"` - PodList []corev1.Pod `json:"podList"` + Name string `json:"name"` + IsConnected bool `json:"isConnected"` + IsReady bool `json:"isReady"` + IsPrimaryNode bool `json:"isPrimaryNode"` + CanDelete bool `json:"canDelete"` + KubeletVersion string `json:"kubeletVersion"` + KubeProxyVersion string `json:"kubeProxyVersion"` + OperatingSystem string `json:"operatingSystem"` + KernelVersion string `json:"kernelVersion"` + CPU CapacityUsed `json:"cpu"` + Memory CapacityUsed `json:"memory"` + Pods CapacityUsed `json:"pods"` + Labels []string `json:"labels"` + Conditions NodeConditions `json:"conditions"` + PodList []PodInfo `json:"podList"` } -type CapacityAvailable struct { - Capacity float64 `json:"capacity"` - Available float64 `json:"available"` +type CapacityUsed struct { + Capacity float64 `json:"capacity"` + Used float64 `json:"used"` } type NodeConditions struct { @@ -34,3 +35,11 @@ type NodeConditions struct { PidPressure bool `json:"pidPressure"` Ready bool `json:"ready"` } + +type PodInfo struct { + Name string `json:"name"` + Status string `json:"status"` + Namespace string `json:"namespace"` + CPU float64 `json:"cpu"` + Memory float64 `json:"memory"` +} From e9d4d741b65c84d757eb95763280e5db735bdef5 Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Thu, 19 Oct 2023 12:06:37 -0600 Subject: [PATCH 13/31] remove pause and delete columns, update styles (#4085) * remove pause and delete columns until the api code is ready, add loading state, update styles * update variable names, fix redirect to cluster manage --- web/src/components/UploadLicenseFile.tsx | 8 +- .../apps/HelmVMClusterManagement.tsx | 198 ++++++++-------- web/src/components/apps/HelmVMViewNode.jsx | 211 ++++++++++-------- .../apps/HelmVMClusterManagement.scss | 25 --- 4 files changed, 211 insertions(+), 231 deletions(-) diff --git a/web/src/components/UploadLicenseFile.tsx b/web/src/components/UploadLicenseFile.tsx index 8992f063b2..e08cf5b829 100644 --- a/web/src/components/UploadLicenseFile.tsx +++ b/web/src/components/UploadLicenseFile.tsx @@ -261,13 +261,13 @@ const UploadLicenseFile = (props: Props) => { return; } - if (data.isConfigurable) { - 
navigate(`/${data.slug}/config`, { replace: true }); + if (props.isHelmVM) { + navigate(`/${data.slug}/cluster/manage`, { replace: true }); return; } - if (props.isHelmVM) { - navigate(`/${data.slug}/cluster/manage`, { replace: true }); + if (data.isConfigurable) { + navigate(`/${data.slug}/config`, { replace: true }); return; } diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index aa38833731..a545de0e75 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -65,15 +65,15 @@ const HelmVMClusterManagement = ({ kubeletVersion: string; cpu: { capacity: number; - available: number; + used: number; }; memory: { capacity: number; - available: number; + used: number; }; pods: { capacity: number; - available: number; + used: number; }; labels: string[]; conditions: { @@ -233,25 +233,20 @@ const HelmVMClusterManagement = ({ header: "Pods", size: 150, }, - { - accessorKey: "pause", - header: "Pause", - size: 100, - }, - { - accessorKey: "delete", - header: "Delete", - size: 100, - }, + // { + // accessorKey: "pause", + // header: "Pause", + // size: 100, + // }, + // { + // accessorKey: "delete", + // header: "Delete", + // size: 80, + // }, ], [] ); - const calculateUtilization = (capacity: number, available: number) => { - const used = capacity - available; - return Math.round((used / capacity) * 100); - }; - const mappedNodes = useMemo(() => { return ( nodesData?.nodes?.map((n) => ({ @@ -278,12 +273,11 @@ const HelmVMClusterManagement = ({
), status: n.isReady ? "Ready" : "Not Ready", - cpu: `${calculateUtilization(n.cpu.capacity, n.cpu.available)}%`, - memory: `${calculateUtilization( - n.memory.capacity, - n.memory.available - )}%`, - pods: `${n.pods.capacity - n.pods.available} / ${n.pods.capacity}`, + cpu: `${n.cpu.used.toFixed(2)} / ${n.cpu.capacity.toFixed(2)}`, + memory: `${n.memory.used.toFixed(2)}GB / ${n.memory.capacity.toFixed( + 2 + )}GB`, + pods: `${n.pods.used} / ${n.pods.capacity}`, pause: ( <> @@ -300,93 +294,89 @@ const HelmVMClusterManagement = ({ // #endregion return ( -
+
-
-
-

- Cluster Nodes +

+

+ Cluster Nodes +

+
+

+ This page lists the nodes that are configured and shows the + status/health of each.

-
-

- This page lists the nodes that are configured and shows the - status/health of each. + {Utilities.sessionRolesHasOneOf([rbacRoles.CLUSTER_ADMIN]) && ( + + )} +

+
+ {nodesLoading && ( +

+ Loading nodes...

- {Utilities.sessionRolesHasOneOf([rbacRoles.CLUSTER_ADMIN]) && ( - - )} -
-
- {nodesLoading && ( -

- Loading nodes... -

- )} - {!nodesData && nodesError && ( -

- {nodesError?.message} -

- )} - {nodesData?.nodes && ( - + {nodesError?.message} +

+ )} + {nodesData?.nodes && ( + - )} -
- {fromLicenseFlow && ( - - Continue - + }, + }} + muiTableBodyCellProps={{ + sx: { + borderRight: "2px solid #e0e0e0", + }, + }} + muiTablePaperProps={{ + sx: { + width: "100%", + boxShadow: "none", + }, + }} + initialState={{ density: "compact" }} + enablePagination={false} + enableColumnFilters={false} + /> )}
+ {fromLicenseFlow && ( + + Continue + + )}
{/* MODALS */} { const { nodeName } = useParams(); - const { data: nodeData } = useQuery({ + const { data: nodeData, isLoading: nodeLoading } = useQuery({ queryKey: ["helmVmNode", nodeName], queryFn: async ({ queryKey }) => { const [, nodeName] = queryKey; @@ -57,11 +58,6 @@ const HelmVMViewNode = () => { header: "Status", size: 150, }, - { - accessorKey: "disk", - header: "Disk", - size: 150, - }, { accessorKey: "cpu", header: "CPU", @@ -72,23 +68,22 @@ const HelmVMViewNode = () => { header: "Memory", size: 150, }, - { - accessorKey: "canDelete", - header: "Delete Pod", - size: 150, - }, + // { + // accessorKey: "delete", + // header: "Delete", + // size: 80, + // }, ], [] ); const mappedPods = useMemo(() => { return node?.podList?.map((p) => ({ - name: p.metadata.name, - status: p.status.phase, - disk: null, - cpu: null, - memory: null, - canDelete: ( + name: p.name, + status: p.status, + cpu: p.cpu, + memory: `${p.memory}GB`, + delete: ( <> @@ -98,7 +93,7 @@ const HelmVMViewNode = () => { // #endregion return ( -
+
{/* Breadcrumbs */}

{ > Cluster Nodes {" "} - / {node?.name} + / {nodeName}

- {/* Node Info */} -
-

- Node Info -

-
-

Name

-

{node?.name}

+ + {nodeLoading && ( +
+
-
- {/* Pods table */} -
-

Pods

- -
- {/* Troubleshooting */} -
-

- Troubleshooting -

-
- {/* Danger Zone */} -
-

- Danger Zone -

- -
+ )} + {!nodeLoading && node && ( + <> + {/* Node Info */} +
+

+ {node?.name} +

+
+
+

+ kubelet version +

+

{node?.kubeletVersion}

+
+
+

+ kube-proxy version +

+

{node?.kubeletVersion}

+
+
+

OS

+

{node?.kubeletVersion}

+
+
+

+ kurl version +

+

{node?.kubeletVersion}

+
+
+
+ {/* Pods table */} +
+

Pods

+
+ +
+
+ {/* Troubleshooting */} + {/*
+

+ Troubleshooting +

+
*/} + {/* Danger Zone */} + {/*
+

+ Danger Zone +

+ +
*/} + + )}
); }; diff --git a/web/src/scss/components/apps/HelmVMClusterManagement.scss b/web/src/scss/components/apps/HelmVMClusterManagement.scss index cd8bc74a01..2fd75adb03 100644 --- a/web/src/scss/components/apps/HelmVMClusterManagement.scss +++ b/web/src/scss/components/apps/HelmVMClusterManagement.scss @@ -5,31 +5,6 @@ height: 85px; width: 200px; } - - .timestamp { - position: relative; - margin-top: -10px; - z-index: -1; - } - - .node-label { - font-size: 12px; - font-weight: 500; - line-height: 12px; - color: #577981; - padding: 4px 6px; - border-radius: 20px; - background-color: #ffffff; - white-space: nowrap; - border: 1px solid #577981; - margin-right: 8px; - display: inline-block; - margin-top: 8px; - - &:last-child { - margin-right: 0; - } - } } .HelmVMNodeRow--wrapper { From 3aea55b1a441d55573804cafa502ab1b0cbaebe5 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Thu, 19 Oct 2023 13:00:10 -0600 Subject: [PATCH 14/31] include 'sudo ./' at the beginning of the node join command (#4088) --- pkg/helmvm/jointoken.go | 25 ------------------------- pkg/helmvm/node_join.go | 2 +- 2 files changed, 1 insertion(+), 26 deletions(-) delete mode 100644 pkg/helmvm/jointoken.go diff --git a/pkg/helmvm/jointoken.go b/pkg/helmvm/jointoken.go deleted file mode 100644 index d723f6f015..0000000000 --- a/pkg/helmvm/jointoken.go +++ /dev/null @@ -1,25 +0,0 @@ -package helmvm - -import ( - "encoding/base64" - "encoding/json" - - "github.com/google/uuid" -) - -// joinToken is a struct that holds both the actual token and the cluster id. This is marshaled -// and base64 encoded and used as argument to the join command in the other nodes. -type joinToken struct { - ClusterID uuid.UUID `json:"clusterID"` - Token string `json:"token"` - Role string `json:"role"` -} - -// Encode encodes a JoinToken to base64. 
-func (j *joinToken) Encode() (string, error) { - b, err := json.Marshal(j) - if err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(b), nil -} diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go index aa6544f044..4c98ef0034 100644 --- a/pkg/helmvm/node_join.go +++ b/pkg/helmvm/node_join.go @@ -228,7 +228,7 @@ func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, to return "", fmt.Errorf("failed to get admin console port: %w", err) } - return fmt.Sprintf("%s node join %s:%d %s", binaryName, nodeIP, port, token), nil + return fmt.Sprintf("sudo ./%s node join %s:%d %s", binaryName, nodeIP, port, token), nil } // GenerateK0sJoinCommand returns the k0s node join command, without the token but with all other required flags From 4313952f8bc581663cf2228fc56ef47b4cfc5801 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Thu, 19 Oct 2023 13:01:01 -0600 Subject: [PATCH 15/31] format pod CPU and memory usage before returning (#4086) --- pkg/helmvm/helmvm_node.go | 8 ++++---- pkg/helmvm/types/types.go | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/helmvm/helmvm_node.go b/pkg/helmvm/helmvm_node.go index cf576b8aa2..51a6cd1ef3 100644 --- a/pkg/helmvm/helmvm_node.go +++ b/pkg/helmvm/helmvm_node.go @@ -109,14 +109,14 @@ func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient podTotalCPU := 0.0 for _, container := range podMetrics.Containers { if container.Usage.Memory() != nil { - podTotalMemory += float64(container.Usage.Memory().Value()) / math.Pow(2, 30) + podTotalMemory += float64(container.Usage.Memory().Value()) / math.Pow(2, 20) } if container.Usage.Cpu() != nil { - podTotalCPU += container.Usage.Cpu().AsApproximateFloat64() + podTotalCPU += container.Usage.Cpu().AsApproximateFloat64() * 1000 } } - newInfo.Memory = podTotalMemory - newInfo.CPU = podTotalCPU + newInfo.Memory = fmt.Sprintf("%.1f MB", podTotalMemory) + newInfo.CPU = fmt.Sprintf("%.1f m", podTotalCPU) } podInfo = append(podInfo, newInfo) diff --git a/pkg/helmvm/types/types.go b/pkg/helmvm/types/types.go index b94f254dc0..f177df2b37 100644 --- a/pkg/helmvm/types/types.go +++ b/pkg/helmvm/types/types.go @@ -37,9 +37,9 @@ type NodeConditions struct { } type PodInfo struct { - Name string `json:"name"` - Status string `json:"status"` - Namespace string `json:"namespace"` - CPU float64 `json:"cpu"` - Memory float64 `json:"memory"` + Name string `json:"name"` + Status string `json:"status"` + Namespace string `json:"namespace"` + CPU string `json:"cpu"` + Memory string `json:"memory"` } From 7bccea7c961453e73986ac8fe7e14b379f948914 Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Thu, 19 Oct 2023 13:35:15 -0600 Subject: [PATCH 16/31] fixes: clipboard, redirect, columns styling/formatting (#4090) * make clipboard work with http, fix redirect to cluster manage page * right align columns, remove placeholders, add namespace column --- web/src/Root.tsx | 2 ++ web/src/components/apps/AppDetailPage.tsx | 3 ++ .../apps/HelmVMClusterManagement.tsx | 6 ++++ web/src/components/apps/HelmVMViewNode.jsx | 24 ++++++++++------ web/src/components/shared/CodeSnippet.jsx | 28 +++++++++++++++++-- 5 files changed, 51 insertions(+), 12 deletions(-) diff --git a/web/src/Root.tsx b/web/src/Root.tsx index 86bef0aa94..04a656426b 100644 --- a/web/src/Root.tsx +++ b/web/src/Root.tsx @@ -691,6 +691,7 @@ const Root = () => { snapshotInProgressApps={state.snapshotInProgressApps} ping={ping} 
isHelmManaged={state.isHelmManaged} + isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)} /> } /> @@ -706,6 +707,7 @@ const Root = () => { snapshotInProgressApps={state.snapshotInProgressApps} ping={ping} isHelmManaged={state.isHelmManaged} + isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)} /> } > diff --git a/web/src/components/apps/AppDetailPage.tsx b/web/src/components/apps/AppDetailPage.tsx index 4aa0d386ee..e969b8535d 100644 --- a/web/src/components/apps/AppDetailPage.tsx +++ b/web/src/components/apps/AppDetailPage.tsx @@ -30,6 +30,7 @@ type Props = { refetchAppsList: () => void; refetchAppMetadata: () => void; snapshotInProgressApps: string[]; + isHelmVM: boolean; }; type State = { @@ -97,6 +98,8 @@ function AppDetailPage(props: Props) { navigate(`/app/${appsList[0].slug}`, { replace: true }); } else if (props.isHelmManaged) { navigate("/install-with-helm", { replace: true }); + } else if (props.isHelmVM) { + navigate(`/${selectedApp?.slug}/cluster/manage`, { replace: true }); } else { navigate("/upload-license", { replace: true }); } diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index a545de0e75..5825a893b2 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -222,11 +222,17 @@ const HelmVMClusterManagement = ({ accessorKey: "cpu", header: "CPU", size: 150, + muiTableBodyCellProps: { + align: "right", + }, }, { accessorKey: "memory", header: "Memory", size: 150, + muiTableBodyCellProps: { + align: "right", + }, }, { accessorKey: "pods", diff --git a/web/src/components/apps/HelmVMViewNode.jsx b/web/src/components/apps/HelmVMViewNode.jsx index 3af18d68ee..2efef120ce 100644 --- a/web/src/components/apps/HelmVMViewNode.jsx +++ b/web/src/components/apps/HelmVMViewNode.jsx @@ -53,6 +53,11 @@ const HelmVMViewNode = () => { enableColumnDragging: false, size: 150, }, + { + accessorKey: "namespace", + header: "Namespace", + size: 150, + }, { accessorKey: "status", header: "Status", @@ -62,11 +67,17 @@ const HelmVMViewNode = () => { accessorKey: "cpu", header: "CPU", size: 150, + muiTableBodyCellProps: { + align: "right", + }, }, { accessorKey: "memory", header: "Memory", size: 150, + muiTableBodyCellProps: { + align: "right", + }, }, // { // accessorKey: "delete", @@ -80,9 +91,10 @@ const HelmVMViewNode = () => { const mappedPods = useMemo(() => { return node?.podList?.map((p) => ({ name: p.name, + namespace: p.namespace, status: p.status, cpu: p.cpu, - memory: `${p.memory}GB`, + memory: p.memory, delete: ( <> @@ -128,17 +140,11 @@ const HelmVMViewNode = () => {

kube-proxy version

-

{node?.kubeletVersion}

+

{node?.kubeProxyVersion}

OS

-

{node?.kubeletVersion}

-
-
-

- kurl version -

-

{node?.kubeletVersion}

+

{node?.operatingSystem}

diff --git a/web/src/components/shared/CodeSnippet.jsx b/web/src/components/shared/CodeSnippet.jsx index 892c4b746a..9f0738e2fb 100644 --- a/web/src/components/shared/CodeSnippet.jsx +++ b/web/src/components/shared/CodeSnippet.jsx @@ -36,7 +36,7 @@ class CodeSnippet extends Component { const { children, copyDelay } = this.props; const textToCopy = Array.isArray(children) ? children.join("\n") : children; - if (navigator.clipboard) { + if (navigator.clipboard && window.isSecureContext) { navigator.clipboard.writeText(textToCopy).then(() => { this.setState({ didCopy: true }); @@ -44,6 +44,29 @@ class CodeSnippet extends Component { this.setState({ didCopy: false }); }, copyDelay); }); + } else { + const textArea = document.createElement("textarea"); + textArea.value = textToCopy; + + textArea.style.position = "absolute"; + textArea.style.opacity = 0; + + document.body.prepend(textArea); + textArea.select(); + + try { + document.execCommand("copy"); + + this.setState({ didCopy: true }); + + setTimeout(() => { + this.setState({ didCopy: false }); + }, copyDelay); + } catch (error) { + console.error(error); + } finally { + textArea.remove(); + } } }; @@ -67,7 +90,6 @@ class CodeSnippet extends Component { language, preText, canCopy, - clipboardEnabled = !!navigator.clipboard, copyText, onCopyText, variant, @@ -94,7 +116,7 @@ class CodeSnippet extends Component {
)} {content} - {clipboardEnabled && canCopy && ( + {canCopy && ( Date: Thu, 19 Oct 2023 13:44:41 -0600 Subject: [PATCH 17/31] determine node roles based on their labels (#4089) --- pkg/helmvm/helmvm_node.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/pkg/helmvm/helmvm_node.go b/pkg/helmvm/helmvm_node.go index 51a6cd1ef3..079086f983 100644 --- a/pkg/helmvm/helmvm_node.go +++ b/pkg/helmvm/helmvm_node.go @@ -89,11 +89,6 @@ func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient podCapacity.Used = float64(len(nodePods)) - nodeLabelArray := []string{} - for k, v := range node.Labels { - nodeLabelArray = append(nodeLabelArray, fmt.Sprintf("%s:%s", k, v)) - } - podInfo := []types.PodInfo{} for _, pod := range nodePods { @@ -135,8 +130,23 @@ func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient CPU: cpuCapacity, Memory: memoryCapacity, Pods: podCapacity, - Labels: nodeLabelArray, + Labels: nodeRolesFromLabels(node.Labels), Conditions: findNodeConditions(node.Status.Conditions), PodList: podInfo, }, nil } + +func nodeRolesFromLabels(labels map[string]string) []string { + toReturn := []string{} + + // detect if this is a controller node from the k8s labels + if val, ok := labels["node-role.kubernetes.io/control-plane"]; ok && val == "true" { + toReturn = append(toReturn, "controller") + } + + if len(toReturn) == 0 { + toReturn = append(toReturn, "worker") + } + + return toReturn +} From ad3b2919af687020d7a02ba6058578be6443cf27 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Thu, 19 Oct 2023 13:46:03 -0600 Subject: [PATCH 18/31] fix vet (#4084) * fix vet * fix tests * complete mock handler * more test * mockgen --- pkg/handlers/handlers_test.go | 31 +++++++++++++++++++++---------- pkg/handlers/interface.go | 1 + pkg/handlers/mock/mock.go | 18 +++++++++++++++--- 3 files changed, 37 insertions(+), 13 deletions(-) diff --git a/pkg/handlers/handlers_test.go b/pkg/handlers/handlers_test.go index 91bd7c0731..9d4ee77a74 100644 --- a/pkg/handlers/handlers_test.go +++ b/pkg/handlers/handlers_test.go @@ -1210,54 +1210,65 @@ var HandlerPolicyTests = map[string][]HandlerPolicyTest{ }, "HelmVM": {}, // Not implemented - "GenerateHelmVMNodeJoinCommandSecondary": { + "GenerateK0sNodeJoinCommand": { { Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.GenerateHelmVMNodeJoinCommandSecondary(gomock.Any(), gomock.Any()) + handlerRecorder.GenerateK0sNodeJoinCommand(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "GenerateHelmVMNodeJoinCommandPrimary": { + "DrainHelmVMNode": { { + Vars: map[string]string{"nodeName": "node-name"}, Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.GenerateHelmVMNodeJoinCommandPrimary(gomock.Any(), gomock.Any()) + handlerRecorder.DrainHelmVMNode(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "DrainHelmVMNode": { + "DeleteHelmVMNode": { { Vars: map[string]string{"nodeName": "node-name"}, Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder 
*mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.DrainHelmVMNode(gomock.Any(), gomock.Any()) + handlerRecorder.DeleteHelmVMNode(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "DeleteHelmVMNode": { + "GetHelmVMNodes": { + { + Roles: []rbactypes.Role{rbac.ClusterAdminRole}, + SessionRoles: []string{rbac.ClusterAdminRoleID}, + Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { + handlerRecorder.GetHelmVMNodes(gomock.Any(), gomock.Any()) + }, + ExpectStatus: http.StatusOK, + }, + }, + "GetHelmVMNode": { { Vars: map[string]string{"nodeName": "node-name"}, Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.DeleteHelmVMNode(gomock.Any(), gomock.Any()) + handlerRecorder.GetHelmVMNode(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "GetHelmVMNodes": { + "GetK0sNodeJoinCommand": { { Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.GetHelmVMNodes(gomock.Any(), gomock.Any()) + handlerRecorder.GetK0sNodeJoinCommand(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, diff --git a/pkg/handlers/interface.go b/pkg/handlers/interface.go index 0a1fa800fe..81945a41ac 100644 --- a/pkg/handlers/interface.go +++ b/pkg/handlers/interface.go @@ -144,6 +144,7 @@ type KOTSHandler interface { DeleteHelmVMNode(w http.ResponseWriter, r *http.Request) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) GetHelmVMNode(w http.ResponseWriter, r *http.Request) + GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) // Prometheus SetPrometheusAddress(w http.ResponseWriter, r *http.Request) diff --git a/pkg/handlers/mock/mock.go b/pkg/handlers/mock/mock.go index 0898775f70..2d47a3af6f 100644 --- a/pkg/handlers/mock/mock.go +++ b/pkg/handlers/mock/mock.go @@ -442,14 +442,14 @@ func (mr *MockKOTSHandlerMockRecorder) GarbageCollectImages(w, r interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollectImages", reflect.TypeOf((*MockKOTSHandler)(nil).GarbageCollectImages), w, r) } -// GenerateHelmVMNodeJoinCommand mocks base method. +// GenerateK0sNodeJoinCommand mocks base method. func (m *MockKOTSHandler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() m.ctrl.Call(m, "GenerateK0sNodeJoinCommand", w, r) } -// GenerateHelmVMNodeJoinCommand indicates an expected call of GenerateHelmVMNodeJoinCommand. -func (mr *MockKOTSHandlerMockRecorder) GenerateHelmVMNodeJoinCommand(w, r interface{}) *gomock.Call { +// GenerateK0sNodeJoinCommand indicates an expected call of GenerateK0sNodeJoinCommand. 
+func (mr *MockKOTSHandlerMockRecorder) GenerateK0sNodeJoinCommand(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateK0sNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateK0sNodeJoinCommand), w, r) } @@ -814,6 +814,18 @@ func (mr *MockKOTSHandlerMockRecorder) GetInstanceSnapshotConfig(w, r interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceSnapshotConfig", reflect.TypeOf((*MockKOTSHandler)(nil).GetInstanceSnapshotConfig), w, r) } +// GetK0sNodeJoinCommand mocks base method. +func (m *MockKOTSHandler) GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GetK0sNodeJoinCommand", w, r) +} + +// GetK0sNodeJoinCommand indicates an expected call of GetK0sNodeJoinCommand. +func (mr *MockKOTSHandlerMockRecorder) GetK0sNodeJoinCommand(w, r interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetK0sNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GetK0sNodeJoinCommand), w, r) +} + // GetKotsadmRegistry mocks base method. func (m *MockKOTSHandler) GetKotsadmRegistry(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() From d3e1c6006dfbca8983b037bfce05a89b00736545 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Thu, 19 Oct 2023 13:58:27 -0600 Subject: [PATCH 19/31] cleanup the k0s join token creation pod after completion (#4091) --- pkg/helmvm/node_join.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go index 4c98ef0034..34b7ac8053 100644 --- a/pkg/helmvm/node_join.go +++ b/pkg/helmvm/node_join.go @@ -202,6 +202,12 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node return "", fmt.Errorf("failed to get pod logs: %w", err) } + // delete the completed pod + err = client.CoreV1().Pods("kube-system").Delete(ctx, podName, metav1.DeleteOptions{}) + if err != nil { + return "", fmt.Errorf("failed to delete pod: %w", err) + } + // the logs are just a join token, which needs to be added to other things to get a join command return string(podLogs), nil } From 77cb4f29bb01362d55bf76e38113bbd30936a7bb Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Thu, 19 Oct 2023 14:06:28 -0600 Subject: [PATCH 20/31] fix redirect, add column types (#4092) --- web/src/components/apps/AppDetailPage.tsx | 6 +++-- .../apps/HelmVMClusterManagement.tsx | 26 +++++++++++-------- web/src/components/apps/HelmVMViewNode.jsx | 6 +++-- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/web/src/components/apps/AppDetailPage.tsx b/web/src/components/apps/AppDetailPage.tsx index e969b8535d..dbe0d00df5 100644 --- a/web/src/components/apps/AppDetailPage.tsx +++ b/web/src/components/apps/AppDetailPage.tsx @@ -98,8 +98,6 @@ function AppDetailPage(props: Props) { navigate(`/app/${appsList[0].slug}`, { replace: true }); } else if (props.isHelmManaged) { navigate("/install-with-helm", { replace: true }); - } else if (props.isHelmVM) { - navigate(`/${selectedApp?.slug}/cluster/manage`, { replace: true }); } else { navigate("/upload-license", { replace: true }); } @@ -324,6 +322,10 @@ function AppDetailPage(props: Props) { const firstVersion = downstream.pendingVersions.find( (version: Version) => version?.sequence === 0 ); + if (props.isHelmVM) { + navigate(`/${appNeedsConfiguration.slug}/cluster/manage`); + return; + } if (firstVersion?.status === "pending_config") { 
navigate(`/${appNeedsConfiguration.slug}/config`); return; diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index 5825a893b2..1ab0615805 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -1,6 +1,6 @@ import { useQuery } from "@tanstack/react-query"; import classNames from "classnames"; -import MaterialReactTable from "material-react-table"; +import MaterialReactTable, { MRT_ColumnDef } from "material-react-table"; import React, { ChangeEvent, useMemo, useReducer, useState } from "react"; import Modal from "react-modal"; import { Link, useParams } from "react-router-dom"; @@ -199,7 +199,17 @@ const HelmVMClusterManagement = ({ }; // #endregion - const columns = useMemo( + type NodeColumns = { + name: string | JSX.Element; + roles: JSX.Element; + status: string; + cpu: string; + memory: string; + pause: JSX.Element; + delete: JSX.Element; + }; + + const columns = useMemo[]>( () => [ { accessorKey: "name", @@ -211,7 +221,7 @@ const HelmVMClusterManagement = ({ { accessorKey: "roles", header: "Role(s)", - size: 404, + size: 150, }, { accessorKey: "status", @@ -234,11 +244,6 @@ const HelmVMClusterManagement = ({ align: "right", }, }, - { - accessorKey: "pods", - header: "Pods", - size: 150, - }, // { // accessorKey: "pause", // header: "Pause", @@ -280,10 +285,9 @@ const HelmVMClusterManagement = ({ ), status: n.isReady ? "Ready" : "Not Ready", cpu: `${n.cpu.used.toFixed(2)} / ${n.cpu.capacity.toFixed(2)}`, - memory: `${n.memory.used.toFixed(2)}GB / ${n.memory.capacity.toFixed( + memory: `${n.memory.used.toFixed(2)} / ${n.memory.capacity.toFixed( 2 - )}GB`, - pods: `${n.pods.used} / ${n.pods.capacity}`, + )} GB`, pause: ( <> diff --git a/web/src/components/apps/HelmVMViewNode.jsx b/web/src/components/apps/HelmVMViewNode.jsx index 2efef120ce..8cf533312b 100644 --- a/web/src/components/apps/HelmVMViewNode.jsx +++ b/web/src/components/apps/HelmVMViewNode.jsx @@ -143,8 +143,10 @@ const HelmVMViewNode = () => {

             {node?.kubeProxyVersion}
-            OS
-            {node?.operatingSystem}
+            kernel version
+            {node?.kernelVersion}
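A note on the column typing added to HelmVMClusterManagement.tsx in this patch: the MRT_ColumnDef generic ties each accessorKey to the row shape, so a bad accessor fails at compile time instead of rendering an empty column. A minimal sketch under that assumption; NodeRow is an illustrative type, not one defined in this series:

import { MRT_ColumnDef } from "material-react-table";

// NodeRow is an illustrative row shape, not a type from this patch series.
type NodeRow = {
  name: string;
  status: string;
};

// Typing the array as MRT_ColumnDef<NodeRow>[] checks every accessorKey
// against NodeRow, so a misspelled key is a compile-time error.
const nodeColumns: MRT_ColumnDef<NodeRow>[] = [
  { accessorKey: "name", header: "Name", size: 150 },
  { accessorKey: "status", header: "Status", size: 150 },
];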
From 2582c90a2884c2f0d919816921beff67ce52efff Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Thu, 19 Oct 2023 16:50:19 -0600 Subject: [PATCH 21/31] make labels optional (#4094) * check for labels * make labels optional type --- web/src/components/apps/HelmVMClusterManagement.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index 1ab0615805..799cd767c7 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -75,7 +75,7 @@ const HelmVMClusterManagement = ({ capacity: number; used: number; }; - labels: string[]; + labels?: string[]; conditions: { memoryPressure: boolean; diskPressure: boolean; @@ -273,7 +273,7 @@ const HelmVMClusterManagement = ({ ), roles: (
- {n.labels.map((l) => ( + {n?.labels?.map((l) => ( Date: Thu, 19 Oct 2023 18:17:33 -0600 Subject: [PATCH 22/31] remove the --force flag (#4097) --- pkg/helmvm/node_join.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go index 34b7ac8053..ea739c4b54 100644 --- a/pkg/helmvm/node_join.go +++ b/pkg/helmvm/node_join.go @@ -247,7 +247,7 @@ func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, ro } } - cmd := []string{"/usr/local/bin/k0s", "install", k0sRole, "--force"} + cmd := []string{"/usr/local/bin/k0s", "install", k0sRole} if k0sRole == "controller" { cmd = append(cmd, "--enable-worker") } From 654d1cf9e20c1680632992e8b8c2058ec13a0f6b Mon Sep 17 00:00:00 2001 From: Diamon Wiggins <38189728+diamonwiggins@users.noreply.github.com> Date: Fri, 20 Oct 2023 10:16:10 -0600 Subject: [PATCH 23/31] chore: change embedded cluster config map namespace (#4100) * implement 'IsHelmVM' function based on presence of configmap if embedded-cluster-config exists in kube-system it is helmvm * change namespace of embedded cluster config --------- Co-authored-by: Andrew Lavery --- pkg/helmvm/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/helmvm/util.go b/pkg/helmvm/util.go index 65d93f0c9f..87b6bfe285 100644 --- a/pkg/helmvm/util.go +++ b/pkg/helmvm/util.go @@ -11,7 +11,7 @@ import ( ) const configMapName = "embedded-cluster-config" -const configMapNamespace = "kube-system" +const configMapNamespace = "embedded-cluster" // ReadConfigMap will read the Kurl config from a configmap func ReadConfigMap(client kubernetes.Interface) (*corev1.ConfigMap, error) { From f5d0de9382c6dc348ac0bbec9e196d5e2a631ba5 Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Fri, 20 Oct 2023 10:22:03 -0600 Subject: [PATCH 24/31] update redirect to cluster manage page, comment test data (#4096) * improve logic around initial cluster flow, comment test data * fix types, redirect on unknown or pending config status if helmvm --- pkg/store/types/constants.go | 16 +++---- web/src/Root.tsx | 24 +++++++---- web/src/components/apps/AppDetailPage.tsx | 6 ++- .../apps/HelmVMClusterManagement.tsx | 42 ++++++++++++++++--- web/src/components/apps/HelmVMViewNode.jsx | 38 +++++++++++++++-- web/src/components/shared/NavBar.tsx | 27 ++++++------ .../AppVersionHistoryRow.tsx | 9 ++-- .../components/DashboardVersionCard.tsx | 6 +-- web/src/types/index.ts | 5 +-- 9 files changed, 119 insertions(+), 54 deletions(-) diff --git a/pkg/store/types/constants.go b/pkg/store/types/constants.go index a449968b28..1ce8b655d7 100644 --- a/pkg/store/types/constants.go +++ b/pkg/store/types/constants.go @@ -3,12 +3,12 @@ package types type DownstreamVersionStatus string const ( - VersionUnknown DownstreamVersionStatus = "unknown" - VersionPendingConfig DownstreamVersionStatus = "pending_config" - VersionPending DownstreamVersionStatus = "pending" - VersionPendingPreflight DownstreamVersionStatus = "pending_preflight" - VersionPendingDownload DownstreamVersionStatus = "pending_download" - VersionDeploying DownstreamVersionStatus = "deploying" - VersionDeployed DownstreamVersionStatus = "deployed" - VersionFailed DownstreamVersionStatus = "failed" + VersionUnknown DownstreamVersionStatus = "unknown" // we don't know + VersionPendingConfig DownstreamVersionStatus = "pending_config" // needs required configuration + VersionPendingDownload DownstreamVersionStatus = "pending_download" // needs to be 
downloaded from the upstream source + VersionPendingPreflight DownstreamVersionStatus = "pending_preflight" // waiting for preflights to finish + VersionPending DownstreamVersionStatus = "pending" // can be deployed, but is not yet + VersionDeploying DownstreamVersionStatus = "deploying" // is being deployed + VersionDeployed DownstreamVersionStatus = "deployed" // did deploy successfully + VersionFailed DownstreamVersionStatus = "failed" // did not deploy successfully ) diff --git a/web/src/Root.tsx b/web/src/Root.tsx index 04a656426b..54e86f9a31 100644 --- a/web/src/Root.tsx +++ b/web/src/Root.tsx @@ -576,15 +576,21 @@ const Root = () => { /> } /> {state.adminConsoleMetadata?.isHelmVM && ( - - } - /> + <> + + } + /> + } + /> + )} {(state.adminConsoleMetadata?.isKurl || state.adminConsoleMetadata?.isHelmVM) && ( diff --git a/web/src/components/apps/AppDetailPage.tsx b/web/src/components/apps/AppDetailPage.tsx index dbe0d00df5..46fbe7a2e2 100644 --- a/web/src/components/apps/AppDetailPage.tsx +++ b/web/src/components/apps/AppDetailPage.tsx @@ -322,11 +322,15 @@ function AppDetailPage(props: Props) { const firstVersion = downstream.pendingVersions.find( (version: Version) => version?.sequence === 0 ); - if (props.isHelmVM) { + if (firstVersion?.status === "unknown" && props.isHelmVM) { navigate(`/${appNeedsConfiguration.slug}/cluster/manage`); return; } if (firstVersion?.status === "pending_config") { + if (props.isHelmVM) { + navigate(`/${appNeedsConfiguration.slug}/cluster/manage`); + return; + } navigate(`/${appNeedsConfiguration.slug}/config`); return; } diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/HelmVMClusterManagement.tsx index 799cd767c7..3c80ae1ef9 100644 --- a/web/src/components/apps/HelmVMClusterManagement.tsx +++ b/web/src/components/apps/HelmVMClusterManagement.tsx @@ -14,6 +14,38 @@ import CodeSnippet from "../shared/CodeSnippet"; import "@src/scss/components/apps/HelmVMClusterManagement.scss"; +const testData = { + nodes: undefined, +}; +// const testData = { +// nodes: [ +// { +// name: "laverya-helmvm", +// isConnected: true, +// isReady: true, +// isPrimaryNode: true, +// canDelete: false, +// kubeletVersion: "v1.28.2+k0s", +// kubeProxyVersion: "v1.28.2+k0s", +// operatingSystem: "linux", +// kernelVersion: "5.10.0-26-cloud-amd64", +// cpu: { capacity: 4, used: 1.9364847660000002 }, +// memory: { capacity: 15.633056640625, used: 3.088226318359375 }, +// pods: { capacity: 110, used: 27 }, +// labels: ["controller"], +// conditions: { +// memoryPressure: false, +// diskPressure: false, +// pidPressure: false, +// ready: true, +// }, +// podList: [], +// }, +// ], +// ha: true, +// isHelmVMEnabled: true, +// }; + type State = { displayAddNode: boolean; confirmDeleteNode: string; @@ -260,12 +292,10 @@ const HelmVMClusterManagement = ({ const mappedNodes = useMemo(() => { return ( - nodesData?.nodes?.map((n) => ({ - name: slug ? ( - n.name - ) : ( + (nodesData?.nodes || testData?.nodes)?.map((n) => ({ + name: ( {n.name} @@ -335,7 +365,7 @@ const HelmVMClusterManagement = ({ {nodesError?.message}

)} - {nodesData?.nodes && ( + {(nodesData?.nodes || testData?.nodes) && ( { - const { nodeName } = useParams(); + const { slug, nodeName } = useParams(); const { data: nodeData, isLoading: nodeLoading } = useQuery({ queryKey: ["helmVmNode", nodeName], queryFn: async ({ queryKey }) => { @@ -41,7 +73,7 @@ const HelmVMViewNode = () => { }, }); - const node = nodeData; + const node = nodeData || testData; // #region table data const columns = useMemo( @@ -109,7 +141,7 @@ const HelmVMViewNode = () => { {/* Breadcrumbs */}

            Cluster Nodes
diff --git a/web/src/components/shared/NavBar.tsx b/web/src/components/shared/NavBar.tsx
index 6f937c6226..09c6dc2c9d 100644
--- a/web/src/components/shared/NavBar.tsx
+++ b/web/src/components/shared/NavBar.tsx
@@ -228,20 +228,21 @@ export class NavBar extends PureComponent {
             )}
-            {(isKurlEnabled || isHelmVMEnabled) && (
-              Cluster Management
-            )}
+              Cluster Management
+            )}
             {isSnapshotsSupported && (
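The DownstreamVersionStatus comments added at the top of this patch double as a lifecycle: unknown, then pending_config, pending_download, pending_preflight, pending, deploying, and finally deployed or failed. The web client mirrors it in the VersionStatus union updated below. A hypothetical guard over that union, not code from this series, shows the intended reading:

import { VersionStatus } from "@types";

// Hypothetical helper, not part of this patch series: the statuses where a
// version cannot progress until its configuration or download is resolved.
const needsUserAction = (status: VersionStatus): boolean =>
  status === "pending_config" || status === "pending_download";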
{ }; const getCurrentVersionStatus = (version: Version | null) => { - if ( - version?.status === "deployed" || - version?.status === "merged" || - version?.status === "pending" - ) { + if (version?.status === "deployed" || version?.status === "pending") { return ( Currently {version?.status.replace("_", " ")} version diff --git a/web/src/types/index.ts b/web/src/types/index.ts index 269d6d1b35..6a0c7cdf44 100644 --- a/web/src/types/index.ts +++ b/web/src/types/index.ts @@ -252,13 +252,12 @@ export type VersionStatus = | "deployed" | "deploying" | "failed" - | "merged" | "pending" | "pending_config" | "pending_download" | "pending_preflight" - | "superseded" - | "waiting"; + | "waiting" + | "unknown"; export type LicenseFile = { preview: string; From a541440c946da0c464d0093af3f3cfb5650fbe3a Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Fri, 20 Oct 2023 10:28:25 -0600 Subject: [PATCH 25/31] node role labels (#4093) * node role labels * handle having no labels on the first node * f * include a prefix on the label * = not : --- pkg/helmvm/helmvm_node.go | 26 ++++++++++++++++++++----- pkg/helmvm/node_join.go | 41 +++++++++++++++++++++++++++++++++++++++ pkg/helmvm/types/types.go | 3 +++ 3 files changed, 65 insertions(+), 5 deletions(-) diff --git a/pkg/helmvm/helmvm_node.go b/pkg/helmvm/helmvm_node.go index 079086f983..a9b17f497e 100644 --- a/pkg/helmvm/helmvm_node.go +++ b/pkg/helmvm/helmvm_node.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "strconv" + "strings" "github.com/replicatedhq/kots/pkg/helmvm/types" "github.com/replicatedhq/kots/pkg/k8sutil" @@ -136,16 +137,31 @@ func nodeMetrics(ctx context.Context, client kubernetes.Interface, metricsClient }, nil } +// nodeRolesFromLabels parses a map of k8s node labels, and returns the roles of the node func nodeRolesFromLabels(labels map[string]string) []string { toReturn := []string{} - // detect if this is a controller node from the k8s labels - if val, ok := labels["node-role.kubernetes.io/control-plane"]; ok && val == "true" { - toReturn = append(toReturn, "controller") + numRolesStr, ok := labels[types.EMBEDDED_CLUSTER_ROLE_LABEL] + if !ok { + // the first node will not initially have a role label, but is a 'controller' + if val, ok := labels["node-role.kubernetes.io/control-plane"]; ok && val == "true" { + return []string{"controller"} + } + return nil } + numRoles, err := strconv.Atoi(strings.TrimPrefix(numRolesStr, "total-")) + if err != nil { + fmt.Printf("failed to parse role label %q: %s", numRolesStr, err.Error()) - if len(toReturn) == 0 { - toReturn = append(toReturn, "worker") + return nil + } + + for i := 0; i < numRoles; i++ { + roleLabel, ok := labels[fmt.Sprintf("%s-%d", types.EMBEDDED_CLUSTER_ROLE_LABEL, i)] + if !ok { + fmt.Printf("failed to find role label %d", i) + } + toReturn = append(toReturn, roleLabel) } return toReturn diff --git a/pkg/helmvm/node_join.go b/pkg/helmvm/node_join.go index ea739c4b54..4a4bb6c068 100644 --- a/pkg/helmvm/node_join.go +++ b/pkg/helmvm/node_join.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/replicatedhq/kots/pkg/helmvm/types" corev1 "k8s.io/api/core/v1" kuberneteserrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -252,6 +253,12 @@ func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, ro cmd = append(cmd, "--enable-worker") } + labels, err := getRolesNodeLabels(ctx, client, roles) + if err != nil { + return "", fmt.Errorf("failed to get role labels: %w", err) + } + cmd = append(cmd, "--labels", labels) + return 
strings.Join(cmd, " "), nil } @@ -296,3 +303,37 @@ func getControllerNodeIP(ctx context.Context, client kubernetes.Interface) (stri return "", fmt.Errorf("failed to find healthy controller node") } + +func getRolesNodeLabels(ctx context.Context, client kubernetes.Interface, roles []string) (string, error) { + roleLabels := getRoleListLabels(roles) + + for _, role := range roles { + labels, err := getRoleNodeLabels(ctx, client, role) + if err != nil { + return "", fmt.Errorf("failed to get node labels for role %s: %w", role, err) + } + roleLabels = append(roleLabels, labels...) + } + + return strings.Join(roleLabels, ","), nil +} + +// TODO: look up role in cluster config, apply additional labels based on role +func getRoleNodeLabels(ctx context.Context, client kubernetes.Interface, role string) ([]string, error) { + toReturn := []string{} + + return toReturn, nil +} + +// getRoleListLabels returns the labels needed to identify the roles of this node in the future +// one label will be the number of roles, and then deterministic label names will be used to store the role names +func getRoleListLabels(roles []string) []string { + toReturn := []string{} + toReturn = append(toReturn, fmt.Sprintf("%s=total-%d", types.EMBEDDED_CLUSTER_ROLE_LABEL, len(roles))) + + for idx, role := range roles { + toReturn = append(toReturn, fmt.Sprintf("%s-%d=%s", types.EMBEDDED_CLUSTER_ROLE_LABEL, idx, role)) + } + + return toReturn +} diff --git a/pkg/helmvm/types/types.go b/pkg/helmvm/types/types.go index f177df2b37..10bf390368 100644 --- a/pkg/helmvm/types/types.go +++ b/pkg/helmvm/types/types.go @@ -1,5 +1,8 @@ package types +const EMBEDDED_CLUSTER_LABEL = "kots.io/embedded-cluster" +const EMBEDDED_CLUSTER_ROLE_LABEL = EMBEDDED_CLUSTER_LABEL + "-role" + type HelmVMNodes struct { Nodes []Node `json:"nodes"` HA bool `json:"ha"` From 1c3e657f2436ce9040669358f526c7fc00c79b75 Mon Sep 17 00:00:00 2001 From: Star Richardson <67430892+alicenstar@users.noreply.github.com> Date: Fri, 20 Oct 2023 13:58:24 -0600 Subject: [PATCH 26/31] fix config redirect, remove unnecessary code (#4104) * fix config redirect, remove unnecessary code * linting --- web/src/Root.tsx | 7 +------ web/src/components/apps/HelmVMClusterManagement.tsx | 6 +++--- web/src/components/apps/HelmVMViewNode.jsx | 11 +---------- 3 files changed, 5 insertions(+), 19 deletions(-) diff --git a/web/src/Root.tsx b/web/src/Root.tsx index 54e86f9a31..a78e3b46bc 100644 --- a/web/src/Root.tsx +++ b/web/src/Root.tsx @@ -579,12 +579,7 @@ const Root = () => { <> - } + element={} /> { const [state, setState] = useReducer( (prevState: State, newState: Partial) => ({ @@ -81,7 +79,9 @@ const HelmVMClusterManagement = ({ const [selectedNodeTypes, setSelectedNodeTypes] = useState([]); const { data: appsData } = useApps(); - const app = appsData?.apps?.find((a) => a.name === appName); + // we grab the first app because helmvm users should only ever have one app + const app = appsData?.apps?.[0]; + const { slug } = useParams(); // #region queries diff --git a/web/src/components/apps/HelmVMViewNode.jsx b/web/src/components/apps/HelmVMViewNode.jsx index 66396d8b10..6bd7651568 100644 --- a/web/src/components/apps/HelmVMViewNode.jsx +++ b/web/src/components/apps/HelmVMViewNode.jsx @@ -1,5 +1,5 @@ import { MaterialReactTable } from "material-react-table"; -import React, { useMemo } from "react"; +import React, { useMemo, setState } from "react"; import { useQuery } from "@tanstack/react-query"; import { Link, useParams } from "react-router-dom"; import Loader from 
"@components/shared/Loader"; @@ -62,15 +62,6 @@ const HelmVMViewNode = () => { err.status ); }, - onSuccess: (data) => { - setState({ - // if cluster doesn't support ha, then primary will be disabled. Force into secondary - selectedNodeType: !data.ha ? "secondary" : state.selectedNodeType, - }); - }, - config: { - retry: false, - }, }); const node = nodeData || testData; From 3289ee57d33f7ae1d5e9141ad6afd3e8c01ac219 Mon Sep 17 00:00:00 2001 From: Andrew Lavery Date: Mon, 23 Oct 2023 10:33:45 -0600 Subject: [PATCH 27/31] quite a bit of renaming (#4106) * renaming things to 'embedded-cluster' * rename frontend * import ordering * undo goland's wonderful formatting changes * function naming * undo whitespace change --- .github/actions/cmx-versions/dist/index.js | 4 +- .github/actions/cmx-versions/index.js | 2 +- .github/workflows/regression.yaml | 6 +- ...okens.yaml => embeded_cluster_tokens.yaml} | 4 +- .../delete_node.go | 2 +- pkg/{helmvm => embeddedcluster}/drain_node.go | 2 +- pkg/{helmvm => embeddedcluster}/exec.go | 2 +- .../helmvm_node.go | 4 +- .../helmvm_nodes.go | 15 +-- pkg/{helmvm => embeddedcluster}/node_join.go | 8 +- .../types/types.go | 8 +- pkg/{helmvm => embeddedcluster}/util.go | 4 +- ...ode.go => embedded_cluster_delete_node.go} | 6 +- ...node.go => embedded_cluster_drain_node.go} | 6 +- ...{helmvm_get.go => embedded_cluster_get.go} | 10 +- ... => embedded_cluster_node_join_command.go} | 32 +++---- pkg/handlers/handlers.go | 26 ++--- pkg/handlers/handlers_test.go | 32 +++---- pkg/handlers/interface.go | 13 ++- pkg/handlers/metadata.go | 14 +-- pkg/handlers/mock/mock.go | 96 ++++++++----------- pkg/kotsadm/metadata.go | 10 +- pkg/kotsadm/types/metadata.go | 6 +- ...k0s_store.go => embedded_cluster_store.go} | 14 +-- web/src/Root.tsx | 45 +++++---- web/src/components/UploadLicenseFile.tsx | 4 +- web/src/components/apps/AppDetailPage.tsx | 18 ++-- web/src/components/apps/AppVersionHistory.tsx | 10 +- ...ment.tsx => EmbeddedClusterManagement.tsx} | 37 +++---- ...ewNode.jsx => EmbeddedClusterViewNode.jsx} | 29 +++--- ....test.js => EmbeddedClustrNodeRow.test.js} | 2 +- web/src/components/shared/NavBar.tsx | 8 +- ...nt.scss => EmbeddedClusterManagement.scss} | 6 +- web/src/types/index.ts | 2 +- 34 files changed, 243 insertions(+), 244 deletions(-) rename migrations/tables/{k0s_tokens.yaml => embeded_cluster_tokens.yaml} (84%) rename pkg/{helmvm => embeddedcluster}/delete_node.go (90%) rename pkg/{helmvm => embeddedcluster}/drain_node.go (88%) rename pkg/{helmvm => embeddedcluster}/exec.go (94%) rename pkg/{helmvm => embeddedcluster}/helmvm_node.go (98%) rename pkg/{helmvm => embeddedcluster}/helmvm_nodes.go (88%) rename pkg/{helmvm => embeddedcluster}/node_join.go (97%) rename pkg/{helmvm => embeddedcluster}/types/types.go (88%) rename pkg/{helmvm => embeddedcluster}/util.go (93%) rename pkg/handlers/{helmvm_delete_node.go => embedded_cluster_delete_node.go} (82%) rename pkg/handlers/{helmvm_drain_node.go => embedded_cluster_drain_node.go} (82%) rename pkg/handlers/{helmvm_get.go => embedded_cluster_get.go} (67%) rename pkg/handlers/{helmvm_node_join_command.go => embedded_cluster_node_join_command.go} (64%) rename pkg/store/kotsstore/{k0s_store.go => embedded_cluster_store.go} (71%) rename web/src/components/apps/{HelmVMClusterManagement.tsx => EmbeddedClusterManagement.tsx} (94%) rename web/src/components/apps/{HelmVMViewNode.jsx => EmbeddedClusterViewNode.jsx} (92%) rename web/src/components/apps/{HelmVMNodeRow.test.js => EmbeddedClustrNodeRow.test.js} (57%) rename 
web/src/scss/components/apps/{HelmVMClusterManagement.scss => EmbeddedClusterManagement.scss} (88%) diff --git a/.github/actions/cmx-versions/dist/index.js b/.github/actions/cmx-versions/dist/index.js index 85624a1de8..74168e22f1 100644 --- a/.github/actions/cmx-versions/dist/index.js +++ b/.github/actions/cmx-versions/dist/index.js @@ -7661,7 +7661,7 @@ async function getClusterVersions() { clusterVersions.forEach((distribution) => { const distroName = distribution.short_name; - if (distroName === 'helmvm' || distroName === 'kurl') { + if (distroName === 'embedded_cluster' || distroName === 'kurl') { // excluding the embedded distributions return; } @@ -7710,4 +7710,4 @@ getClusterVersions(); module.exports = __webpack_exports__; /******/ })() -; \ No newline at end of file +; diff --git a/.github/actions/cmx-versions/index.js b/.github/actions/cmx-versions/index.js index 213ffe0939..ffc802451d 100644 --- a/.github/actions/cmx-versions/index.js +++ b/.github/actions/cmx-versions/index.js @@ -41,7 +41,7 @@ async function getClusterVersions() { clusterVersions.forEach((distribution) => { const distroName = distribution.short_name; - if (distroName === 'helmvm' || distroName === 'kurl') { + if (distroName === 'embedded_cluster' || distroName === 'kurl') { // excluding the embedded distributions return; } diff --git a/.github/workflows/regression.yaml b/.github/workflows/regression.yaml index 2a99593730..bd40a77491 100644 --- a/.github/workflows/regression.yaml +++ b/.github/workflows/regression.yaml @@ -191,9 +191,9 @@ jobs: is_upgrade: "1" }, { - name: "type=helmvm cluster, env=online, phase=new install, rbac=cluster admin", - backend_config: "helmvm-online-install-backend-config.tfvars", - terraform_script: "helmvm-online-install.sh" + name: "type=embeddedcluster cluster, env=online, phase=new install, rbac=cluster admin", + backend_config: "embeddedcluster-online-install-backend-config.tfvars", + terraform_script: "embeddedcluster-online-install.sh" } ] steps: diff --git a/migrations/tables/k0s_tokens.yaml b/migrations/tables/embeded_cluster_tokens.yaml similarity index 84% rename from migrations/tables/k0s_tokens.yaml rename to migrations/tables/embeded_cluster_tokens.yaml index 1ae6760972..6c233ba4d9 100644 --- a/migrations/tables/k0s_tokens.yaml +++ b/migrations/tables/embeded_cluster_tokens.yaml @@ -1,9 +1,9 @@ apiVersion: schemas.schemahero.io/v1alpha4 kind: Table metadata: - name: k0s-tokens + name: embedded-cluster-tokens spec: - name: k0s_tokens + name: embedded_cluster_tokens requires: [] schema: rqlite: diff --git a/pkg/helmvm/delete_node.go b/pkg/embeddedcluster/delete_node.go similarity index 90% rename from pkg/helmvm/delete_node.go rename to pkg/embeddedcluster/delete_node.go index 24d7e5e46d..a50a99eb75 100644 --- a/pkg/helmvm/delete_node.go +++ b/pkg/embeddedcluster/delete_node.go @@ -1,4 +1,4 @@ -package helmvm +package embeddedcluster import ( "context" diff --git a/pkg/helmvm/drain_node.go b/pkg/embeddedcluster/drain_node.go similarity index 88% rename from pkg/helmvm/drain_node.go rename to pkg/embeddedcluster/drain_node.go index b8fa55afbb..0d1439b979 100644 --- a/pkg/helmvm/drain_node.go +++ b/pkg/embeddedcluster/drain_node.go @@ -1,4 +1,4 @@ -package helmvm +package embeddedcluster import ( "context" diff --git a/pkg/helmvm/exec.go b/pkg/embeddedcluster/exec.go similarity index 94% rename from pkg/helmvm/exec.go rename to pkg/embeddedcluster/exec.go index 04f94635de..be71e0a014 100644 --- a/pkg/helmvm/exec.go +++ b/pkg/embeddedcluster/exec.go @@ -1,4 +1,4 @@ 
-package helmvm +package embeddedcluster import ( corev1client "k8s.io/client-go/kubernetes/typed/core/v1" diff --git a/pkg/helmvm/helmvm_node.go b/pkg/embeddedcluster/helmvm_node.go similarity index 98% rename from pkg/helmvm/helmvm_node.go rename to pkg/embeddedcluster/helmvm_node.go index a9b17f497e..2b7fff9dd7 100644 --- a/pkg/helmvm/helmvm_node.go +++ b/pkg/embeddedcluster/helmvm_node.go @@ -1,4 +1,4 @@ -package helmvm +package embeddedcluster import ( "context" @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/replicatedhq/kots/pkg/helmvm/types" + "github.com/replicatedhq/kots/pkg/embeddedcluster/types" "github.com/replicatedhq/kots/pkg/k8sutil" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/helmvm/helmvm_nodes.go b/pkg/embeddedcluster/helmvm_nodes.go similarity index 88% rename from pkg/helmvm/helmvm_nodes.go rename to pkg/embeddedcluster/helmvm_nodes.go index 9396e6508c..7b0eb7729e 100644 --- a/pkg/helmvm/helmvm_nodes.go +++ b/pkg/embeddedcluster/helmvm_nodes.go @@ -1,9 +1,10 @@ -package helmvm +package embeddedcluster import ( "context" + "github.com/pkg/errors" - "github.com/replicatedhq/kots/pkg/helmvm/types" + "github.com/replicatedhq/kots/pkg/embeddedcluster/types" "github.com/replicatedhq/kots/pkg/k8sutil" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -12,7 +13,7 @@ import ( ) // GetNodes will get a list of nodes with stats -func GetNodes(ctx context.Context, client kubernetes.Interface) (*types.HelmVMNodes, error) { +func GetNodes(ctx context.Context, client kubernetes.Interface) (*types.EmbeddedClusterNodes, error) { nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, errors.Wrap(err, "list nodes") @@ -28,7 +29,7 @@ func GetNodes(ctx context.Context, client kubernetes.Interface) (*types.HelmVMNo return nil, errors.Wrap(err, "failed to create metrics client") } - toReturn := types.HelmVMNodes{} + toReturn := types.EmbeddedClusterNodes{} for _, node := range nodes.Items { nodeMet, err := nodeMetrics(ctx, client, metricsClient, node) @@ -39,11 +40,11 @@ func GetNodes(ctx context.Context, client kubernetes.Interface) (*types.HelmVMNo toReturn.Nodes = append(toReturn.Nodes, *nodeMet) } - isHelmVM, err := IsHelmVM(client) + isEmbeddedCluster, err := IsEmbeddedCluster(client) if err != nil { - return nil, errors.Wrap(err, "is helmvm") + return nil, errors.Wrap(err, "is embeddedcluster") } - toReturn.IsHelmVMEnabled = isHelmVM + toReturn.IsEmbeddedClusterEnabled = isEmbeddedCluster isHA, err := IsHA(client) if err != nil { diff --git a/pkg/helmvm/node_join.go b/pkg/embeddedcluster/node_join.go similarity index 97% rename from pkg/helmvm/node_join.go rename to pkg/embeddedcluster/node_join.go index 4a4bb6c068..b0835b3761 100644 --- a/pkg/helmvm/node_join.go +++ b/pkg/embeddedcluster/node_join.go @@ -1,4 +1,4 @@ -package helmvm +package embeddedcluster import ( "context" @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/replicatedhq/kots/pkg/helmvm/types" + "github.com/replicatedhq/kots/pkg/embeddedcluster/types" corev1 "k8s.io/api/core/v1" kuberneteserrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,7 +24,7 @@ type joinTokenEntry struct { var joinTokenMapMut = sync.Mutex{} var joinTokenMap = map[string]*joinTokenEntry{} -// GenerateAddNodeToken will generate the HelmVM node add command for a primary or secondary node +// GenerateAddNodeToken will generate the embedded cluster node add command for a node 
with the specified roles // join commands will last for 24 hours, and will be cached for 1 hour after first generation func GenerateAddNodeToken(ctx context.Context, client kubernetes.Interface, nodeRole string) (string, error) { // get the joinToken struct entry for this node role @@ -214,7 +214,7 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node } // GenerateAddNodeCommand returns the command a user should run to add a node with the provided token -// the command will be of the form 'helmvm node join ip:port UUID' +// the command will be of the form 'embeddedcluster node join ip:port UUID' func GenerateAddNodeCommand(ctx context.Context, client kubernetes.Interface, token string) (string, error) { cm, err := ReadConfigMap(client) if err != nil { diff --git a/pkg/helmvm/types/types.go b/pkg/embeddedcluster/types/types.go similarity index 88% rename from pkg/helmvm/types/types.go rename to pkg/embeddedcluster/types/types.go index 10bf390368..f9b93a4b8c 100644 --- a/pkg/helmvm/types/types.go +++ b/pkg/embeddedcluster/types/types.go @@ -3,10 +3,10 @@ package types const EMBEDDED_CLUSTER_LABEL = "kots.io/embedded-cluster" const EMBEDDED_CLUSTER_ROLE_LABEL = EMBEDDED_CLUSTER_LABEL + "-role" -type HelmVMNodes struct { - Nodes []Node `json:"nodes"` - HA bool `json:"ha"` - IsHelmVMEnabled bool `json:"isHelmVMEnabled"` +type EmbeddedClusterNodes struct { + Nodes []Node `json:"nodes"` + HA bool `json:"ha"` + IsEmbeddedClusterEnabled bool `json:"isEmbeddedClusterEnabled"` } type Node struct { diff --git a/pkg/helmvm/util.go b/pkg/embeddedcluster/util.go similarity index 93% rename from pkg/helmvm/util.go rename to pkg/embeddedcluster/util.go index 87b6bfe285..87f120b7d8 100644 --- a/pkg/helmvm/util.go +++ b/pkg/embeddedcluster/util.go @@ -1,4 +1,4 @@ -package helmvm +package embeddedcluster import ( "context" @@ -18,7 +18,7 @@ func ReadConfigMap(client kubernetes.Interface) (*corev1.ConfigMap, error) { return client.CoreV1().ConfigMaps(configMapNamespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) } -func IsHelmVM(clientset kubernetes.Interface) (bool, error) { +func IsEmbeddedCluster(clientset kubernetes.Interface) (bool, error) { if clientset == nil { return false, fmt.Errorf("clientset is nil") } diff --git a/pkg/handlers/helmvm_delete_node.go b/pkg/handlers/embedded_cluster_delete_node.go similarity index 82% rename from pkg/handlers/helmvm_delete_node.go rename to pkg/handlers/embedded_cluster_delete_node.go index 1b732ab07f..ea55401654 100644 --- a/pkg/handlers/helmvm_delete_node.go +++ b/pkg/handlers/embedded_cluster_delete_node.go @@ -5,14 +5,14 @@ import ( "net/http" "github.com/gorilla/mux" - "github.com/replicatedhq/kots/pkg/helmvm" + "github.com/replicatedhq/kots/pkg/embeddedcluster" "github.com/replicatedhq/kots/pkg/k8sutil" "github.com/replicatedhq/kots/pkg/logger" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (h *Handler) DeleteHelmVMNode(w http.ResponseWriter, r *http.Request) { +func (h *Handler) DeleteEmbeddedClusterNode(w http.ResponseWriter, r *http.Request) { client, err := k8sutil.GetClientset() if err != nil { logger.Error(err) @@ -41,7 +41,7 @@ func (h *Handler) DeleteHelmVMNode(w http.ResponseWriter, r *http.Request) { return } - if err := helmvm.DeleteNode(ctx, client, restconfig, node); err != nil { + if err := embeddedcluster.DeleteNode(ctx, client, restconfig, node); err != nil { logger.Error(err) w.WriteHeader(http.StatusInternalServerError) return diff --git 
a/pkg/handlers/helmvm_drain_node.go b/pkg/handlers/embedded_cluster_drain_node.go similarity index 82% rename from pkg/handlers/helmvm_drain_node.go rename to pkg/handlers/embedded_cluster_drain_node.go index ae0a337f6f..ae59dc5071 100644 --- a/pkg/handlers/helmvm_drain_node.go +++ b/pkg/handlers/embedded_cluster_drain_node.go @@ -5,14 +5,14 @@ import ( "net/http" "github.com/gorilla/mux" - "github.com/replicatedhq/kots/pkg/helmvm" + "github.com/replicatedhq/kots/pkg/embeddedcluster" "github.com/replicatedhq/kots/pkg/k8sutil" "github.com/replicatedhq/kots/pkg/logger" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (h *Handler) DrainHelmVMNode(w http.ResponseWriter, r *http.Request) { +func (h *Handler) DrainEmbeddedClusterNode(w http.ResponseWriter, r *http.Request) { client, err := k8sutil.GetClientset() if err != nil { logger.Error(err) @@ -36,7 +36,7 @@ func (h *Handler) DrainHelmVMNode(w http.ResponseWriter, r *http.Request) { // This pod may get evicted and not be able to respond to the request go func() { - if err := helmvm.DrainNode(ctx, client, node); err != nil { + if err := embeddedcluster.DrainNode(ctx, client, node); err != nil { logger.Error(err) return } diff --git a/pkg/handlers/helmvm_get.go b/pkg/handlers/embedded_cluster_get.go similarity index 67% rename from pkg/handlers/helmvm_get.go rename to pkg/handlers/embedded_cluster_get.go index 4f736996ed..b08d02072a 100644 --- a/pkg/handlers/helmvm_get.go +++ b/pkg/handlers/embedded_cluster_get.go @@ -4,12 +4,12 @@ import ( "net/http" "github.com/gorilla/mux" - "github.com/replicatedhq/kots/pkg/helmvm" + "github.com/replicatedhq/kots/pkg/embeddedcluster" "github.com/replicatedhq/kots/pkg/k8sutil" "github.com/replicatedhq/kots/pkg/logger" ) -func (h *Handler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { +func (h *Handler) GetEmbeddedClusterNodes(w http.ResponseWriter, r *http.Request) { client, err := k8sutil.GetClientset() if err != nil { logger.Error(err) @@ -17,7 +17,7 @@ func (h *Handler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { return } - nodes, err := helmvm.GetNodes(r.Context(), client) + nodes, err := embeddedcluster.GetNodes(r.Context(), client) if err != nil { logger.Error(err) w.WriteHeader(http.StatusInternalServerError) @@ -26,7 +26,7 @@ func (h *Handler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { JSON(w, http.StatusOK, nodes) } -func (h *Handler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) { +func (h *Handler) GetEmbeddedClusterNode(w http.ResponseWriter, r *http.Request) { client, err := k8sutil.GetClientset() if err != nil { logger.Error(err) @@ -35,7 +35,7 @@ func (h *Handler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) { } nodeName := mux.Vars(r)["nodeName"] - node, err := helmvm.GetNode(r.Context(), client, nodeName) + node, err := embeddedcluster.GetNode(r.Context(), client, nodeName) if err != nil { logger.Error(err) w.WriteHeader(http.StatusInternalServerError) diff --git a/pkg/handlers/helmvm_node_join_command.go b/pkg/handlers/embedded_cluster_node_join_command.go similarity index 64% rename from pkg/handlers/helmvm_node_join_command.go rename to pkg/handlers/embedded_cluster_node_join_command.go index b4d6a0da4f..ca1dbed32a 100644 --- a/pkg/handlers/helmvm_node_join_command.go +++ b/pkg/handlers/embedded_cluster_node_join_command.go @@ -5,36 +5,36 @@ import ( "fmt" "net/http" - "github.com/replicatedhq/kots/pkg/helmvm" + "github.com/replicatedhq/kots/pkg/embeddedcluster" 
"github.com/replicatedhq/kots/pkg/k8sutil" "github.com/replicatedhq/kots/pkg/logger" "github.com/replicatedhq/kots/pkg/store/kotsstore" ) -type GenerateK0sNodeJoinCommandResponse struct { +type GenerateEmbeddedClusterNodeJoinCommandResponse struct { Command []string `json:"command"` } -type GetK0sNodeJoinCommandResponse struct { +type GetEmbeddedClusterNodeJoinCommandResponse struct { ClusterID string `json:"clusterID"` K0sJoinCommand string `json:"k0sJoinCommand"` K0sToken string `json:"k0sToken"` } -type GenerateHelmVMNodeJoinCommandRequest struct { +type GenerateEmbeddedClusterNodeJoinCommandRequest struct { Roles []string `json:"roles"` } -func (h *Handler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { - generateHelmVMNodeJoinCommandRequest := GenerateHelmVMNodeJoinCommandRequest{} - if err := json.NewDecoder(r.Body).Decode(&generateHelmVMNodeJoinCommandRequest); err != nil { +func (h *Handler) GenerateEmbeddedClusterNodeJoinCommand(w http.ResponseWriter, r *http.Request) { + generateEmbeddedClusterNodeJoinCommandRequest := GenerateEmbeddedClusterNodeJoinCommandRequest{} + if err := json.NewDecoder(r.Body).Decode(&generateEmbeddedClusterNodeJoinCommandRequest); err != nil { logger.Error(fmt.Errorf("failed to decode request body: %w", err)) w.WriteHeader(http.StatusBadRequest) return } store := kotsstore.StoreFromEnv() - token, err := store.SetK0sInstallCommandRoles(generateHelmVMNodeJoinCommandRequest.Roles) + token, err := store.SetEmbeddedClusterInstallCommandRoles(generateEmbeddedClusterNodeJoinCommandRequest.Roles) if err != nil { logger.Error(fmt.Errorf("failed to set k0s install command roles: %w", err)) w.WriteHeader(http.StatusInternalServerError) @@ -47,24 +47,24 @@ func (h *Handler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Requ w.WriteHeader(http.StatusInternalServerError) return } - nodeJoinCommand, err := helmvm.GenerateAddNodeCommand(r.Context(), client, token) + nodeJoinCommand, err := embeddedcluster.GenerateAddNodeCommand(r.Context(), client, token) if err != nil { logger.Error(fmt.Errorf("failed to generate add node command: %w", err)) w.WriteHeader(http.StatusInternalServerError) return } - JSON(w, http.StatusOK, GenerateK0sNodeJoinCommandResponse{ + JSON(w, http.StatusOK, GenerateEmbeddedClusterNodeJoinCommandResponse{ Command: []string{nodeJoinCommand}, }) } // this function relies on the token being valid for authentication -func (h *Handler) GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { +func (h *Handler) GetEmbeddedClusterNodeJoinCommand(w http.ResponseWriter, r *http.Request) { // read query string, ensure that the token is valid token := r.URL.Query().Get("token") store := kotsstore.StoreFromEnv() - roles, err := store.GetK0sInstallCommandRoles(token) + roles, err := store.GetEmbeddedClusterInstallCommandRoles(token) if err != nil { logger.Error(fmt.Errorf("failed to get k0s install command roles: %w", err)) w.WriteHeader(http.StatusInternalServerError) @@ -87,28 +87,28 @@ func (h *Handler) GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) } } - k0sToken, err := helmvm.GenerateAddNodeToken(r.Context(), client, k0sRole) + k0sToken, err := embeddedcluster.GenerateAddNodeToken(r.Context(), client, k0sRole) if err != nil { logger.Error(fmt.Errorf("failed to generate add node token: %w", err)) w.WriteHeader(http.StatusInternalServerError) return } - k0sJoinCommand, err := helmvm.GenerateK0sJoinCommand(r.Context(), client, roles) + k0sJoinCommand, err := 
embeddedcluster.GenerateK0sJoinCommand(r.Context(), client, roles) if err != nil { logger.Error(fmt.Errorf("failed to generate k0s join command: %w", err)) w.WriteHeader(http.StatusInternalServerError) return } - clusterID, err := helmvm.ClusterID(client) + clusterID, err := embeddedcluster.ClusterID(client) if err != nil { logger.Error(fmt.Errorf("failed to get cluster id: %w", err)) w.WriteHeader(http.StatusInternalServerError) return } - JSON(w, http.StatusOK, GetK0sNodeJoinCommandResponse{ + JSON(w, http.StatusOK, GetEmbeddedClusterNodeJoinCommandResponse{ ClusterID: clusterID, K0sJoinCommand: k0sJoinCommand, K0sToken: k0sToken, diff --git a/pkg/handlers/handlers.go b/pkg/handlers/handlers.go index 2ebf1fea32..f8853fc4b1 100644 --- a/pkg/handlers/handlers.go +++ b/pkg/handlers/handlers.go @@ -275,18 +275,18 @@ func RegisterSessionAuthRoutes(r *mux.Router, kotsStore store.Store, handler KOT r.Name("GetKurlNodes").Path("/api/v1/kurl/nodes").Methods("GET"). HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetKurlNodes)) - // HelmVM - r.Name("HelmVM").Path("/api/v1/helmvm").HandlerFunc(NotImplemented) - r.Name("GenerateK0sNodeJoinCommand").Path("/api/v1/helmvm/generate-node-join-command").Methods("POST"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateK0sNodeJoinCommand)) - r.Name("DrainHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}/drain").Methods("POST"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DrainHelmVMNode)) - r.Name("DeleteHelmVMNode").Path("/api/v1/helmvm/nodes/{nodeName}").Methods("DELETE"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DeleteHelmVMNode)) - r.Name("GetHelmVMNodes").Path("/api/v1/helmvm/nodes").Methods("GET"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetHelmVMNodes)) - r.Name("GetHelmVMNode").Path("/api/v1/helmvm/node/{nodeName}").Methods("GET"). - HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetHelmVMNode)) + // Embedded Cluster + r.Name("EmbeddedCluster").Path("/api/v1/embedded-cluster").HandlerFunc(NotImplemented) + r.Name("GenerateEmbeddedClusterNodeJoinCommand").Path("/api/v1/embedded-cluster/generate-node-join-command").Methods("POST"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.GenerateEmbeddedClusterNodeJoinCommand)) + r.Name("DrainEmbeddedClusterNode").Path("/api/v1/embedded-cluster/nodes/{nodeName}/drain").Methods("POST"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DrainEmbeddedClusterNode)) + r.Name("DeleteEmbeddedClusterNode").Path("/api/v1/embedded-cluster/nodes/{nodeName}").Methods("DELETE"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterWrite, handler.DeleteEmbeddedClusterNode)) + r.Name("GetEmbeddedClusterNodes").Path("/api/v1/embedded-cluster/nodes").Methods("GET"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetEmbeddedClusterNodes)) + r.Name("GetEmbeddedClusterNode").Path("/api/v1/embedded-cluster/node/{nodeName}").Methods("GET"). + HandlerFunc(middleware.EnforceAccess(policy.ClusterRead, handler.GetEmbeddedClusterNode)) // Prometheus r.Name("SetPrometheusAddress").Path("/api/v1/prometheus").Methods("POST"). 
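The renamed routes implement a two-step join flow: an authenticated caller POSTs the desired roles to generate-node-join-command and receives a command that embeds a one-time token, and the joining machine later redeems that token on the unauthenticated /api/v1/embedded-cluster/join route registered in the next hunk. A hypothetical client-side sketch, not code from this series; the request and response shapes follow GenerateEmbeddedClusterNodeJoinCommandRequest and its response struct above, while apiEndpoint and the auth header are assumptions:

async function fetchJoinCommand(
  apiEndpoint: string,
  sessionToken: string
): Promise<string[]> {
  const res = await fetch(
    `${apiEndpoint}/api/v1/embedded-cluster/generate-node-join-command`,
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: sessionToken, // assumed: route sits behind session auth
      },
      body: JSON.stringify({ roles: ["controller"] }),
    }
  );
  const { command } = await res.json();
  return command; // shell command(s) to run on the machine joining the cluster
}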
@@ -357,7 +357,7 @@ func RegisterUnauthenticatedRoutes(handler *Handler, kotsStore store.Store, debu loggingRouter.Path("/api/v1/app/custom-metrics").Methods("POST").HandlerFunc(handler.GetSendCustomAppMetricsHandler(kotsStore)) // This handler requires a valid token in the query - loggingRouter.Path("/api/v1/embedded-cluster/join").Methods("GET").HandlerFunc(handler.GetK0sNodeJoinCommand) + loggingRouter.Path("/api/v1/embedded-cluster/join").Methods("GET").HandlerFunc(handler.GetEmbeddedClusterNodeJoinCommand) } func StreamJSON(c *websocket.Conn, payload interface{}) { diff --git a/pkg/handlers/handlers_test.go b/pkg/handlers/handlers_test.go index 9d4ee77a74..c502bb6733 100644 --- a/pkg/handlers/handlers_test.go +++ b/pkg/handlers/handlers_test.go @@ -1209,66 +1209,56 @@ var HandlerPolicyTests = map[string][]HandlerPolicyTest{ }, }, - "HelmVM": {}, // Not implemented - "GenerateK0sNodeJoinCommand": { + "EmbeddedCluster": {}, // Not implemented + "GenerateEmbeddedClusterNodeJoinCommand": { { Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.GenerateK0sNodeJoinCommand(gomock.Any(), gomock.Any()) + handlerRecorder.GenerateEmbeddedClusterNodeJoinCommand(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "DrainHelmVMNode": { + "DrainEmbeddedClusterNode": { { Vars: map[string]string{"nodeName": "node-name"}, Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.DrainHelmVMNode(gomock.Any(), gomock.Any()) + handlerRecorder.DrainEmbeddedClusterNode(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "DeleteHelmVMNode": { + "DeleteEmbeddedClusterNode": { { Vars: map[string]string{"nodeName": "node-name"}, Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.DeleteHelmVMNode(gomock.Any(), gomock.Any()) + handlerRecorder.DeleteEmbeddedClusterNode(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "GetHelmVMNodes": { + "GetEmbeddedClusterNodes": { { Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.GetHelmVMNodes(gomock.Any(), gomock.Any()) + handlerRecorder.GetEmbeddedClusterNodes(gomock.Any(), gomock.Any()) }, ExpectStatus: http.StatusOK, }, }, - "GetHelmVMNode": { + "GetEmbeddedClusterNode": { { Vars: map[string]string{"nodeName": "node-name"}, Roles: []rbactypes.Role{rbac.ClusterAdminRole}, SessionRoles: []string{rbac.ClusterAdminRoleID}, Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) { - handlerRecorder.GetHelmVMNode(gomock.Any(), gomock.Any()) - }, - ExpectStatus: http.StatusOK, - }, - }, - "GetK0sNodeJoinCommand": { - { - Roles: []rbactypes.Role{rbac.ClusterAdminRole}, - SessionRoles: []string{rbac.ClusterAdminRoleID}, - Calls: func(storeRecorder *mock_store.MockStoreMockRecorder, handlerRecorder *mock_handlers.MockKOTSHandlerMockRecorder) 
{
-				handlerRecorder.GetK0sNodeJoinCommand(gomock.Any(), gomock.Any())
+				handlerRecorder.GetEmbeddedClusterNode(gomock.Any(), gomock.Any())
 			},
 			ExpectStatus: http.StatusOK,
 		},
 	},
diff --git a/pkg/handlers/interface.go b/pkg/handlers/interface.go
index 67dd05d3bc..a8fe1bd4dc 100644
--- a/pkg/handlers/interface.go
+++ b/pkg/handlers/interface.go
@@ -138,13 +138,12 @@ type KOTSHandler interface {
 	DeleteKurlNode(w http.ResponseWriter, r *http.Request)
 	GetKurlNodes(w http.ResponseWriter, r *http.Request)
 
-	// HelmVM
-	GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request)
-	DrainHelmVMNode(w http.ResponseWriter, r *http.Request)
-	DeleteHelmVMNode(w http.ResponseWriter, r *http.Request)
-	GetHelmVMNodes(w http.ResponseWriter, r *http.Request)
-	GetHelmVMNode(w http.ResponseWriter, r *http.Request)
-	GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request)
+	// EmbeddedCluster
+	GenerateEmbeddedClusterNodeJoinCommand(w http.ResponseWriter, r *http.Request)
+	DrainEmbeddedClusterNode(w http.ResponseWriter, r *http.Request)
+	DeleteEmbeddedClusterNode(w http.ResponseWriter, r *http.Request)
+	GetEmbeddedClusterNodes(w http.ResponseWriter, r *http.Request)
+	GetEmbeddedClusterNode(w http.ResponseWriter, r *http.Request)
 
 	// Prometheus
 	SetPrometheusAddress(w http.ResponseWriter, r *http.Request)
diff --git a/pkg/handlers/metadata.go b/pkg/handlers/metadata.go
index 81903b26dd..cc7253551e 100644
--- a/pkg/handlers/metadata.go
+++ b/pkg/handlers/metadata.go
@@ -50,9 +50,9 @@ type MetadataResponseBranding struct {
 }
 
 type AdminConsoleMetadata struct {
-	IsAirgap bool `json:"isAirgap"`
-	IsKurl   bool `json:"isKurl"`
-	IsHelmVM bool `json:"isHelmVM"`
+	IsAirgap          bool `json:"isAirgap"`
+	IsKurl            bool `json:"isKurl"`
+	IsEmbeddedCluster bool `json:"isEmbeddedCluster"`
 }
 
 // GetMetadataHandler helper function that returns a http handler func that returns metadata. It takes a function that
@@ -73,7 +73,7 @@ func GetMetadataHandler(getK8sInfoFn MetadataK8sFn, kotsStore store.Store) http.
 		if kuberneteserrors.IsNotFound(err) {
 			metadataResponse.AdminConsoleMetadata.IsAirgap = kotsadmMetadata.IsAirgap
 			metadataResponse.AdminConsoleMetadata.IsKurl = kotsadmMetadata.IsKurl
-			metadataResponse.AdminConsoleMetadata.IsHelmVM = kotsadmMetadata.IsHelmVM
+			metadataResponse.AdminConsoleMetadata.IsEmbeddedCluster = kotsadmMetadata.IsEmbeddedCluster
 
 			logger.Info(fmt.Sprintf("config map %q not found", metadataConfigMapName))
 			JSON(w, http.StatusOK, &metadataResponse)
@@ -114,9 +114,9 @@ func GetMetadataHandler(getK8sInfoFn MetadataK8sFn, kotsStore store.Store) http.
 	metadataResponse.UpstreamURI = brandingConfigMap.Data[upstreamUriKey]
 	metadataResponse.ConsoleFeatureFlags = application.Spec.ConsoleFeatureFlags
 	metadataResponse.AdminConsoleMetadata = AdminConsoleMetadata{
-		IsAirgap: kotsadmMetadata.IsAirgap,
-		IsKurl:   kotsadmMetadata.IsKurl,
-		IsHelmVM: kotsadmMetadata.IsHelmVM,
+		IsAirgap:          kotsadmMetadata.IsAirgap,
+		IsKurl:            kotsadmMetadata.IsKurl,
+		IsEmbeddedCluster: kotsadmMetadata.IsEmbeddedCluster,
 	}
 
 	JSON(w, http.StatusOK, metadataResponse)
diff --git a/pkg/handlers/mock/mock.go b/pkg/handlers/mock/mock.go
index 736883ad70..c122017330 100644
--- a/pkg/handlers/mock/mock.go
+++ b/pkg/handlers/mock/mock.go
@@ -262,16 +262,16 @@ func (mr *MockKOTSHandlerMockRecorder) DeleteBackup(w, r interface{}) *gomock.Ca
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBackup", reflect.TypeOf((*MockKOTSHandler)(nil).DeleteBackup), w, r)
 }
-func (m *MockKOTSHandler) DeleteHelmVMNode(w http.ResponseWriter, r *http.Request) { +// DeleteEmbeddedClusterNode mocks base method. +func (m *MockKOTSHandler) DeleteEmbeddedClusterNode(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DeleteHelmVMNode", w, r) + m.ctrl.Call(m, "DeleteEmbeddedClusterNode", w, r) } -// DeleteHelmVMNode indicates an expected call of DeleteHelmVMNode. -func (mr *MockKOTSHandlerMockRecorder) DeleteHelmVMNode(w, r interface{}) *gomock.Call { +// DeleteEmbeddedClusterNode indicates an expected call of DeleteEmbeddedClusterNode. +func (mr *MockKOTSHandlerMockRecorder) DeleteEmbeddedClusterNode(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteHelmVMNode", reflect.TypeOf((*MockKOTSHandler)(nil).DeleteHelmVMNode), w, r) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEmbeddedClusterNode", reflect.TypeOf((*MockKOTSHandler)(nil).DeleteEmbeddedClusterNode), w, r) } // DeleteKurlNode mocks base method. @@ -394,16 +394,16 @@ func (mr *MockKOTSHandlerMockRecorder) DownloadSupportBundle(w, r interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadSupportBundle", reflect.TypeOf((*MockKOTSHandler)(nil).DownloadSupportBundle), w, r) } -// DrainHelmVMNode mocks base method. -func (m *MockKOTSHandler) DrainHelmVMNode(w http.ResponseWriter, r *http.Request) { +// DrainEmbeddedClusterNode mocks base method. +func (m *MockKOTSHandler) DrainEmbeddedClusterNode(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() - m.ctrl.Call(m, "DrainHelmVMNode", w, r) + m.ctrl.Call(m, "DrainEmbeddedClusterNode", w, r) } -// DrainHelmVMNode indicates an expected call of DrainHelmVMNode. -func (mr *MockKOTSHandlerMockRecorder) DrainHelmVMNode(w, r interface{}) *gomock.Call { +// DrainEmbeddedClusterNode indicates an expected call of DrainEmbeddedClusterNode. +func (mr *MockKOTSHandlerMockRecorder) DrainEmbeddedClusterNode(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DrainHelmVMNode", reflect.TypeOf((*MockKOTSHandler)(nil).DrainHelmVMNode), w, r) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DrainEmbeddedClusterNode", reflect.TypeOf((*MockKOTSHandler)(nil).DrainEmbeddedClusterNode), w, r) } // DrainKurlNode mocks base method. @@ -442,16 +442,16 @@ func (mr *MockKOTSHandlerMockRecorder) GarbageCollectImages(w, r interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollectImages", reflect.TypeOf((*MockKOTSHandler)(nil).GarbageCollectImages), w, r) } -// GenerateK0sNodeJoinCommand mocks base method. -func (m *MockKOTSHandler) GenerateK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { +// GenerateEmbeddedClusterNodeJoinCommand mocks base method. +func (m *MockKOTSHandler) GenerateEmbeddedClusterNodeJoinCommand(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() - m.ctrl.Call(m, "GenerateK0sNodeJoinCommand", w, r) + m.ctrl.Call(m, "GenerateEmbeddedClusterNodeJoinCommand", w, r) } -// GenerateK0sNodeJoinCommand indicates an expected call of GenerateK0sNodeJoinCommand. -func (mr *MockKOTSHandlerMockRecorder) GenerateK0sNodeJoinCommand(w, r interface{}) *gomock.Call { +// GenerateEmbeddedClusterNodeJoinCommand indicates an expected call of GenerateEmbeddedClusterNodeJoinCommand. 
+func (mr *MockKOTSHandlerMockRecorder) GenerateEmbeddedClusterNodeJoinCommand(w, r interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateK0sNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateK0sNodeJoinCommand), w, r) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateEmbeddedClusterNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GenerateEmbeddedClusterNodeJoinCommand), w, r) } // GenerateKurlNodeJoinCommandMaster mocks base method. @@ -706,6 +706,30 @@ func (mr *MockKOTSHandlerMockRecorder) GetDownstreamOutput(w, r interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDownstreamOutput", reflect.TypeOf((*MockKOTSHandler)(nil).GetDownstreamOutput), w, r) } +// GetEmbeddedClusterNode mocks base method. +func (m *MockKOTSHandler) GetEmbeddedClusterNode(w http.ResponseWriter, r *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GetEmbeddedClusterNode", w, r) +} + +// GetEmbeddedClusterNode indicates an expected call of GetEmbeddedClusterNode. +func (mr *MockKOTSHandlerMockRecorder) GetEmbeddedClusterNode(w, r interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEmbeddedClusterNode", reflect.TypeOf((*MockKOTSHandler)(nil).GetEmbeddedClusterNode), w, r) +} + +// GetEmbeddedClusterNodes mocks base method. +func (m *MockKOTSHandler) GetEmbeddedClusterNodes(w http.ResponseWriter, r *http.Request) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "GetEmbeddedClusterNodes", w, r) +} + +// GetEmbeddedClusterNodes indicates an expected call of GetEmbeddedClusterNodes. +func (mr *MockKOTSHandlerMockRecorder) GetEmbeddedClusterNodes(w, r interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEmbeddedClusterNodes", reflect.TypeOf((*MockKOTSHandler)(nil).GetEmbeddedClusterNodes), w, r) +} + // GetFileSystemSnapshotProviderInstructions mocks base method. func (m *MockKOTSHandler) GetFileSystemSnapshotProviderInstructions(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() @@ -742,30 +766,6 @@ func (mr *MockKOTSHandlerMockRecorder) GetGlobalSnapshotSettings(w, r interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGlobalSnapshotSettings", reflect.TypeOf((*MockKOTSHandler)(nil).GetGlobalSnapshotSettings), w, r) } -// GetHelmVMNode mocks base method. -func (m *MockKOTSHandler) GetHelmVMNode(w http.ResponseWriter, r *http.Request) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "GetHelmVMNode", w, r) -} - -// GetHelmVMNode indicates an expected call of GetHelmVMNode. -func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNode(w, r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNode", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNode), w, r) -} - -// GetHelmVMNodes mocks base method. -func (m *MockKOTSHandler) GetHelmVMNodes(w http.ResponseWriter, r *http.Request) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "GetHelmVMNodes", w, r) -} - -// GetHelmVMNodes indicates an expected call of GetHelmVMNodes. -func (mr *MockKOTSHandlerMockRecorder) GetHelmVMNodes(w, r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHelmVMNodes", reflect.TypeOf((*MockKOTSHandler)(nil).GetHelmVMNodes), w, r) -} - // GetIdentityServiceConfig mocks base method. 
func (m *MockKOTSHandler) GetIdentityServiceConfig(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() @@ -802,18 +802,6 @@ func (mr *MockKOTSHandlerMockRecorder) GetInstanceSnapshotConfig(w, r interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceSnapshotConfig", reflect.TypeOf((*MockKOTSHandler)(nil).GetInstanceSnapshotConfig), w, r) } -// GetK0sNodeJoinCommand mocks base method. -func (m *MockKOTSHandler) GetK0sNodeJoinCommand(w http.ResponseWriter, r *http.Request) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "GetK0sNodeJoinCommand", w, r) -} - -// GetK0sNodeJoinCommand indicates an expected call of GetK0sNodeJoinCommand. -func (mr *MockKOTSHandlerMockRecorder) GetK0sNodeJoinCommand(w, r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetK0sNodeJoinCommand", reflect.TypeOf((*MockKOTSHandler)(nil).GetK0sNodeJoinCommand), w, r) -} - // GetKotsadmRegistry mocks base method. func (m *MockKOTSHandler) GetKotsadmRegistry(w http.ResponseWriter, r *http.Request) { m.ctrl.T.Helper() diff --git a/pkg/kotsadm/metadata.go b/pkg/kotsadm/metadata.go index 9c9b045cb0..9c2c09b90a 100644 --- a/pkg/kotsadm/metadata.go +++ b/pkg/kotsadm/metadata.go @@ -1,7 +1,7 @@ package kotsadm import ( - "github.com/replicatedhq/kots/pkg/helmvm" + "github.com/replicatedhq/kots/pkg/embeddedcluster" "github.com/replicatedhq/kots/pkg/kotsadm/types" "github.com/replicatedhq/kots/pkg/kurl" "k8s.io/client-go/kubernetes" @@ -9,12 +9,12 @@ import ( func GetMetadata(clientset kubernetes.Interface) types.Metadata { isKurl, _ := kurl.IsKurl(clientset) - isHelmVM, _ := helmvm.IsHelmVM(clientset) + isEmbeddedCluster, _ := embeddedcluster.IsEmbeddedCluster(clientset) metadata := types.Metadata{ - IsAirgap: IsAirgap(), - IsKurl: isKurl, - IsHelmVM: isHelmVM, + IsAirgap: IsAirgap(), + IsKurl: isKurl, + IsEmbeddedCluster: isEmbeddedCluster, } return metadata diff --git a/pkg/kotsadm/types/metadata.go b/pkg/kotsadm/types/metadata.go index 79e8b142a6..8ff2225f42 100644 --- a/pkg/kotsadm/types/metadata.go +++ b/pkg/kotsadm/types/metadata.go @@ -1,7 +1,7 @@ package types type Metadata struct { - IsAirgap bool - IsKurl bool - IsHelmVM bool + IsAirgap bool + IsKurl bool + IsEmbeddedCluster bool } diff --git a/pkg/store/kotsstore/k0s_store.go b/pkg/store/kotsstore/embedded_cluster_store.go similarity index 71% rename from pkg/store/kotsstore/k0s_store.go rename to pkg/store/kotsstore/embedded_cluster_store.go index 4eb7a9bd7b..39cc5cda5b 100644 --- a/pkg/store/kotsstore/k0s_store.go +++ b/pkg/store/kotsstore/embedded_cluster_store.go @@ -9,18 +9,18 @@ import ( "github.com/replicatedhq/kots/pkg/rand" ) -func (s *KOTSStore) SetK0sInstallCommandRoles(roles []string) (string, error) { +func (s *KOTSStore) SetEmbeddedClusterInstallCommandRoles(roles []string) (string, error) { db := persistence.MustGetDBSession() installID := rand.StringWithCharset(24, rand.LOWER_CASE+rand.UPPER_CASE) - query := `delete from k0s_tokens where token = ?` + query := `delete from embedded_cluster_tokens where token = ?` wr, err := db.WriteOneParameterized(gorqlite.ParameterizedStatement{ Query: query, Arguments: []interface{}{installID}, }) if err != nil { - return "", fmt.Errorf("delete k0s join token: %v: %v", err, wr.Err) + return "", fmt.Errorf("delete embedded_cluster join token: %v: %v", err, wr.Err) } jsonRoles, err := json.Marshal(roles) @@ -28,21 +28,21 @@ func (s *KOTSStore) SetK0sInstallCommandRoles(roles []string) (string, error) { return "", fmt.Errorf("failed to 
 	}
 
-	query = `insert into k0s_tokens (token, roles) values (?, ?)`
+	query = `insert into embedded_cluster_tokens (token, roles) values (?, ?)`
 	wr, err = db.WriteOneParameterized(gorqlite.ParameterizedStatement{
 		Query:     query,
 		Arguments: []interface{}{installID, string(jsonRoles)},
 	})
 	if err != nil {
-		return "", fmt.Errorf("insert k0s join token: %v: %v", err, wr.Err)
+		return "", fmt.Errorf("insert embedded_cluster join token: %v: %v", err, wr.Err)
 	}
 
 	return installID, nil
 }
 
-func (s *KOTSStore) GetK0sInstallCommandRoles(token string) ([]string, error) {
+func (s *KOTSStore) GetEmbeddedClusterInstallCommandRoles(token string) ([]string, error) {
 	db := persistence.MustGetDBSession()
-	query := `select roles from k0s_tokens where token = ?`
+	query := `select roles from embedded_cluster_tokens where token = ?`
 	rows, err := db.QueryOneParameterized(gorqlite.ParameterizedStatement{
 		Query:     query,
 		Arguments: []interface{}{token},
diff --git a/web/src/Root.tsx b/web/src/Root.tsx
index a78e3b46bc..61fb86c592 100644
--- a/web/src/Root.tsx
+++ b/web/src/Root.tsx
@@ -1,6 +1,6 @@
-import React, { useReducer, useEffect } from "react";
+import React, { useEffect, useReducer } from "react";
 import { createBrowserHistory } from "history";
-import { Route, Routes, Navigate, useNavigate } from "react-router-dom";
+import { Navigate, Route, Routes, useNavigate } from "react-router-dom";
 import { Helmet } from "react-helmet";
 import Modal from "react-modal";
 import find from "lodash/find";
@@ -10,10 +10,10 @@ import PreflightResultPage from "./components/PreflightResultPage";
 import AppConfig from "./features/AppConfig/components/AppConfig";
 import { AppDetailPage } from "./components/apps/AppDetailPage";
 import KurlClusterManagement from "./components/apps/KurlClusterManagement";
-import HelmVMClusterManagement from "./components/apps/HelmVMClusterManagement";
+import EmbeddedClusterManagement from "@components/apps/EmbeddedClusterManagement";
 import UnsupportedBrowser from "./components/static/UnsupportedBrowser";
 import NotFound from "./components/static/NotFound";
-import { Utilities, parseUpstreamUri } from "./utilities/utilities";
+import { parseUpstreamUri, Utilities } from "./utilities/utilities";
 import fetch from "./utilities/fetchWithTimeout";
 import { SecureAdminConsole } from "@features/Auth";
 import UploadLicenseFile from "./components/UploadLicenseFile";
@@ -58,7 +58,7 @@ import SnapshotDetails from "@components/snapshots/SnapshotDetails";
 import SnapshotRestore from "@components/snapshots/SnapshotRestore";
 import AppSnapshots from "@components/snapshots/AppSnapshots";
 import AppSnapshotRestore from "@components/snapshots/AppSnapshotRestore";
-import HelmVMViewNode from "@components/apps/HelmVMViewNode";
+import EmbeddedClusterViewNode from "@components/apps/EmbeddedClusterViewNode";
 
 // react-query client
 const queryClient = new QueryClient();
@@ -467,7 +467,9 @@ const Root = () => {
                 refetchAppsList={getAppsList}
                 fetchingMetadata={state.fetchingMetadata}
                 isKurlEnabled={Boolean(state.adminConsoleMetadata?.isKurl)}
-                isHelmVMEnabled={Boolean(state.adminConsoleMetadata?.isHelmVM)}
+                isEmbeddedClusterEnabled={Boolean(
+                  state.adminConsoleMetadata?.isEmbeddedCluster
+                )}
                 isGitOpsSupported={isGitOpsSupported()}
                 isIdentityServiceSupported={isIdentityServiceSupported()}
                 appsList={state.appsList}
@@ -532,7 +534,9 @@ const Root = () => {
                     appSlugFromMetadata={state.appSlugFromMetadata || ""}
                     fetchingMetadata={state.fetchingMetadata}
                     onUploadSuccess={getAppsList}
-                    isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)}
+                    isEmbeddedCluster={Boolean(
+                      state.adminConsoleMetadata?.isEmbeddedCluster
+                    )}
                   />
                 }
               />
@@ -575,33 +579,38 @@ const Root = () => {
                 }
               />
             }
           />
-          {state.adminConsoleMetadata?.isHelmVM && (
+          {state.adminConsoleMetadata?.isEmbeddedCluster && (
             <>
               }
+                element={
+
+                }
               />
               }
+                element={}
               />
           )}
           {(state.adminConsoleMetadata?.isKurl ||
-            state.adminConsoleMetadata?.isHelmVM) && (
+            state.adminConsoleMetadata?.isEmbeddedCluster) && (
                 ) : (
-
+
                 )
               }
             />
           )}
-          {state.adminConsoleMetadata?.isHelmVM && (
-            } />
+          {state.adminConsoleMetadata?.isEmbeddedCluster && (
+              }
+            />
           )}
               {
                   snapshotInProgressApps={state.snapshotInProgressApps}
                   ping={ping}
                   isHelmManaged={state.isHelmManaged}
-                  isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)}
+                  isEmbeddedCluster={Boolean(
+                    state.adminConsoleMetadata?.isEmbeddedCluster
+                  )}
                 />
               }
             />
@@ -708,7 +719,9 @@ const Root = () => {
                   snapshotInProgressApps={state.snapshotInProgressApps}
                   ping={ping}
                   isHelmManaged={state.isHelmManaged}
-                  isHelmVM={Boolean(state.adminConsoleMetadata?.isHelmVM)}
+                  isEmbeddedCluster={Boolean(
+                    state.adminConsoleMetadata?.isEmbeddedCluster
+                  )}
                 />
               }
             >
diff --git a/web/src/components/UploadLicenseFile.tsx b/web/src/components/UploadLicenseFile.tsx
index e08cf5b829..9af2e5e3c4 100644
--- a/web/src/components/UploadLicenseFile.tsx
+++ b/web/src/components/UploadLicenseFile.tsx
@@ -67,7 +67,7 @@ type Props = {
   onUploadSuccess: () => Promise;
   logo: string | null;
   snapshot?: { name: string };
-  isHelmVM: boolean;
+  isEmbeddedCluster: boolean;
 };
 
 const UploadLicenseFile = (props: Props) => {
@@ -261,7 +261,7 @@ const UploadLicenseFile = (props: Props) => {
       return;
     }
 
-    if (props.isHelmVM) {
+    if (props.isEmbeddedCluster) {
       navigate(`/${data.slug}/cluster/manage`, { replace: true });
       return;
     }
diff --git a/web/src/components/apps/AppDetailPage.tsx b/web/src/components/apps/AppDetailPage.tsx
index 46fbe7a2e2..aa0b2d0ef1 100644
--- a/web/src/components/apps/AppDetailPage.tsx
+++ b/web/src/components/apps/AppDetailPage.tsx
@@ -1,10 +1,12 @@
-import React, { Fragment, useReducer, useEffect, useState } from "react";
+import React, { Fragment, useEffect, useReducer, useState } from "react";
 import classNames from "classnames";
-import { useNavigate, useParams, Outlet } from "react-router-dom";
+import { Outlet, useNavigate, useParams } from "react-router-dom";
 import Modal from "react-modal";
 import { useTheme } from "@src/components/context/withTheme";
-import { KotsSidebarItem } from "@src/components/watches/WatchSidebarItem";
-import { HelmChartSidebarItem } from "@src/components/watches/WatchSidebarItem";
+import {
+  HelmChartSidebarItem,
+  KotsSidebarItem,
+} from "@src/components/watches/WatchSidebarItem";
 import { isAwaitingResults } from "../../utilities/utilities";
 
 import SubNavBar from "@src/components/shared/SubNavBar";
@@ -15,7 +17,7 @@ import Loader from "../shared/Loader";
 import ErrorModal from "../modals/ErrorModal";
 
 // Types
-import { App, Metadata, KotsParams, Version } from "@types";
+import { App, KotsParams, Metadata, Version } from "@types";
 import { useApps, useSelectedApp } from "@features/App";
 
 type Props = {
@@ -30,7 +32,7 @@ type Props = {
   refetchAppsList: () => void;
   refetchAppMetadata: () => void;
   snapshotInProgressApps: string[];
-  isHelmVM: boolean;
+  isEmbeddedCluster: boolean;
 };
 
 type State = {
@@ -322,12 +324,12 @@ function AppDetailPage(props: Props) {
       const firstVersion = downstream.pendingVersions.find(
         (version: Version) => version?.sequence === 0
       );
-      if (firstVersion?.status === "unknown" && props.isHelmVM) {
+      if (firstVersion?.status === "unknown" && props.isEmbeddedCluster) {
         navigate(`/${appNeedsConfiguration.slug}/cluster/manage`);
         return;
       }
       if (firstVersion?.status === "pending_config") {
-        if (props.isHelmVM) {
+        if (props.isEmbeddedCluster) {
           navigate(`/${appNeedsConfiguration.slug}/cluster/manage`);
           return;
         }
diff --git a/web/src/components/apps/AppVersionHistory.tsx b/web/src/components/apps/AppVersionHistory.tsx
index f39ff953ef..a0a9277c3d 100644
--- a/web/src/components/apps/AppVersionHistory.tsx
+++ b/web/src/components/apps/AppVersionHistory.tsx
@@ -19,12 +19,12 @@ import DeployWarningModal from "../shared/modals/DeployWarningModal";
 import AutomaticUpdatesModal from "@src/components/modals/AutomaticUpdatesModal";
 import SkipPreflightsModal from "../shared/modals/SkipPreflightsModal";
 import {
-  Utilities,
+  getCommitHashFromUrl,
+  getGitProviderDiffUrl,
+  getPreflightResultState,
   isAwaitingResults,
   secondsAgo,
-  getPreflightResultState,
-  getGitProviderDiffUrl,
-  getCommitHashFromUrl,
+  Utilities,
 } from "../../utilities/utilities";
 import { Repeater } from "../../utilities/repeater";
 import { AirgapUploader } from "../../utilities/airgapUploader";
@@ -59,7 +59,7 @@ type Props = {
   adminConsoleMetadata: {
     isAirgap: boolean;
     isKurl: boolean;
-    isHelmVM: boolean;
+    isEmbeddedCluster: boolean;
   };
   app: App;
   displayErrorModal: boolean;
diff --git a/web/src/components/apps/HelmVMClusterManagement.tsx b/web/src/components/apps/EmbeddedClusterManagement.tsx
similarity index 94%
rename from web/src/components/apps/HelmVMClusterManagement.tsx
rename to web/src/components/apps/EmbeddedClusterManagement.tsx
index f7ced8bec4..1f946217fc 100644
--- a/web/src/components/apps/HelmVMClusterManagement.tsx
+++ b/web/src/components/apps/EmbeddedClusterManagement.tsx
@@ -12,7 +12,7 @@ import { Utilities } from "../../utilities/utilities";
 import Icon from "../Icon";
 import CodeSnippet from "../shared/CodeSnippet";
 
-import "@src/scss/components/apps/HelmVMClusterManagement.scss";
+import "@src/scss/components/apps/EmbeddedClusterManagement.scss";
 
 const testData = {
   nodes: undefined,
 };
@@ -20,7 +20,7 @@ const testData = {
 // const testData = {
 //   nodes: [
 //     {
-//       name: "laverya-helmvm",
+//       name: "laverya-embeddedcluster",
 //       isConnected: true,
 //       isReady: true,
 //       isPrimaryNode: true,
@@ -43,7 +43,7 @@ const testData = {
 //       },
 //     },
 //   ],
 //   ha: true,
-//   isHelmVMEnabled: true,
+//   isEmbeddedClusterEnabled: true,
 // };
 
 type State = {
@@ -56,7 +56,7 @@ type State = {
   drainNodeSuccessful: boolean;
 };
 
-const HelmVMClusterManagement = ({
+const EmbeddedClusterManagement = ({
   fromLicenseFlow = false,
 }: {
   fromLicenseFlow?: boolean;
@@ -79,7 +79,7 @@ const HelmVMClusterManagement = ({
   const [selectedNodeTypes, setSelectedNodeTypes] = useState([]);
 
   const { data: appsData } = useApps();
-  // we grab the first app because helmvm users should only ever have one app
+  // we grab the first app because embeddedcluster users should only ever have one app
   const app = appsData?.apps?.[0];
 
   const { slug } = useParams();
@@ -87,7 +87,7 @@ const HelmVMClusterManagement = ({
   // #region queries
   type NodesResponse = {
     ha: boolean;
-    isHelmVMEnabled: boolean;
+    isEmbeddedClusterEnabled: boolean;
     nodes: {
       name: string;
       isConnected: boolean;
@@ -122,15 +122,18 @@ const HelmVMClusterManagement = ({
     isInitialLoading: nodesLoading,
     error: nodesError,
   } = useQuery({
-    queryKey: ["helmVmNodes"],
+    queryKey: ["embeddedClusterNodes"],
     queryFn: async () => {
-      const res = await fetch(`${process.env.API_ENDPOINT}/helmvm/nodes`, {
-        headers: {
-          Accept: "application/json",
-        },
-        credentials: "include",
-        method: "GET",
-      });
+      const res = await fetch(
+        `${process.env.API_ENDPOINT}/embedded-cluster/nodes`,
+        {
+          headers: {
+            Accept: "application/json",
+          },
+          credentials: "include",
+          method: "GET",
+        }
+      );
       if (!res.ok) {
         if (res.status === 401) {
           Utilities.logoutUser();
@@ -168,7 +171,7 @@ const HelmVMClusterManagement = ({
     queryFn: async ({ queryKey }) => {
       const [, nodeTypes] = queryKey;
       const res = await fetch(
-        `${process.env.API_ENDPOINT}/helmvm/generate-node-join-command`,
+        `${process.env.API_ENDPOINT}/embedded-cluster/generate-node-join-command`,
         {
           headers: {
             "Content-Type": "application/json",
@@ -334,7 +337,7 @@ const HelmVMClusterManagement = ({
   // #endregion
 
   return (
-
+
 
@@ -517,4 +520,4 @@ const HelmVMClusterManagement = ({
   );
 };
 
-export default HelmVMClusterManagement;
+export default EmbeddedClusterManagement;
diff --git a/web/src/components/apps/HelmVMViewNode.jsx b/web/src/components/apps/EmbeddedClusterViewNode.jsx
similarity index 92%
rename from web/src/components/apps/HelmVMViewNode.jsx
rename to web/src/components/apps/EmbeddedClusterViewNode.jsx
index 6bd7651568..a581009c3c 100644
--- a/web/src/components/apps/HelmVMViewNode.jsx
+++ b/web/src/components/apps/EmbeddedClusterViewNode.jsx
@@ -1,12 +1,12 @@
 import { MaterialReactTable } from "material-react-table";
-import React, { useMemo, setState } from "react";
+import React, { useMemo } from "react";
 import { useQuery } from "@tanstack/react-query";
 import { Link, useParams } from "react-router-dom";
 import Loader from "@components/shared/Loader";
 
 const testData = undefined;
 // const testData = {
-//   name: "laverya-helmvm",
+//   name: "laverya-embeddedcluster",
 //   isConnected: true,
 //   isReady: true,
 //   isPrimaryNode: true,
@@ -29,27 +29,30 @@ const testData = undefined;
 //     {
 //       name: "example-es-85fc9df74-8x8l6",
 //       status: "Running",
-//       namespace: "helmvm",
+//       namespace: "embeddedcluster",
 //       cpu: "0.0345789345 GB",
 //       memory: 0,
 //     },
 //   ],
 // };
 
-const HelmVMViewNode = () => {
+const EmbeddedClusterViewNode = () => {
   const { slug, nodeName } = useParams();
   const { data: nodeData, isLoading: nodeLoading } = useQuery({
-    queryKey: ["helmVmNode", nodeName],
+    queryKey: ["embeddedClusterNode", nodeName],
     queryFn: async ({ queryKey }) => {
       const [, nodeName] = queryKey;
       return (
-        await fetch(`${process.env.API_ENDPOINT}/helmvm/node/${nodeName}`, {
-          headers: {
-            Accept: "application/json",
-          },
-          credentials: "include",
-          method: "GET",
-        })
+        await fetch(
+          `${process.env.API_ENDPOINT}/embedded-cluster/node/${nodeName}`,
+          {
+            headers: {
+              Accept: "application/json",
+            },
+            credentials: "include",
+            method: "GET",
+          }
+        )
       ).json();
     },
     onError: (err) => {
@@ -239,4 +242,4 @@ const HelmVMViewNode = () => {
   );
 };
 
-export default HelmVMViewNode;
+export default EmbeddedClusterViewNode;
diff --git a/web/src/components/apps/HelmVMNodeRow.test.js b/web/src/components/apps/EmbeddedClustrNodeRow.test.js
similarity index 57%
rename from web/src/components/apps/HelmVMNodeRow.test.js
rename to web/src/components/apps/EmbeddedClustrNodeRow.test.js
index b7a4d8324c..18b6d130ad 100644
--- a/web/src/components/apps/HelmVMNodeRow.test.js
+++ b/web/src/components/apps/EmbeddedClustrNodeRow.test.js
@@ -1,3 +1,3 @@
-describe("HelmVMNodeRow", () => {
+describe("EmbeddedClusterNodeRow", () => {
   it.todo("upgrade to react 18 and add unit tests");
 });
diff --git a/web/src/components/shared/NavBar.tsx b/web/src/components/shared/NavBar.tsx
index 09c6dc2c9d..48ee8c0624 100644
--- a/web/src/components/shared/NavBar.tsx
+++ b/web/src/components/shared/NavBar.tsx
@@ -1,7 +1,7 @@
 import React, { PureComponent } from "react";
 import PropTypes from "prop-types";
 import classNames from "classnames";
-import { withRouter, RouterProps } from "@src/utilities/react-router-utilities";
+import { RouterProps, withRouter } from "@src/utilities/react-router-utilities";
 import { Link } from "react-router-dom";
 import { Utilities } from "@src/utilities/utilities";
 import ErrorModal from "../modals/ErrorModal";
@@ -19,7 +19,7 @@ type Props = {
   isHelmManaged: boolean;
   isIdentityServiceSupported: boolean;
   isKurlEnabled: boolean;
-  isHelmVMEnabled: boolean;
+  isEmbeddedClusterEnabled: boolean;
   isSnapshotsSupported: boolean;
   logo: string | null;
   onLogoutError: (message: string) => void;
@@ -144,7 +144,7 @@ export class NavBar extends PureComponent {
       className,
       fetchingMetadata,
       isKurlEnabled,
-      isHelmVMEnabled,
+      isEmbeddedClusterEnabled,
       isGitOpsSupported,
       isIdentityServiceSupported,
       appsList,
@@ -228,7 +228,7 @@ export class NavBar extends PureComponent {
 
 
           )}
-          {(isKurlEnabled || isHelmVMEnabled) &&
+          {(isKurlEnabled || isEmbeddedClusterEnabled) &&
             location.pathname !== `${selectedApp?.slug}/cluster/manage` && (
 
Date: Mon, 23 Oct 2023 12:58:51 -0400
Subject: [PATCH 28/31] Apply suggestions from code review

Co-authored-by: Salah Al Saleh
---
 pkg/handlers/embedded_cluster_node_join_command.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/handlers/embedded_cluster_node_join_command.go b/pkg/handlers/embedded_cluster_node_join_command.go
index ca1dbed32a..edeb07a171 100644
--- a/pkg/handlers/embedded_cluster_node_join_command.go
+++ b/pkg/handlers/embedded_cluster_node_join_command.go
@@ -8,7 +8,7 @@ import (
 	"github.com/replicatedhq/kots/pkg/embeddedcluster"
 	"github.com/replicatedhq/kots/pkg/k8sutil"
 	"github.com/replicatedhq/kots/pkg/logger"
-	"github.com/replicatedhq/kots/pkg/store/kotsstore"
+	"github.com/replicatedhq/kots/pkg/store"
 )
 
 type GenerateEmbeddedClusterNodeJoinCommandResponse struct {
@@ -34,7 +34,7 @@ func (h *Handler) GenerateEmbeddedClusterNodeJoinCommand(w http.ResponseWriter,
 	}
 
 	store := kotsstore.StoreFromEnv()
-	token, err := store.SetEmbeddedClusterInstallCommandRoles(generateEmbeddedClusterNodeJoinCommandRequest.Roles)
+	token, err := store.GetStore().SetEmbeddedClusterInstallCommandRoles(generateEmbeddedClusterNodeJoinCommandRequest.Roles)
 	if err != nil {
 		logger.Error(fmt.Errorf("failed to set k0s install command roles: %w", err))
 		w.WriteHeader(http.StatusInternalServerError)

From e462416555bf202933948b542e35594871603be5 Mon Sep 17 00:00:00 2001
From: Andrew Lavery
Date: Mon, 23 Oct 2023 20:12:45 +0300
Subject: [PATCH 29/31] address review comments

---
 pkg/embeddedcluster/helmvm_node.go            |  1 +
 pkg/embeddedcluster/node_join.go              | 12 ++-
 .../embedded_cluster_node_join_command.go     |  4 +-
 pkg/store/mock/mock.go                        | 83 +++++++++++++++++++
 pkg/store/store_interface.go                  |  6 ++
 pkg/util/image.go                             | 26 ++++++
 .../apps/EmbeddedClusterManagement.tsx        | 28 -------
 .../apps/EmbeddedClusterViewNode.jsx          | 30 -------
 8 files changed, 126 insertions(+), 64 deletions(-)
 create mode 100644 pkg/util/image.go

diff --git a/pkg/embeddedcluster/helmvm_node.go b/pkg/embeddedcluster/helmvm_node.go
index 2b7fff9dd7..4443e3cdf8 100644
--- a/pkg/embeddedcluster/helmvm_node.go
+++ b/pkg/embeddedcluster/helmvm_node.go
@@ -160,6 +160,7 @@ func nodeRolesFromLabels(labels map[string]string) []string {
 		roleLabel, ok := labels[fmt.Sprintf("%s-%d", types.EMBEDDED_CLUSTER_ROLE_LABEL, i)]
 		if !ok {
 			fmt.Printf("failed to find role label %d", i)
+			continue
 		}
 		toReturn = append(toReturn, roleLabel)
 	}
diff --git a/pkg/embeddedcluster/node_join.go b/pkg/embeddedcluster/node_join.go
index b0835b3761..b7e85e9b35 100644
--- a/pkg/embeddedcluster/node_join.go
+++ b/pkg/embeddedcluster/node_join.go
@@ -3,12 +3,12 @@ package embeddedcluster
 import (
 	"context"
 	"fmt"
-	"os"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/replicatedhq/kots/pkg/embeddedcluster/types"
+	"github.com/replicatedhq/kots/pkg/util"
 	corev1 "k8s.io/api/core/v1"
 	kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -70,6 +70,12 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node
 		}
 	}
 
+	// get the kotsadm image, as we know that will always exist
+	kotsadmImage, err := util.ThisImage(ctx, client)
+	if err != nil {
+		return "", fmt.Errorf("failed to get kotsadm image: %w", err)
+	}
+
 	hostPathFile := corev1.HostPathFile
 	hostPathDir := corev1.HostPathDirectory
 	_, err = client.CoreV1().Pods("kube-system").Create(ctx, &corev1.Pod{
@@ -143,7 +149,7 @@ func runAddNodeCommandPod(ctx context.Context, client kubernetes.Interface, node
 			Containers: []corev1.Container{
 				{
 					Name:    "k0s-token-generator",
-					Image:   "ubuntu:latest", // TODO use the kotsadm image here as we'll know it exists
+					Image:   kotsadmImage,
 					Command: []string{"/mnt/k0s"},
 					Args: []string{
 						"token",
@@ -264,7 +270,7 @@ func GenerateK0sJoinCommand(ctx context.Context, client kubernetes.Interface, ro
 
 // gets the port of the 'admin-console' service
 func getAdminConsolePort(ctx context.Context, client kubernetes.Interface) (int32, error) {
-	svc, err := client.CoreV1().Services(os.Getenv("POD_NAMESPACE")).Get(ctx, "admin-console", metav1.GetOptions{})
+	svc, err := client.CoreV1().Services(util.PodNamespace).Get(ctx, "admin-console", metav1.GetOptions{})
 	if err != nil {
 		return -1, fmt.Errorf("failed to get admin-console service: %w", err)
 	}
diff --git a/pkg/handlers/embedded_cluster_node_join_command.go b/pkg/handlers/embedded_cluster_node_join_command.go
index edeb07a171..8b61429999 100644
--- a/pkg/handlers/embedded_cluster_node_join_command.go
+++ b/pkg/handlers/embedded_cluster_node_join_command.go
@@ -33,7 +33,6 @@ func (h *Handler) GenerateEmbeddedClusterNodeJoinCommand(w http.ResponseWriter,
 		return
 	}
 
-	store := kotsstore.StoreFromEnv()
 	token, err := store.GetStore().SetEmbeddedClusterInstallCommandRoles(generateEmbeddedClusterNodeJoinCommandRequest.Roles)
 	if err != nil {
 		logger.Error(fmt.Errorf("failed to set k0s install command roles: %w", err))
@@ -63,8 +62,7 @@ func (h *Handler) GenerateEmbeddedClusterNodeJoinCommand(w http.ResponseWriter,
 func (h *Handler) GetEmbeddedClusterNodeJoinCommand(w http.ResponseWriter, r *http.Request) {
 	// read query string, ensure that the token is valid
 	token := r.URL.Query().Get("token")
-	store := kotsstore.StoreFromEnv()
-	roles, err := store.GetEmbeddedClusterInstallCommandRoles(token)
+	roles, err := store.GetStore().GetEmbeddedClusterInstallCommandRoles(token)
 	if err != nil {
 		logger.Error(fmt.Errorf("failed to get k0s install command roles: %w", err))
 		w.WriteHeader(http.StatusInternalServerError)
diff --git a/pkg/store/mock/mock.go b/pkg/store/mock/mock.go
index 6ae23ab37b..a71c1b9f57 100644
--- a/pkg/store/mock/mock.go
+++ b/pkg/store/mock/mock.go
@@ -758,6 +758,21 @@ func (mr *MockStoreMockRecorder) GetEmbeddedClusterAuthToken() *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEmbeddedClusterAuthToken", reflect.TypeOf((*MockStore)(nil).GetEmbeddedClusterAuthToken))
 }
 
+// GetEmbeddedClusterInstallCommandRoles mocks base method.
+func (m *MockStore) GetEmbeddedClusterInstallCommandRoles(token string) ([]string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetEmbeddedClusterInstallCommandRoles", token)
+	ret0, _ := ret[0].([]string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetEmbeddedClusterInstallCommandRoles indicates an expected call of GetEmbeddedClusterInstallCommandRoles.
+func (mr *MockStoreMockRecorder) GetEmbeddedClusterInstallCommandRoles(token interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEmbeddedClusterInstallCommandRoles", reflect.TypeOf((*MockStore)(nil).GetEmbeddedClusterInstallCommandRoles), token)
+}
+
 // GetIgnoreRBACErrors mocks base method.
 func (m *MockStore) GetIgnoreRBACErrors(appID string, sequence int64) (bool, error) {
 	m.ctrl.T.Helper()
@@ -1644,6 +1659,21 @@ func (mr *MockStoreMockRecorder) SetEmbeddedClusterAuthToken(token interface{})
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEmbeddedClusterAuthToken", reflect.TypeOf((*MockStore)(nil).SetEmbeddedClusterAuthToken), token)
 }
 
+// SetEmbeddedClusterInstallCommandRoles mocks base method.
+func (m *MockStore) SetEmbeddedClusterInstallCommandRoles(roles []string) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SetEmbeddedClusterInstallCommandRoles", roles)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SetEmbeddedClusterInstallCommandRoles indicates an expected call of SetEmbeddedClusterInstallCommandRoles.
+func (mr *MockStoreMockRecorder) SetEmbeddedClusterInstallCommandRoles(roles interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEmbeddedClusterInstallCommandRoles", reflect.TypeOf((*MockStore)(nil).SetEmbeddedClusterInstallCommandRoles), roles)
+}
+
 // SetIgnorePreflightPermissionErrors mocks base method.
 func (m *MockStore) SetIgnorePreflightPermissionErrors(appID string, sequence int64) error {
 	m.ctrl.T.Helper()
@@ -4432,3 +4462,56 @@ func (mr *MockReportingStoreMockRecorder) SaveReportingInfo(licenseID, reporting
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveReportingInfo", reflect.TypeOf((*MockReportingStore)(nil).SaveReportingInfo), licenseID, reportingInfo)
 }
+
+// MockEmbeddedClusterStore is a mock of EmbeddedClusterStore interface.
+type MockEmbeddedClusterStore struct {
+	ctrl     *gomock.Controller
+	recorder *MockEmbeddedClusterStoreMockRecorder
+}
+
+// MockEmbeddedClusterStoreMockRecorder is the mock recorder for MockEmbeddedClusterStore.
+type MockEmbeddedClusterStoreMockRecorder struct {
+	mock *MockEmbeddedClusterStore
+}
+
+// NewMockEmbeddedClusterStore creates a new mock instance.
+func NewMockEmbeddedClusterStore(ctrl *gomock.Controller) *MockEmbeddedClusterStore {
+	mock := &MockEmbeddedClusterStore{ctrl: ctrl}
+	mock.recorder = &MockEmbeddedClusterStoreMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockEmbeddedClusterStore) EXPECT() *MockEmbeddedClusterStoreMockRecorder {
+	return m.recorder
+}
+
+// GetEmbeddedClusterInstallCommandRoles mocks base method.
+func (m *MockEmbeddedClusterStore) GetEmbeddedClusterInstallCommandRoles(token string) ([]string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetEmbeddedClusterInstallCommandRoles", token)
+	ret0, _ := ret[0].([]string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetEmbeddedClusterInstallCommandRoles indicates an expected call of GetEmbeddedClusterInstallCommandRoles.
+func (mr *MockEmbeddedClusterStoreMockRecorder) GetEmbeddedClusterInstallCommandRoles(token interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEmbeddedClusterInstallCommandRoles", reflect.TypeOf((*MockEmbeddedClusterStore)(nil).GetEmbeddedClusterInstallCommandRoles), token)
+}
+
+// SetEmbeddedClusterInstallCommandRoles mocks base method.
+func (m *MockEmbeddedClusterStore) SetEmbeddedClusterInstallCommandRoles(roles []string) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "SetEmbeddedClusterInstallCommandRoles", roles)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// SetEmbeddedClusterInstallCommandRoles indicates an expected call of SetEmbeddedClusterInstallCommandRoles.
+func (mr *MockEmbeddedClusterStoreMockRecorder) SetEmbeddedClusterInstallCommandRoles(roles interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEmbeddedClusterInstallCommandRoles", reflect.TypeOf((*MockEmbeddedClusterStore)(nil).SetEmbeddedClusterInstallCommandRoles), roles)
+}
diff --git a/pkg/store/store_interface.go b/pkg/store/store_interface.go
index 1a5df70a01..34b5872db2 100644
--- a/pkg/store/store_interface.go
+++ b/pkg/store/store_interface.go
@@ -47,6 +47,7 @@ type Store interface {
 	EmbeddedStore
 	BrandingStore
 	ReportingStore
+	EmbeddedClusterStore
 
 	Init() error // this may need options
 	WaitForReady(ctx context.Context) error
@@ -253,3 +254,8 @@ type ReportingStore interface {
 	SavePreflightReport(licenseID string, preflightStatus *reportingtypes.PreflightStatus) error
 	SaveReportingInfo(licenseID string, reportingInfo *reportingtypes.ReportingInfo) error
 }
+
+type EmbeddedClusterStore interface {
+	SetEmbeddedClusterInstallCommandRoles(roles []string) (string, error)
+	GetEmbeddedClusterInstallCommandRoles(token string) ([]string, error)
+}
diff --git a/pkg/util/image.go b/pkg/util/image.go
new file mode 100644
index 0000000000..d1bd374831
--- /dev/null
+++ b/pkg/util/image.go
@@ -0,0 +1,26 @@
+package util
+
+import (
+	"context"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// ThisImage looks for either a deployment 'kotsadm' or a statefulset 'kotsadm' in the current namespace
+// it returns the image of the first container in the pod template
+func ThisImage(ctx context.Context, client kubernetes.Interface) (string, error) {
+	deploy, err := client.AppsV1().Deployments(PodNamespace).Get(ctx, "kotsadm", metav1.GetOptions{})
+	if err == nil {
+		return deploy.Spec.Template.Spec.Containers[0].Image, nil
+	}
+
+	statefulset, err := client.AppsV1().StatefulSets(PodNamespace).Get(ctx, "kotsadm", metav1.GetOptions{})
+	if err == nil {
+		return statefulset.Spec.Template.Spec.Containers[0].Image, nil
+	}
+
+	return "", fmt.Errorf("failed to find deployment or statefulset")
+
+}
diff --git a/web/src/components/apps/EmbeddedClusterManagement.tsx b/web/src/components/apps/EmbeddedClusterManagement.tsx
index 1f946217fc..55834092c9 100644
--- a/web/src/components/apps/EmbeddedClusterManagement.tsx
+++ b/web/src/components/apps/EmbeddedClusterManagement.tsx
@@ -17,34 +17,6 @@ import "@src/scss/components/apps/EmbeddedClusterManagement.scss";
 const testData = {
   nodes: undefined,
 };
-// const testData = {
-//   nodes: [
-//     {
-//       name: "laverya-embeddedcluster",
-//       isConnected: true,
-//       isReady: true,
-//       isPrimaryNode: true,
-//       canDelete: false,
-//       kubeletVersion: "v1.28.2+k0s",
-//       kubeProxyVersion: "v1.28.2+k0s",
-//       operatingSystem: "linux",
-//       kernelVersion: "5.10.0-26-cloud-amd64",
-//       cpu: { capacity: 4, used: 1.9364847660000002 },
-//       memory: { capacity: 15.633056640625, used: 3.088226318359375 },
-//       pods: { capacity: 110, used: 27 },
-//       labels: ["controller"],
-//       conditions: {
-//         memoryPressure: false,
-//         diskPressure: false,
-//         pidPressure: false,
-//         ready: true,
-//       },
-//       podList: [],
-//     },
-//   ],
-//   ha: true,
-//   isEmbeddedClusterEnabled: true,
-// };
 
 type State = {
   displayAddNode: boolean;
diff --git a/web/src/components/apps/EmbeddedClusterViewNode.jsx b/web/src/components/apps/EmbeddedClusterViewNode.jsx
index a581009c3c..c138aa79c5 100644
--- a/web/src/components/apps/EmbeddedClusterViewNode.jsx
+++ b/web/src/components/apps/EmbeddedClusterViewNode.jsx
@@ -5,36 +5,6 @@ import { Link, useParams } from "react-router-dom";
 import Loader from "@components/shared/Loader";
 
 const testData = undefined;
-// const testData = {
-//   name: "laverya-embeddedcluster",
-//   isConnected: true,
-//   isReady: true,
-//   isPrimaryNode: true,
-//   canDelete: false,
-//   kubeletVersion: "v1.28.2+k0s",
-//   kubeProxyVersion: "v1.28.2+k0s",
-//   operatingSystem: "linux",
-//   kernelVersion: "5.10.0-26-cloud-amd64",
-//   cpu: { capacity: 4, used: 1.9364847660000002 },
-//   memory: { capacity: 15.633056640625, used: 3.088226318359375 },
-//   pods: { capacity: 110, used: 27 },
-//   labels: ["controller"],
-//   conditions: {
-//     memoryPressure: false,
-//     diskPressure: false,
-//     pidPressure: false,
-//     ready: true,
-//   },
-//   podList: [
-//     {
-//       name: "example-es-85fc9df74-8x8l6",
-//       status: "Running",
-//       namespace: "embeddedcluster",
-//       cpu: "0.0345789345 GB",
-//       memory: 0,
-//     },
-//   ],
-// };
 
 const EmbeddedClusterViewNode = () => {
   const { slug, nodeName } = useParams();

From c39598d8dce35d0d2f617300531c221d8aaa70fb Mon Sep 17 00:00:00 2001
From: Andrew Lavery
Date: Tue, 24 Oct 2023 16:43:00 +0300
Subject: [PATCH 30/31] return to excluding helmvm from some tests

---
 .github/actions/cmx-versions/dist/index.js | 2 +-
 .github/actions/cmx-versions/index.js      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/actions/cmx-versions/dist/index.js b/.github/actions/cmx-versions/dist/index.js
index 74168e22f1..6a28fb5826 100644
--- a/.github/actions/cmx-versions/dist/index.js
+++ b/.github/actions/cmx-versions/dist/index.js
@@ -7661,7 +7661,7 @@ async function getClusterVersions() {
 
     clusterVersions.forEach((distribution) => {
         const distroName = distribution.short_name;
-        if (distroName === 'embedded_cluster' || distroName === 'kurl') {
+        if (distroName === 'helmvm' || distroName === 'kurl') {
             // excluding the embedded distributions
             return;
         }
diff --git a/.github/actions/cmx-versions/index.js b/.github/actions/cmx-versions/index.js
index ffc802451d..213ffe0939 100644
--- a/.github/actions/cmx-versions/index.js
+++ b/.github/actions/cmx-versions/index.js
@@ -41,7 +41,7 @@ async function getClusterVersions() {
 
     clusterVersions.forEach((distribution) => {
        const distroName = distribution.short_name;
-        if (distroName === 'embedded_cluster' || distroName === 'kurl') {
+        if (distroName === 'helmvm' || distroName === 'kurl') {
            // excluding the embedded distributions
            return;
        }

From 236ef0dffed2be458389ed6c3c2a853bc79dd8fd Mon Sep 17 00:00:00 2001
From: Andrew Lavery
Date: Tue, 24 Oct 2023 19:42:08 +0300
Subject: [PATCH 31/31] another set of mistaken find-replace

---
 .github/workflows/regression.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/regression.yaml b/.github/workflows/regression.yaml
index bd40a77491..2a99593730 100644
--- a/.github/workflows/regression.yaml
+++ b/.github/workflows/regression.yaml
@@ -191,9 +191,9 @@ jobs:
             is_upgrade: "1"
           },
           {
-            name: "type=embeddedcluster cluster, env=online, phase=new install, rbac=cluster admin",
-            backend_config: "embeddedcluster-online-install-backend-config.tfvars",
-            terraform_script: "embeddedcluster-online-install.sh"
+            name: "type=helmvm cluster, env=online, phase=new install, rbac=cluster admin",
+            backend_config: "helmvm-online-install-backend-config.tfvars",
+            terraform_script: "helmvm-online-install.sh"
           }
         ]
     steps:
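
For completeness, here is a minimal sketch of how the MockEmbeddedClusterStore generated in PATCH 29 could be driven from a unit test. This is illustrative only: it assumes the mock package is imported as mock_store and that gomock is available at github.com/golang/mock/gomock (neither import is shown in the diffs above), and the token and role values are made up.

package store_test

import (
	"testing"

	"github.com/golang/mock/gomock" // assumed import path
	mock_store "github.com/replicatedhq/kots/pkg/store/mock" // assumed package alias
)

// Exercises the generated mock's token round-trip: roles are persisted in
// exchange for an opaque token, and the token later resolves back to roles.
func TestEmbeddedClusterInstallCommandRoles(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockStore := mock_store.NewMockEmbeddedClusterStore(ctrl)

	// Expect the roles to be stored once, returning an example token.
	mockStore.EXPECT().
		SetEmbeddedClusterInstallCommandRoles([]string{"controller"}).
		Return("example-token", nil)

	// Expect the same token to be resolved back to the stored roles.
	mockStore.EXPECT().
		GetEmbeddedClusterInstallCommandRoles("example-token").
		Return([]string{"controller"}, nil)

	token, err := mockStore.SetEmbeddedClusterInstallCommandRoles([]string{"controller"})
	if err != nil {
		t.Fatalf("set install command roles: %v", err)
	}

	roles, err := mockStore.GetEmbeddedClusterInstallCommandRoles(token)
	if err != nil || len(roles) != 1 || roles[0] != "controller" {
		t.Fatalf("unexpected roles %v (err: %v)", roles, err)
	}
}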