Merge pull request #22 from cofide/workload-info-via-debug-container
Adds a workload status command
markgoddard authored Dec 23, 2024
2 parents bba07f1 + 8287ec5 commit d327052
Showing 4 changed files with 240 additions and 14 deletions.
92 changes: 90 additions & 2 deletions cmd/cofidectl/cmd/workload/workload.go
@@ -8,9 +8,12 @@ import (
"fmt"
"os"

provisionpb "github.com/cofide/cofide-api-sdk/gen/go/proto/provision_plugin/v1alpha1"
trust_zone_proto "github.com/cofide/cofide-api-sdk/gen/go/proto/trust_zone/v1alpha1"
"github.com/cofide/cofidectl/cmd/cofidectl/cmd/statusspinner"
"github.com/cofide/cofidectl/internal/pkg/workload"
cmdcontext "github.com/cofide/cofidectl/pkg/cmd/context"
kubeutil "github.com/cofide/cofidectl/pkg/kube"
"github.com/cofide/cofidectl/pkg/provider/helm"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
@@ -32,13 +35,14 @@ This command consists of multiple sub-commands to interact with workloads.

func (c *WorkloadCommand) GetRootCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "workload list|discover [ARGS]",
Short: "List workloads in a trust zone or discover candidate workloads",
Use: "workload list|discover|status [ARGS]",
Short: "List or introspect the status of workloads in a trust zone or discover candidate workloads",
Long: workloadRootCmdDesc,
Args: cobra.NoArgs,
}

cmd.AddCommand(
c.GetStatusCommand(),
c.GetListCommand(),
c.GetDiscoverCommand(),
)
@@ -108,6 +112,77 @@ func (w *WorkloadCommand) GetListCommand() *cobra.Command {
return cmd
}

var workloadStatusCmdDesc = `
This command will display the status of workloads in the Cofide configuration state.
`

type StatusOpts struct {
podName string
namespace string
trustZone string
}

func (w *WorkloadCommand) GetStatusCommand() *cobra.Command {
opts := StatusOpts{}
cmd := &cobra.Command{
Use: "status [ARGS]",
Short: "Display workload status",
Long: workloadStatusCmdDesc,
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
kubeConfig, err := cmd.Flags().GetString("kube-config")
if err != nil {
return fmt.Errorf("failed to retrieve the kubeconfig file location")
}

return w.status(cmd.Context(), kubeConfig, opts)
},
}

f := cmd.Flags()
f.StringVar(&opts.podName, "pod-name", "", "Pod name for the workload")
f.StringVar(&opts.namespace, "namespace", "", "Namespace for the workload")
f.StringVar(&opts.trustZone, "trust-zone", "", "Trust zone for the workload")

cobra.CheckErr(cmd.MarkFlagRequired("pod-name"))
cobra.CheckErr(cmd.MarkFlagRequired("namespace"))
cobra.CheckErr(cmd.MarkFlagRequired("trust-zone"))

return cmd
}
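
Note (an aside, not part of the diff): the new subcommand requires all three flags, so an invocation takes the form below; the pod, namespace and trust zone names are placeholders.

cofidectl workload status --pod-name my-pod --namespace my-namespace --trust-zone my-trust-zone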

func (w *WorkloadCommand) status(ctx context.Context, kubeConfig string, opts StatusOpts) error {
ds, err := w.cmdCtx.PluginManager.GetDataSource(ctx)
if err != nil {
return err
}

trustZone, err := ds.GetTrustZone(opts.trustZone)
if err != nil {
return err
}

client, err := kubeutil.NewKubeClientFromSpecifiedContext(kubeConfig, *trustZone.KubernetesContext)
if err != nil {
return err
}

statusCh, dataCh := getWorkloadStatus(ctx, client, opts.podName, opts.namespace)

// Create a spinner to display while the debug container is created and executed and its logs are retrieved
if err := statusspinner.WatchProvisionStatus(ctx, statusCh, false); err != nil {
return fmt.Errorf("retrieving workload status failed: %w", err)
}

result := <-dataCh
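// An empty result means the status flow failed and the data channel was closed without the logs being sent.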
if result == "" {
return fmt.Errorf("retrieving workload status failed")
}

fmt.Println(result)
return nil
}

func renderRegisteredWorkloads(ctx context.Context, kubeConfig string, trustZones []*trust_zone_proto.TrustZone) error {
data := make([][]string, 0, len(trustZones))

@@ -144,6 +219,19 @@ func renderRegisteredWorkloads(ctx context.Context, kubeConfig string, trustZone
return nil
}

func getWorkloadStatus(ctx context.Context, client *kubeutil.Client, podName string, namespace string) (<-chan *provisionpb.Status, chan string) {
statusCh := make(chan *provisionpb.Status)
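// dataCh is buffered so that GetStatus can send the logs before its final status is consumed; the caller drains statusCh via the spinner before reading dataCh.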
dataCh := make(chan string, 1)

go func() {
defer close(statusCh)
defer close(dataCh)
workload.GetStatus(ctx, statusCh, dataCh, client, podName, namespace)
}()

return statusCh, dataCh
}

var workloadDiscoverCmdDesc = `
This command will discover all of the unregistered workloads.
`
2 changes: 1 addition & 1 deletion internal/pkg/trustprovider/trustprovider.go
@@ -41,7 +41,7 @@ func (tp *TrustProvider) GetValues() error {
WorkloadAttestorConfig: map[string]any{
"enabled": true,
"skipKubeletVerification": true,
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"useNewContainerLocator": false,
"verboseContainerLocatorLogs": false,
},
140 changes: 139 additions & 1 deletion internal/pkg/workload/workload.go
@@ -4,16 +4,24 @@
package workload

import (
"bytes"
"context"
"fmt"
"io"
"time"

"github.com/cofide/cofidectl/pkg/spire"
provisionpb "github.com/cofide/cofide-api-sdk/gen/go/proto/provision_plugin/v1alpha1"
kubeutil "github.com/cofide/cofidectl/pkg/kube"
"github.com/cofide/cofidectl/pkg/plugin/provision"
"github.com/cofide/cofidectl/pkg/spire"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
)

const debugContainerNamePrefix = "cofidectl-debug"
const debugContainerImage = "ghcr.io/cofide/cofidectl-debug-container/cmd:v0.1.0"

type Workload struct {
Name string
Namespace string
@@ -134,6 +142,136 @@ func GetUnregisteredWorkloads(ctx context.Context, kubeCfgFile string, kubeConte
return unregisteredWorkloads, nil
}

func GetStatus(ctx context.Context, statusCh chan<- *provisionpb.Status, dataCh chan string, client *kubeutil.Client, podName string, namespace string) {
debugContainerName := fmt.Sprintf("%s-%s", debugContainerNamePrefix, rand.String(5))

statusCh <- provision.StatusOk(
"Creating",
fmt.Sprintf("Waiting for ephemeral debug container to be created in %s", podName),
)

if err := createDebugContainer(ctx, client, podName, namespace, debugContainerName); err != nil {
statusCh <- provision.StatusError(
"Creating",
fmt.Sprintf("Failed waiting for ephemeral debug container to be created in %s", podName),
err,
)
return
}

statusCh <- provision.StatusOk(
"Waiting",
"Waiting for ephemeral debug container to complete",
)

if err := waitForDebugContainer(ctx, client, podName, namespace, debugContainerName); err != nil {
statusCh <- provision.StatusError(
"Waiting",
"Error waiting for ephemeral debug container to complete",
err,
)
return
}

logs, err := getDebugContainerLogs(ctx, client, podName, namespace, debugContainerName)
if err != nil {
statusCh <- provision.StatusError(
"Waiting",
"Error waiting for ephemeral debug container logs",
err,
)
return
}

dataCh <- logs
statusCh <- provision.StatusDone(
"Complete",
fmt.Sprintf("Successfully executed emphemeral debug container in %s", podName),
)
}

func createDebugContainer(ctx context.Context, client *kubeutil.Client, podName string, namespace string, debugContainerName string) error {
pod, err := client.Clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return err
}

debugContainer := v1.EphemeralContainer{
EphemeralContainerCommon: v1.EphemeralContainerCommon{
Name: debugContainerName,
Image: debugContainerImage,
ImagePullPolicy: v1.PullIfNotPresent,
TTY: true,
Stdin: true,
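// Mount the SPIFFE Workload API socket read-only into the debug container.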
VolumeMounts: []v1.VolumeMount{
{
ReadOnly: true,
Name: "spiffe-workload-api",
MountPath: "/spiffe-workload-api",
}},
},
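// Target the pod's first container; where the runtime supports it, the debug container shares that container's process namespace.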
TargetContainerName: pod.Spec.Containers[0].Name,
}

pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, debugContainer)

_, err = client.Clientset.CoreV1().Pods(namespace).UpdateEphemeralContainers(
ctx,
pod.Name,
pod,
metav1.UpdateOptions{},
)
if err != nil {
return err
}
return nil
}

func waitForDebugContainer(ctx context.Context, client *kubeutil.Client, podName string, namespace string, debugContainerName string) error {
waitCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
defer cancel()

for {
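// Poll the pod roughly once per second until the debug container reports a terminated state, or give up when the timeout expires.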
pod, err := client.Clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return err
}

for _, status := range pod.Status.EphemeralContainerStatuses {
if status.Name == debugContainerName && status.State.Terminated != nil {
return nil
}
}

select {
case <-waitCtx.Done():
return waitCtx.Err()
default:
time.Sleep(time.Second)
continue
}
}
}

func getDebugContainerLogs(ctx context.Context, client *kubeutil.Client, podName string, namespace string, debugContainerName string) (string, error) {
logs, err := client.Clientset.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{
Container: debugContainerName,
}).Stream(ctx)
if err != nil {
return "", err
}
defer logs.Close()

// Read the logs
buf := new(bytes.Buffer)
_, err = io.Copy(buf, logs)
if err != nil {
return "", err
}

return buf.String(), nil
}
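
Note (an aside, not part of the diff): the flow above is roughly the manual equivalent of running kubectl debug -it my-pod --image=ghcr.io/cofide/cofidectl-debug-container/cmd:v0.1.0 --target=my-app against the workload pod and then collecting the injected container's logs; my-pod and my-app are placeholders.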

func isAtRisk(creationTS time.Time) (time.Duration, bool) {
// Consider secrets older than 30 days as long-lived and a source for potential risk
age := time.Since(creationTS)
20 changes: 10 additions & 10 deletions pkg/provider/helm/values_test.go
@@ -78,7 +78,7 @@ func TestHelmValuesGenerator_GenerateValues_success(t *testing.T) {
},
"workloadAttestors": Values{
"k8s": Values{
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"enabled": true,
"skipKubeletVerification": true,
"useNewContainerLocator": false,
@@ -169,7 +169,7 @@ func TestHelmValuesGenerator_GenerateValues_success(t *testing.T) {
},
"workloadAttestors": Values{
"k8s": Values{
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"enabled": true,
"skipKubeletVerification": true,
"useNewContainerLocator": false,
@@ -279,7 +279,7 @@ func TestHelmValuesGenerator_GenerateValues_success(t *testing.T) {
},
"workloadAttestors": Values{
"k8s": Values{
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"enabled": true,
"skipKubeletVerification": true,
"useNewContainerLocator": false,
@@ -417,7 +417,7 @@ func TestHelmValuesGenerator_GenerateValues_AdditionalValues(t *testing.T) {
},
"workloadAttestors": Values{
"k8s": Values{
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"enabled": true,
"skipKubeletVerification": true,
"useNewContainerLocator": false,
@@ -1084,7 +1084,7 @@ func TestSpireAgentValues_GenerateValues(t *testing.T) {
WorkloadAttestorConfig: map[string]any{
"enabled": true,
"skipKubeletVerification": true,
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"useNewContainerLocator": false,
"verboseContainerLocatorLogs": false,
},
@@ -1121,7 +1121,7 @@ func TestSpireAgentValues_GenerateValues(t *testing.T) {
"k8s": map[string]any{
"enabled": true,
"skipKubeletVerification": true,
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"useNewContainerLocator": false,
"verboseContainerLocatorLogs": false,
},
@@ -1140,7 +1140,7 @@ func TestSpireAgentValues_GenerateValues(t *testing.T) {
WorkloadAttestorConfig: map[string]any{
"enabled": true,
"skipKubeletVerification": true,
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"useNewContainerLocator": false,
"verboseContainerLocatorLogs": false,
},
@@ -1194,7 +1194,7 @@ func TestSpireAgentValues_GenerateValues(t *testing.T) {
WorkloadAttestorConfig: map[string]any{
"enabled": true,
"skipKubeletVerification": true,
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"useNewContainerLocator": false,
"verboseContainerLocatorLogs": false,
},
@@ -1224,7 +1224,7 @@ func TestSpireAgentValues_GenerateValues(t *testing.T) {
WorkloadAttestorConfig: map[string]any{
"enabled": true,
"skipKubeletVerification": true,
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"useNewContainerLocator": false,
"verboseContainerLocatorLogs": false,
},
@@ -1249,7 +1249,7 @@ func TestSpireAgentValues_GenerateValues(t *testing.T) {
WorkloadAttestorConfig: map[string]any{
"enabled": true,
"skipKubeletVerification": true,
"disableContainerSelectors": false,
"disableContainerSelectors": true,
"useNewContainerLocator": false,
"verboseContainerLocatorLogs": false,
},
