Merge pull request #1876 from FabianKramm/main
fix: background-proxy flag
FabianKramm authored Jun 18, 2024
2 parents d6fcc87 + a7ef4b4 commit 882c8e0
Showing 7 changed files with 160 additions and 42 deletions.
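In short: background-proxy creation moves out of connect() and into getVClusterKubeConfig() in pkg/cli/connect_helm.go; the --background-proxy flag now defaults to true for both vcluster connect and vcluster create; the physical kubeconfig is flattened through a new pkg/util/kubeconfig.ResolveKubeConfig helper that runs any exec credential plugin and bakes the result into a static file; and the proxy container switches from the ghcr.io/loft-sh/vcluster-cli image to bitnami/kubectl:1.29 running kubectl port-forward against the vCluster service. As a side change, vcluster logout no longer prints the newer-version warning and only logs (instead of aborting on) a failed logout request.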
11 changes: 3 additions & 8 deletions cmd/vclusterctl/cmd/logout.go
@@ -10,7 +10,6 @@ import (
"github.com/loft-sh/vcluster/pkg/cli/config"
"github.com/loft-sh/vcluster/pkg/cli/flags"
"github.com/loft-sh/vcluster/pkg/platform"
"github.com/loft-sh/vcluster/pkg/upgrade"
"github.com/mgutz/ansi"
"github.com/spf13/cobra"
)
@@ -43,9 +42,6 @@ vcluster logout
Long: description,
Args: cobra.NoArgs,
RunE: func(cobraCmd *cobra.Command, _ []string) error {
// Check for newer version
upgrade.PrintNewerVersionWarning()

return cmd.Run(cobraCmd.Context())
},
}
@@ -56,15 +52,14 @@ vcluster logout
func (cmd *LogoutCmd) Run(ctx context.Context) error {
platformClient := platform.NewClientFromConfig(cmd.LoadedConfig(cmd.Log))

cfg := platformClient.Config()

// delete old access key if we're logged in before
cfg := platformClient.Config()
if cfg.Platform.AccessKey != "" {
if err := platformClient.Logout(ctx); err != nil {
return fmt.Errorf("failed to logout: %w", err)
cmd.Log.Errorf("failed to send logout request: %v", err)
}
configHost := cfg.Platform.Host

configHost := cfg.Platform.Host
cfg.Platform.Host = ""
cfg.Platform.AccessKey = ""
cfg.Platform.LastInstallContext = ""
25 changes: 15 additions & 10 deletions pkg/cli/connect_helm.go
@@ -100,16 +100,6 @@ func (cmd *connectHelm) connect(ctx context.Context, vCluster *find.VCluster, co
return err
}

if len(command) == 0 && cmd.ServiceAccount == "" && cmd.Server == "" && cmd.BackgroundProxy && localkubernetes.IsDockerInstalledAndUpAndRunning() {
// start background container
server, err := localkubernetes.CreateBackgroundProxyContainer(ctx, vCluster.Name, cmd.Namespace, &cmd.rawConfig, kubeConfig, cmd.LocalPort, cmd.Log)
if err != nil {
cmd.Log.Warnf("Error exposing local vcluster, will fallback to port-forwarding: %v", err)
cmd.BackgroundProxy = false
}
cmd.Server = server
}

// check if we should execute command
if len(command) > 0 {
if !cmd.portForwarding {
@@ -327,6 +317,21 @@ func (cmd *connectHelm) getVClusterKubeConfig(ctx context.Context, vclusterName
if err != nil {
return nil, err
}

// check if we should start a background proxy
if cmd.Server == "" && cmd.BackgroundProxy {
if localkubernetes.IsDockerInstalledAndUpAndRunning() {
// start background container
server, err := localkubernetes.CreateBackgroundProxyContainer(ctx, vclusterName, cmd.Namespace, cmd.kubeClientConfig, kubeConfig, cmd.LocalPort, cmd.Log)
if err != nil {
cmd.Log.Warnf("Error exposing local vcluster, will fallback to port-forwarding: %v", err)
cmd.BackgroundProxy = false
}
cmd.Server = server
} else {
cmd.Log.Debugf("Docker is not installed, skipping background proxy")
}
}
}

// find out vcluster server port
3 changes: 3 additions & 0 deletions pkg/cli/create_helm.go
@@ -59,6 +59,7 @@ type CreateOptions struct {

CreateNamespace bool
UpdateCurrent bool
BackgroundProxy bool
CreateContext bool
SwitchContext bool
Expose bool
@@ -145,6 +146,7 @@ func CreateHelm(ctx context.Context, options *CreateOptions, globalFlags *flags.
if isVClusterDeployed(release) {
if cmd.Connect {
return ConnectHelm(ctx, &ConnectOptions{
BackgroundProxy: cmd.BackgroundProxy,
UpdateCurrent: cmd.UpdateCurrent,
KubeConfigContextName: cmd.KubeConfigContextName,
KubeConfig: "./kubeconfig.yaml",
@@ -291,6 +293,7 @@ func CreateHelm(ctx context.Context, options *CreateOptions, globalFlags *flags.
if cmd.Connect || cmd.Print {
cmd.log.Donef("Successfully created virtual cluster %s in namespace %s", vClusterName, cmd.Namespace)
return ConnectHelm(ctx, &ConnectOptions{
BackgroundProxy: cmd.BackgroundProxy,
UpdateCurrent: cmd.UpdateCurrent,
Print: cmd.Print,
KubeConfigContextName: cmd.KubeConfigContextName,
2 changes: 1 addition & 1 deletion pkg/cli/flags/connect/connect.go
@@ -21,7 +21,7 @@ func AddCommonFlags(cmd *cobra.Command, options *cli.ConnectOptions) {
cmd.Flags().StringVar(&options.ServiceAccountClusterRole, "cluster-role", "", "If specified, vCluster will create the service account if it does not exist and also add a cluster role binding for the given cluster role to it. Requires --service-account to be set")
cmd.Flags().IntVar(&options.ServiceAccountExpiration, "token-expiration", 0, "If specified, vCluster will create the service account token for the given duration in seconds. Defaults to eternal")
cmd.Flags().BoolVar(&options.Insecure, "insecure", false, "If specified, vCluster will create the kube config with insecure-skip-tls-verify")
cmd.Flags().BoolVar(&options.BackgroundProxy, "background-proxy", false, "If specified, vCluster will create the background proxy in docker [its mainly used for vclusters with no nodeport service.]")
cmd.Flags().BoolVar(&options.BackgroundProxy, "background-proxy", true, "Try to use a background-proxy to access the vCluster. Only works if docker is installed and reachable")

// deprecated
_ = cmd.Flags().MarkDeprecated("kube-config", fmt.Sprintf("please use %q to write the kubeconfig of the virtual cluster to stdout.", "vcluster connect --print"))
1 change: 1 addition & 0 deletions pkg/cli/flags/create/create.go
@@ -35,6 +35,7 @@ func AddHelmFlags(cmd *cobra.Command, options *cli.CreateOptions) {
cmd.Flags().BoolVar(&options.CreateNamespace, "create-namespace", true, "If true the namespace will be created if it does not exist")
cmd.Flags().StringVar(&options.LocalChartDir, "local-chart-dir", "", "The virtual cluster local chart dir to use")
cmd.Flags().BoolVar(&options.ExposeLocal, "expose-local", true, "If true and a local Kubernetes distro is detected, will deploy vcluster with a NodePort service. Will be set to false and the passed value will be ignored if --expose is set to true.")
cmd.Flags().BoolVar(&options.BackgroundProxy, "background-proxy", true, "Try to use a background-proxy to access the vCluster. Only works if docker is installed and reachable")

_ = cmd.Flags().MarkHidden("local-chart-dir")
_ = cmd.Flags().MarkHidden("expose-local")
46 changes: 23 additions & 23 deletions pkg/cli/localkubernetes/configure.go
@@ -10,10 +10,9 @@ import (
"strings"
"time"

"github.com/loft-sh/vcluster/pkg/upgrade"

"github.com/loft-sh/log"
"github.com/loft-sh/vcluster/pkg/cli/find"
"github.com/loft-sh/vcluster/pkg/util/kubeconfig"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -294,20 +293,23 @@ func createProxyContainer(ctx context.Context, vClusterName, vClusterNamespace s
return server, nil
}

func CreateBackgroundProxyContainer(ctx context.Context, vClusterName, vClusterNamespace string, rawConfig, vRawConfig *clientcmdapi.Config, localPort int, log log.Logger) (string, error) {
func CreateBackgroundProxyContainer(ctx context.Context, vClusterName, vClusterNamespace string, rawConfig clientcmd.ClientConfig, vRawConfig *clientcmdapi.Config, localPort int, log log.Logger) (string, error) {
rawConfigObj, err := rawConfig.RawConfig()
if err != nil {
return "", err
}

// write kube config to buffer
physicalCluster, err := clientcmd.Write(*rawConfig)
physicalCluster, err := kubeconfig.ResolveKubeConfig(rawConfig)
if err != nil {
return "", nil
return "", fmt.Errorf("resolve kube config: %w", err)
}

// write a temporary kube file
tempFile, err := os.CreateTemp("", "")
if err != nil {
return "", errors.Wrap(err, "create temp file")
}
defer func(name string) {
_ = os.Remove(name)
}(tempFile.Name())
_, err = tempFile.Write(physicalCluster)
if err != nil {
return "", errors.Wrap(err, "write kube config to temp file")
@@ -319,33 +321,30 @@ func CreateBackgroundProxyContainer(ctx context.Context, vClusterName, vClusterN
kubeConfigPath := tempFile.Name()

// construct proxy name
proxyName := find.VClusterConnectBackgroundProxyName(vClusterName, vClusterNamespace, rawConfig.CurrentContext)
proxyName := find.VClusterConnectBackgroundProxyName(vClusterName, vClusterNamespace, rawConfigObj.CurrentContext)

// check if the background proxy container for this vcluster is running and then remove it.
_ = CleanupBackgroundProxy(proxyName, log)

// docker run -d --network=host -v /root/.kube/config:/root/.kube/config ghcr.io/loft-sh/vcluster-cli vcluster connect vcluster -n vcluster --local-port 13300
// build the command
cmd := exec.Command(
"docker",
"run",
"-d",
"-v",
fmt.Sprintf("%v:%v", kubeConfigPath, "/root/.kube/config"),
"-v", fmt.Sprintf("%v:%v", kubeConfigPath, "/kube-config"),
fmt.Sprintf("--name=%s", proxyName),
fmt.Sprintf("--network=%s", "host"),
"ghcr.io/loft-sh/vcluster-cli"+upgrade.GetVersion(),
"vcluster",
"connect",
vClusterName,
"--local-port",
strconv.Itoa(localPort),
"-n",
vClusterNamespace,
"--network=host",
"bitnami/kubectl:1.29",
"port-forward",
"svc/"+vClusterName,
strconv.Itoa(localPort)+":443",
"--kubeconfig", "/kube-config",
"-n", vClusterNamespace,
)
log.Infof("Starting background proxy container...")
out, err := cmd.Output()
out, err := cmd.CombinedOutput()
if err != nil {
return "", errors.Errorf("error starting background proxy : %s %v", string(out), err)
return "", errors.Errorf("error starting background proxy: %s %v", string(out), err)
}
server := fmt.Sprintf("https://127.0.0.1:%v", localPort)
waitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Second*60, true, func(ctx context.Context) (bool, error) {
@@ -358,6 +357,7 @@ func CreateBackgroundProxyContainer(ctx context.Context, vClusterName, vClusterN
if waitErr != nil {
return "", fmt.Errorf("test connection: %w %w", waitErr, err)
}

return server, nil
}

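The rewritten proxy container simply runs kubectl port-forward against the vCluster service, and the code above waits for the forwarded port with wait.PollUntilContextTimeout plus a connection test whose body is collapsed in this diff. Below is a minimal, hypothetical sketch of such a readiness probe; probeServer, the insecure TLS client, and the hard-coded port 13300 are illustrative assumptions rather than code from this commit.

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// probeServer reports whether the proxied API server answers on the local port yet.
func probeServer(ctx context.Context, server string) (bool, error) {
	client := &http.Client{
		Timeout: 2 * time.Second,
		Transport: &http.Transport{
			// the vCluster certificate is not issued for 127.0.0.1, so skip verification for the probe
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, server+"/version", nil)
	if err != nil {
		return false, err
	}

	resp, err := client.Do(req)
	if err != nil {
		// not reachable yet, keep polling
		return false, nil
	}
	defer resp.Body.Close()
	return true, nil
}

func main() {
	server := fmt.Sprintf("https://127.0.0.1:%v", 13300)

	// mirror the 1s interval / 60s timeout used by CreateBackgroundProxyContainer
	err := wait.PollUntilContextTimeout(context.Background(), time.Second, 60*time.Second, true,
		func(ctx context.Context) (bool, error) {
			return probeServer(ctx, server)
		})
	if err != nil {
		fmt.Println("proxy did not become ready:", err)
		return
	}
	fmt.Println("background proxy is reachable at", server)
}
```

Skipping TLS verification is tolerable here only because the probe checks reachability of the local forwarded port, not the identity of the virtual cluster.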
114 changes: 114 additions & 0 deletions pkg/util/kubeconfig/kubeconfig.go
@@ -1,15 +1,22 @@
package kubeconfig

import (
"bytes"
"context"
"fmt"
"os"
"os/exec"

"github.com/loft-sh/vcluster/pkg/util/translate"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/apis/clientauthentication"
"k8s.io/client-go/pkg/apis/clientauthentication/install"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -174,3 +181,110 @@ func ConvertRestConfigToClientConfig(config *rest.Config) (clientcmd.ClientConfi

return clientcmd.NewDefaultClientConfig(*kubeConfig, &clientcmd.ConfigOverrides{}), nil
}

func ResolveKubeConfig(rawConfig clientcmd.ClientConfig) ([]byte, error) {
restConfig, err := rawConfig.ClientConfig()
if err != nil {
return nil, err
}

// convert exec auth
if restConfig.ExecProvider != nil {
err = resolveExecCredentials(restConfig)
if err != nil {
return nil, fmt.Errorf("resolve exec credentials: %w", err)
}
}
if restConfig.AuthProvider != nil {
return nil, fmt.Errorf("auth provider is not supported")
}

retConfig, err := ConvertRestConfigToClientConfig(restConfig)
if err != nil {
return nil, err
}

retRawConfig, err := retConfig.RawConfig()
if err != nil {
return nil, err
}

return clientcmd.Write(retRawConfig)
}

func resolveExecCredentials(restConfig *rest.Config) error {
cred := &clientauthentication.ExecCredential{
Spec: clientauthentication.ExecCredentialSpec{
Interactive: false,
},
}

execProvider := restConfig.ExecProvider
if execProvider.ProvideClusterInfo {
var err error
cred.Spec.Cluster, err = rest.ConfigToExecCluster(restConfig)
if err != nil {
return err
}
}

env := os.Environ()
for _, e := range execProvider.Env {
env = append(env, e.Name+"="+e.Value)
}

groupVersion, err := schema.ParseGroupVersion(execProvider.APIVersion)
if err != nil {
return err
}

scheme := runtime.NewScheme()
codecs := serializer.NewCodecFactory(scheme)
install.Install(scheme)
data, err := runtime.Encode(codecs.LegacyCodec(groupVersion), cred)
if err != nil {
return fmt.Errorf("encode ExecCredentials: %w", err)
}
env = append(env, fmt.Sprintf("%s=%s", "KUBERNETES_EXEC_INFO", data))

stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
cmd := exec.Command(execProvider.Command, execProvider.Args...)
cmd.Env = env
cmd.Stderr = stderr
cmd.Stdout = stdout

err = cmd.Run()
if err != nil {
return fmt.Errorf("error executing exec provider: %s %s %w", stderr.String(), stdout.String(), err)
}

_, gvk, err := codecs.UniversalDecoder(groupVersion).Decode(stdout.Bytes(), nil, cred)
if err != nil {
return fmt.Errorf("decoding stdout: %w", err)
}
if gvk.Group != groupVersion.Group || gvk.Version != groupVersion.Version {
return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s",
groupVersion, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version})
}

if cred.Status == nil {
return fmt.Errorf("exec plugin didn't return a status field")
}
if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" {
return fmt.Errorf("exec plugin didn't return a token or cert/key pair")
}
if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") {
return fmt.Errorf("exec plugin returned only certificate or key, not both")
}

if cred.Status.Token != "" {
restConfig.BearerToken = cred.Status.Token
} else if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" {
restConfig.KeyData = []byte(cred.Status.ClientKeyData)
restConfig.CertData = []byte(cred.Status.ClientCertificateData)
}

restConfig.ExecProvider = nil
return nil
}
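
A minimal usage sketch of the new ResolveKubeConfig helper, assuming it is called the way the updated connect_helm.go calls it: load the caller's kubeconfig through clientcmd, flatten any exec-plugin credentials into static ones, and write the result to a file a container can mount. The main wrapper and the output path are illustrative only.

```go
package main

import (
	"log"
	"os"

	"github.com/loft-sh/vcluster/pkg/util/kubeconfig"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// load the local kubeconfig with the usual loading rules ($KUBECONFIG, ~/.kube/config)
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(),
		&clientcmd.ConfigOverrides{},
	)

	// resolve exec-plugin credentials into a self-contained, static kubeconfig
	resolved, err := kubeconfig.ResolveKubeConfig(clientConfig)
	if err != nil {
		log.Fatalf("resolve kube config: %v", err)
	}

	// write it somewhere a proxy container could mount it (illustrative path)
	if err := os.WriteFile("/tmp/resolved-kubeconfig.yaml", resolved, 0o600); err != nil {
		log.Fatal(err)
	}
	log.Println("wrote resolved kubeconfig to /tmp/resolved-kubeconfig.yaml")
}
```

Flattening matters for the background proxy because the docker container cannot execute the host's credential plugin; the kubeconfig mounted at /kube-config has to carry an inline token or client certificate to work.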
