diff --git a/.editorconfig b/.editorconfig index 247aea13c..ca5881265 100644 --- a/.editorconfig +++ b/.editorconfig @@ -11,7 +11,7 @@ charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true -[*.{sh,yaml,yml,json}] +[*.{sh,yaml,yml,json,js,ts}] indent_style = space indent_size = 2 diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 95fc8c791..96d1868b0 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -145,6 +145,11 @@ jobs: go-version: "1.21" cache: false + - uses: azure/setup-helm@v3 + name: Setup Helm + with: + version: "v3.11.0" + - name: Set up kind k8s cluster uses: engineerd/setup-kind@v0.5.0 with: diff --git a/Justfile b/Justfile index ddff4ddbe..d100edbb1 100644 --- a/Justfile +++ b/Justfile @@ -68,6 +68,10 @@ copy-assets: generate-vcluster-images version="0.0.0": go run -mod vendor ./hack/assets/main.go {{ version }} > ./release/vcluster-images.txt +# Generate the CLI docs +generate-cli-docs: + go run -mod vendor -tags pro ./hack/docs/main.go + # Embed the charts into the vcluster binary [private] embed-charts version="0.0.0": @@ -116,3 +120,9 @@ e2e distribution="k3s" path="./test/e2e" multinamespace="false": create-kind && cli version="0.0.0" *ARGS="": RELEASE_VERSION={{ version }} go generate -tags embed_charts ./... go run -tags embed_charts -mod vendor -ldflags "-X main.version={{ version }}" ./cmd/vclusterctl/main.go {{ ARGS }} + +# --- Docs --- + +# Version the docs for the given version +docs-version id="pro" version="1.0.0": + yarn docusaurus docs:version {{version}} diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index c33a79a66..3dd38df33 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -1,55 +1,55 @@ -__webpack_public_path__ = "/docs/" +__webpack_public_path__ = "/docs/"; module.exports = { - title: 'vcluster docs | Virtual Clusters for Kubernetes', - tagline: 'Virtual Clusters for Kubernetes', - url: 'https://vcluster.com', + title: "vcluster docs | Virtual Clusters for Kubernetes", + tagline: "Virtual Clusters for Kubernetes", + url: "https://vcluster.com", baseUrl: __webpack_public_path__, - favicon: '/media/vcluster_symbol.svg', - organizationName: 'loft-sh', // Usually your GitHub org/user name. - projectName: 'vcluster', // Usually your repo name. + favicon: "/media/vcluster_symbol.svg", + organizationName: "loft-sh", // Usually your GitHub org/user name. + projectName: "vcluster", // Usually your repo name. 
themeConfig: { colorMode: { - defaultMode: 'light', + defaultMode: "light", disableSwitch: false, respectPrefersColorScheme: true, }, navbar: { logo: { - alt: 'vcluster', - src: '/media/vcluster_Horizontal_MonoBranding.svg', - href: 'https://vcluster.com/', - target: '_self', + alt: "vcluster", + src: "/media/vCluster_horizontal-orange.svg", + href: "https://vcluster.com/", + target: "_self", }, items: [ { - href: 'https://vcluster.com/', - label: 'Website', - position: 'left', - target: '_self' + href: "https://vcluster.com/", + label: "Website", + position: "left", + target: "_self", }, { - to: '/docs/what-are-virtual-clusters', - label: 'Docs', - position: 'left' + label: "Docs", + position: "left", + to: "/", }, { - href: 'https://loft.sh/blog', - label: 'Blog', - position: 'left', - target: '_self' + href: "https://loft.sh/blog", + label: "Blog", + position: "left", + target: "_self", }, { - href: 'https://slack.loft.sh/', - className: 'slack-link', - 'aria-label': 'Slack', - position: 'right', + href: "https://slack.loft.sh/", + className: "slack-link", + "aria-label": "Slack", + position: "right", }, { - href: 'https://github.com/loft-sh/vcluster', - className: 'github-link', - 'aria-label': 'GitHub', - position: 'right', + href: "https://github.com/loft-sh/vcluster", + className: "github-link", + "aria-label": "GitHub", + position: "right", }, ], }, @@ -58,41 +58,37 @@ module.exports = { apiKey: "42375731adc726ebb99849e9051aa9b4", indexName: "vcluster", placeholder: "Search...", - algoliaOptions: {} + algoliaOptions: {}, }, footer: { - style: 'light', + style: "light", links: [], copyright: `Copyright © ${new Date().getFullYear()} Loft Labs, Inc.`, }, }, presets: [ [ - '@docusaurus/preset-classic', + "@docusaurus/preset-classic", { docs: { - path: 'pages', - routeBasePath: '/', - sidebarPath: require.resolve('./sidebars.js'), - editUrl: - 'https://github.com/loft-sh/vcluster/edit/main/docs/', + path: "pages", + routeBasePath: "/", + sidebarPath: require.resolve("./sidebars.js"), + editUrl: "https://github.com/loft-sh/vcluster/edit/main/docs/", }, theme: { - customCss: require.resolve('./src/css/custom.css'), + customCss: require.resolve("./src/css/custom.css"), }, }, ], ], - plugins: [], scripts: [ { - src: - 'https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.0/clipboard.min.js', + src: "https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.0/clipboard.min.js", async: true, }, { - src: - '/docs/js/custom.js', + src: "/docs/js/custom.js", async: true, }, ], diff --git a/docs/pages/advanced-topics/plugins-development.mdx b/docs/pages/advanced-topics/plugins-development.mdx new file mode 100644 index 000000000..d92951ae0 --- /dev/null +++ b/docs/pages/advanced-topics/plugins-development.mdx @@ -0,0 +1,312 @@ +--- +title: "Development tutorial" +sidebar_label: "Development tutorial" +--- + +In this tutorial we will implement a ConfigMap syncer. Vcluster syncs ConfigMaps out of the box, but only those that are used by one of the pods created in vCluster. Here we will have a step-by-step look at a plugin implementation that will synchronize all ConfigMaps using the [vcluster plugin SDK](https://github.com/loft-sh/vcluster-sdk). 
+ + +### Prerequisites + +Before starting to develop, make sure you have installed the following tools on your computer: +- [docker](https://docs.docker.com/) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) with a valid kube context configured +- [helm](https://helm.sh/docs/intro/install/), which is used to deploy vCluster and the plugin +- [vcluster CLI](https://www.vcluster.com/docs/getting-started/setup) v0.9.1 or higher +- [Go](https://go.dev/dl/) programming language build tools + +## Implementation + +Check out the vCluster plugin example via: +``` +git clone https://github.com/loft-sh/vcluster-plugin-example.git +``` + +You'll see a bunch of files already created, but lets take a look at the `main.go` file: +``` +package main + +import ( + "github.com/loft-sh/vcluster-sdk/plugin" + "github.com/loft-sh/vcluster-sync-all-configmaps/syncers" +) + +func main() { + ctx := plugin.MustInit("sync-all-configmaps-plugin") + plugin.MustRegister(syncers.NewConfigMapSyncer(ctx)) + plugin.MustStart() +} +``` + +Let's break down what is happening in the `main()` function above. + +`ctx := plugin.MustInit("sync-all-configmaps-plugin")` - SDK will contact the vCluster backend server and retrieve it's configuration. The returned struct of type [`RegisterContext`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/context#RegisterContext) contains information about vCluster flags, namespace, vCluster client config, controller manager objects, etc. + +`plugin.MustRegister(syncers.NewConfigMapSyncer(ctx))` - we will implement the `NewConfigMapSyncer` function below, but for now, all we need to know is that it should return a struct that implements [`Base`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Base) interface, which is accepted by the [`MustRegister`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/plugin#MustRegister) function. We should call [`MustRegister`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/plugin#MustRegister) function for each syncer that we wish to be managed by the plugins controller manager. + +`plugin.MustStart()` - this blocking function will wait until the vCluster pod where this plugin container is running becomes the leader. Next, it will call the `Init()` and `RegisterIndices()` functions on the syncers that implement the [`Initializer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Initializer) and [`IndicesRegisterer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#IndicesRegisterer) respectively. Afterwards, the SDK will start its controller managers and call the `RegisterSyncer` or `RegisterFakeSyncer` function on the syncers that implement [`FakeSyncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#FakeSyncer) and [`Syncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Syncer) interfaces. Additionally, after configuring the default controller for the syncers, the `ModifyController` function is called for the syncers that implement [`ControllerModifier`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#ControllerModifier) interface, which gives a plugin developer a chance to interact with the controller builder object. All these interfaces act like hooks into different points of the SDK to allow you to customize the controller that will call your syncer based on the changes to the watched resources. + + +### Implementing a syncer for a namespaced resource + +In this chapter, we take a look at the `sync-all-configmaps.go` file that can be found in the `syncer` directory. 
+
+```
+package syncers
+
+import (
+	"github.com/loft-sh/vcluster-sdk/syncer"
+	syncercontext "github.com/loft-sh/vcluster-sdk/syncer/context"
+	"github.com/loft-sh/vcluster-sdk/syncer/translator"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func NewConfigMapSyncer(ctx *syncercontext.RegisterContext) syncer.Syncer {
+	return &configMapSyncer{
+		NamespacedTranslator: translator.NewNamespacedTranslator(ctx, "configmap", &corev1.ConfigMap{}),
+	}
+}
+
+type configMapSyncer struct {
+	translator.NamespacedTranslator
+}
+```
+
+After an import block, we see the `NewConfigMapSyncer` function, which is called from `main.go`. It returns a new instance of the `configMapSyncer` struct, which simply embeds the [`NamespacedTranslator`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/translator#NamespacedTranslator) type. The [`NamespacedTranslator`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/translator#NamespacedTranslator) implements many functions of the [`Syncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Syncer) interface for us, and we will implement the remaining ones - `SyncDown` and `Sync`.
+
+:::info
+You can get more familiar with the interfaces mentioned above by reading the SDK source files on GitHub - [vcluster-sdk/syncer/types.go](https://github.com/loft-sh/vcluster-sdk/blob/main/syncer/types.go) and [vcluster-sdk/syncer/translator/translator.go](https://github.com/loft-sh/vcluster-sdk/blob/main/syncer/translator/translator.go), or by using the pkg.go.dev website - [Syncer](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Syncer) and [NamespacedTranslator](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/translator#NamespacedTranslator).
+:::
+
+The `SyncDown` function mentioned above is called by the vCluster SDK when a given resource, e.g. a ConfigMap, is created in the vCluster, but it doesn't exist in the host cluster yet. To create a ConfigMap in the host cluster we will call the `SyncDownCreate` function with the output of the `translate` function as the third parameter. This demonstrates a typical pattern used in the vCluster syncer implementations.
+
+```
+// SyncDown is called when the ConfigMap exists in the vCluster but not yet in the host cluster.
+func (s *configMapSyncer) SyncDown(ctx *syncercontext.SyncContext, vObj client.Object) (ctrl.Result, error) {
+	return s.SyncDownCreate(ctx, vObj, s.translate(vObj.(*corev1.ConfigMap)))
+}
+
+// translate builds the host cluster representation of the virtual ConfigMap.
+func (s *configMapSyncer) translate(vObj client.Object) *corev1.ConfigMap {
+	return s.TranslateMetadata(vObj).(*corev1.ConfigMap)
+}
+```
+The `TranslateMetadata` function used above produces a ConfigMap object that will be created in the host cluster. It is a deep copy of the ConfigMap from vCluster, but with certain metadata modifications - the name and labels are transformed, some vCluster labels and annotations are added, and many metadata fields are stripped (uid, resourceVersion, etc.).
+
+Next, we need to implement the code that will handle updates of the ConfigMap. When a ConfigMap in the vCluster or the host cluster is updated, the vCluster SDK will call the `Sync` function of the syncer. The current ConfigMap resources from the host cluster and from the vCluster are passed as the second and third parameters respectively. In the implementation below, you can see another pattern used by the vCluster syncers.
The `translateUpdate` function will return nil when no change to the ConfigMap in the host cluster is needed, and the `SyncDownUpdate` function will not do an unnecessary update API call in such case. + +``` + +func (s *configMapSyncer) Sync(ctx *syncercontext.SyncContext, pObj client.Object, vObj client.Object) (ctrl.Result, error) { + return s.SyncDownUpdate(ctx, vObj, s.translateUpdate(pObj.(*corev1.ConfigMap), vObj.(*corev1.ConfigMap))) +} + +func (s *configMapSyncer) translateUpdate(pObj, vObj *corev1.ConfigMap) *corev1.ConfigMap { + var updated *corev1.ConfigMap + + changed, updatedAnnotations, updatedLabels := s.TranslateMetadataUpdate(vObj, pObj) + if changed { + updated = translator.NewIfNil(updated, pObj) + updated.Labels = updatedLabels + updated.Annotations = updatedAnnotations + } + + // check if the data has changed + if !equality.Semantic.DeepEqual(vObj.Data, pObj.Data) { + updated = translator.NewIfNil(updated, pObj) + updated.Data = vObj.Data + } + + // check if the binary data has changed + if !equality.Semantic.DeepEqual(vObj.BinaryData, pObj.BinaryData) { + updated = translator.NewIfNil(updated, pObj) + updated.BinaryData = vObj.BinaryData + } + return updated +} +``` + +As you might have noticed, the changes to the Immutable field of the ConfigMap are not being checked and propagated to the updated ConfigMap. That is done just for the simplification of the code in this tutorial. In the real world use cases, there will likely be many scenarios and edge cases that you will need to handle differently than just with a simple comparison and assignment. For example, you will need to look out for label selectors that are interpreted in the host cluster, e.g. pod selectors in the NetworkPolicy resources are interpreted by the host cluster network plugin. Such selectors must be translated when synced down to the host resources. Several functions for the common use cases are [built into the SDK in the `syncer/translator` package](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/translator#pkg-functions), including the `TranslateLabelSelector` function. + +Also, notice that this example lacks the updates to the ConfigMap resource in vCluster. Here we propagate the changes only down to the ConfigMap in the host cluster, but there are resources or use cases where a syncer would update the synced resource in vCluster. For example, this might be an update of the status subresource or synchronization of any other field that some controller sets on the host side, e.g., finalizers. Implementation of such updates needs to be considered on case-by-case basis. +For some use cases, you may need to sync the resources in the opposite direction, from the host cluster up into the vCluster, or even in both directions. If that is what your plugin needs to do, you will implement the [`UpSyncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#UpSyncer) interface defined by the SDK. + +### Adding a hook for changing a resource on the fly + +Hooks are a great feature to adjust current syncing behaviour of vCluster without the need to override an already existing syncer in vCluster completely. They allow you to change outgoing objects of vCluster similar to an mutating admission controller in Kubernetes. Requirement for an hook to work correctly is that vCluster itself would sync the resource, so hooks only work for the core resources that are synced by vCluster such as pods, services, secrets etc. 
+ +To add a hook to your plugin, you simply need to create a new struct that implements the `ClientHook` interface: + +``` +package myhook + +import ( + "context" + "fmt" + "github.com/loft-sh/vcluster-sdk/hook" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func NewPodHook() hook.ClientHook { + return &podHook{} +} + +type podHook struct{} + +func (p *podHook) Name() string { + return "pod-hook" +} + +func (p *podHook) Resource() client.Object { + return &corev1.Pod{} +} +``` + +The `Name()` function defines the name of the hook which is used for logging purposes. The `Resource()` function returns the object you want to mutate. Besides those functions you can now define what actions you want to hook into inside vCluster's syncer: +``` +type MutateCreateVirtual interface { + MutateCreateVirtual(ctx context.Context, obj client.Object) (client.Object, error) +} + +type MutateUpdateVirtual interface { + MutateUpdateVirtual(ctx context.Context, obj client.Object) (client.Object, error) +} + +type MutateDeleteVirtual interface { + MutateDeleteVirtual(ctx context.Context, obj client.Object) (client.Object, error) +} + +type MutateGetVirtual interface { + MutateGetVirtual(ctx context.Context, obj client.Object) (client.Object, error) +} + +type MutateCreatePhysical interface { + MutateCreatePhysical(ctx context.Context, obj client.Object) (client.Object, error) +} + +type MutateUpdatePhysical interface { + MutateUpdatePhysical(ctx context.Context, obj client.Object) (client.Object, error) +} + +type MutateDeletePhysical interface { + MutateDeletePhysical(ctx context.Context, obj client.Object) (client.Object, error) +} + +type MutateGetPhysical interface { + MutateGetPhysical(ctx context.Context, obj client.Object) (client.Object, error) +} +``` + +By implementing one or more of the above interfaces you will receive events from vCluster that allows you to mutate an outgoing or incoming object to vCluster. +For example, to add an hook that adds a custom label to a pod, you can add the following code: +``` +var _ hook.MutateCreatePhysical = &podHook{} + +func (p *podHook) MutateCreatePhysical(ctx context.Context, obj client.Object) (client.Object, error) { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("object %v is not a pod", obj) + } + + if pod.Labels == nil { + pod.Labels = map[string]string{} + } + pod.Labels["created-by-plugin"] = "pod-hook" + return pod, nil +} + +var _ hook.MutateUpdatePhysical = &podHook{} + +func (p *podHook) MutateUpdatePhysical(ctx context.Context, obj client.Object) (client.Object, error) { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("object %v is not a pod", obj) + } + + if pod.Labels == nil { + pod.Labels = map[string]string{} + } + pod.Labels["created-by-plugin"] = "pod-hook" + return pod, nil +} +``` + +Incoming objects into vCluster can be modified through the `MutateGetPhysical` or `MutateGetVirtual` which allows you to change how vCluster is retrieving objects from either the virtual or physical cluster. +This can be useful if you don't want vCluster to change something you have mutated back for example. + +### Build and push your plugin + +Now you can run docker commands to build your container image and push it to the registry. +``` +docker build -t your_org/vcluster-sync-all-configmaps . && docker push your_org/vcluster-sync-all-configmaps +``` + +### Add plugin.yaml + +The last step before installing your plugin is creating a yaml file with your plugin metadata. 
This file follows the format of the Helm values files. It will be merged with other values files when a vCluster is installed or upgraded. For the plugin we just implemented and built it would look like this: + +``` +plugin: + sync-all-configmaps-plugin: + image: your_org/vcluster-sync-all-configmaps +syncer: + extraArgs: + - "--sync=-configmaps" +``` + +The first three lines contain a minimal definition of a vCluster plugin - a container name based on the key (second line) and container image (third line). The last three lines then contain extra values that the plugin will apply to the vCluster chart. These are needed for this particular plugin and are not mandatory otherwise. Our plugin would be syncing some ConfigMaps that would also be synced by the built-in "configmaps" syncer of the vCluster, and to avoid conflicting updates we will disable the built-in syncer by passing an additional command-line argument to the syncer container. + +### Deploy the plugin + +You can deploy your plugin to a vCluster using the same commands as [described on the overview page](./plugins-overview.mdx#loading-and-installing-plugins-to-vcluster), for example, with the vCluster CLI. +``` +vcluster create my-vcluster -n my-vcluster -f plugin.yaml +``` + +### Fast Plugin Development with DevSpace + +When developing your plugin we recommend using the [devspace](https://devspace.sh/) CLI tool for running your local plugin source code directly in Kubernetes. The appropriate configuration is already present in the `devspace.yaml` and you can start developing by running the following command: + +:::info +If you want to develop within a remote Kubernetes cluster (as opposed to docker-desktop or minikube), make sure to exchange `PLUGIN_IMAGE` in the `devspace.yaml` with a valid registry path you can push to. +::: + +After successfully setting up the tools, start the development environment with: +``` +devspace dev -n vcluster +``` + +After a while a terminal should show up with additional instructions. Enter the following command to start the plugin: +``` +go run -mod vendor main.go +``` + +The output should look something like this: +``` +I0124 11:20:14.702799 4185 logr.go:249] plugin: Try creating context... +I0124 11:20:14.730044 4185 logr.go:249] plugin: Waiting for vcluster to become leader... +I0124 11:20:14.731097 4185 logr.go:249] plugin: Starting syncers... +[...] +I0124 11:20:15.957331 4185 logr.go:249] plugin: Successfully started plugin. +``` + +You can now change a file locally in your IDE and then restart the command in the terminal to apply the changes to the plugin. + +DevSpace will create a development vCluster which will execute your plugin. Any changes made within the vCluster created by DevSpace will execute against your plugin. +``` +vcluster list + + NAME NAMESPACE STATUS CONNECTED CREATED AGE + vcluster vcluster Running True 2022-09-06 20:33:20 +1000 AEST 2h26m8s +``` + +After you are done developing or you want to recreate the environment, delete the development environment with: +``` +devspace purge -n vcluster +``` + + diff --git a/docs/pages/advanced-topics/plugins-overview.mdx b/docs/pages/advanced-topics/plugins-overview.mdx new file mode 100644 index 000000000..e7ae5f5d6 --- /dev/null +++ b/docs/pages/advanced-topics/plugins-overview.mdx @@ -0,0 +1,113 @@ +--- +title: Overview +sidebar_label: Plugins Overview +--- + +Plugins are a feature to extend the capabilities of vCluster. They allow you to add custom functionality, such as: + +1. 
Syncing specific resources from or to the virtual clusters, including cluster scoped resources like cluster roles +2. Syncing custom resources from or to the virtual cluster +3. Deploying resources on virtual cluster startup, such as CRDs, applications, etc. +4. Manage resources and applications inside the host or virtual cluster +5. Enforcing certain restrictions on synced resources or extending the existing syncers of vCluster +6. Any other operator use case that could benefit from having access to the virtual cluster and the host cluster simultaneously. + +A plugin in its purest form is a [Kubernetes operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) that will have access to both the virtual cluster and the host cluster simultaneously. +This is the main difference between a vCluster plugin and a regular Kubernetes operator that you would just install inside the vCluster itself. +Given this dual access, the plugin is able to translate resources between both clusters, which is the basic building block of [how vCluster works](../what-are-virtual-clusters.mdx). + +:::tip Recommended Reads +In order to better understand how vCluster plugins work, it is recommended to read about Kubernetes [operators](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) as well as [controllers](https://kubernetes.io/docs/concepts/architecture/controller/). +::: + +## Architecture + +Each plugin will run as a sidecar container inside the vCluster pod. +This is done to allow easier communication between vCluster and the plugins as well as provide certain capabilities such as high-availability out of the box. +The plugin itself will contact the vCluster pod during startup to obtain the access credentials to the virtual and host cluster. +The plugin controllers are started with these credentials, similar to how vCluster itself starts its resource syncers. + +### Plugin Controllers + +Resource syncing is the heart of vCluster which enables the virtual cluster to behave like an actual Kubernetes cluster. +A [Kubernetes controller](https://kubernetes.io/docs/concepts/architecture/controller/) that is responsible for resource syncing in vCluster is called a syncer. +This controller reacts on changes to objects within the virtual cluster and on changes to objects within the host cluster. +The syncer tries to map each virtual object to a physical object in the host cluster and then compares those. +After it discovers a change, the syncer ensures that the virtual cluster object and the physical cluster object are aligned in the desired state, +and if not, the syncer changes either one of those objects to reflect the desired state. + +Each plugin can define several of those resource syncers that would work exactly like the built-in syncers of vCluster. +However, you'll not need to sync every Kubernetes resource to the host cluster, as some can stay purely virtual. +Only resources that influence the workloads need to be synced, for example, pods, services, and endpoints, while others such as deployments, replicasets, namespaces etc. are only relevant to the Kubernetes control plane and hence are not needed in the host cluster. + +There are sometimes also cases where you want to manage specific core resources yourself without interfering with what vCluster is syncing, for example special secrets or configmaps that were created from the host cluster or a different resource inside the host cluster. 
+For this use case you can label resources vCluster should ignore either on the physical or virtual side with a label `vcluster.loft.sh/controlled-by` and a custom value of your choosing. This will tell vCluster to ignore the resource in its syncers. + +### Plugin Hooks + +Plugin hooks are a great feature to adjust current syncing behaviour of vCluster without the need to override an already existing syncer in vCluster completely. +They allow you to change outgoing objects of vCluster similar to an mutating admission controller in Kubernetes. +Requirement for an hook to work correctly is that vCluster itself would sync the resource, so hooks only work for the core resources that are synced by vCluster such as pods, services, secrets etc. + +If a plugin registers a hook to a specific resource, vCluster will forward all requests that match the plugin's defined hooks to the plugin and the plugin can then adjust or even deny the request completely. +This opens up a wide variety of adjustment possibilities for plugins, where you for example only want to add a custom label or annotation. + +### Plugin SDK + +:::tip Recommended Reads +If you want to start developing your own vCluster plugins, it is recommended that you read about [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) as well as [kube builder](https://book.kubebuilder.io/introduction.html) that uses the controller runtime internally. +::: + +vCluster provides an [SDK](https://github.com/loft-sh/vcluster-sdk) for writing plugin controllers that abstracts a lot of the syncer complexity away from the user, but still gives you access to the underlying structures if you need it. +Internally, the vCluster SDK uses the popular [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) project, that is used by vCluster itself to create the controllers. +The vCluster SDK lets you write custom plugin controllers with just a few lines of code. + +Since the plugin SDK interfaces are mostly compatible with the vCluster syncers, you can also take a look at how those are implemented in [the vCluster itself](https://github.com/loft-sh/vcluster/tree/main/pkg/controllers/resources), which work in most cases the same way as if those would be implemented in a plugin. +It would be even possible to reimplement all vCluster syncers in a separate plugin. + +## Loading and Installing Plugins to vCluster + +Since the most common distribution method of vCluster is helm, plugins are also configured via helm values. +If you develop a plugin of your own, we recommend creating a `plugin.yaml` (the name has no special functionality, you could also name it `my-plugin.yaml` or `extra-values.yaml`) in the following format: + +``` +# Plugin Definition below. This is essentially a valid helm values file that will be merged +# with the other vCluster values during vCluster create or helm install. +plugin: + myPlugin: + image: plugin-image + # Other optional sidecar values below + # command: ... + # env: ... + # Configure Extra RBAC Rules like this + #rbac: + # role: + # extraRules: + # - apiGroups: ["example.loft.sh"] + # ... + # clusterRole: + # extraRules: + # - apiGroups: ["apiextensions.k8s.io"] + # ... +``` + +The `plugin.yaml` is a valid helm values file used to define the plugin's sidecar configuration and additional RBAC rules needed to function properly. 
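+
+As a rough sketch only, the commented sections above could be filled in like this for a hypothetical plugin that manages a namespaced custom resource and needs read access to its CRD (the plugin name, image, env variable and API group below are placeholders, not values defined by vCluster):
+
+```
+plugin:
+  my-crd-plugin:
+    image: my-org/my-crd-plugin:0.0.1
+    env:
+      - name: LOG_LEVEL
+        value: "debug"
+# Extra RBAC rules granted for this plugin
+rbac:
+  role:
+    extraRules:
+      # manage the plugin's namespaced custom resource in the host namespace
+      - apiGroups: ["example.loft.sh"]
+        resources: ["myresources"]
+        verbs: ["create", "delete", "patch", "update", "get", "list", "watch"]
+  clusterRole:
+    extraRules:
+      # read the CustomResourceDefinition itself, which is cluster-scoped
+      - apiGroups: ["apiextensions.k8s.io"]
+        resources: ["customresourcedefinitions"]
+        verbs: ["get", "list", "watch"]
+```
+
+Which rules you actually need depends on what your syncers and hooks read or write in the host cluster.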
If you want to distribute that plugin for others, it's also possible to install a plugin through an URL: + +``` +# Install a plugin with a local plugin.yaml +vcluster create my-vcluster -n my-vcluster -f plugin.yaml -f other-values.yaml + +# Install a plugin with a remote URL +vcluster create my-vcluster -n my-vcluster -f https://github.com/my-org/my-plugin/plugin.yaml -f other-values.yaml + +# Install a plugin with helm with a remote URL +helm install my-vcluster vcluster -n my-vcluster --repo https://charts.loft.sh -f https://github.com/my-org/my-plugin/plugin.yaml -f other-values.yaml +``` + +:::info Examples +You can take a look at the [vcluster-sdk repo](https://github.com/loft-sh/vcluster-sdk/tree/main/examples) for some working examples. +::: + +:::warning Don't install untrusted plugins +A plugin runs with the same permissions as vCluster itself does in the Kubernetes cluster and can also define additional permissions through its `plugin.yaml`, so make sure you only install plugins you trust. +::: diff --git a/docs/pages/telemetry.mdx b/docs/pages/advanced-topics/telemetry.mdx similarity index 85% rename from docs/pages/telemetry.mdx rename to docs/pages/advanced-topics/telemetry.mdx index ea3b3cf47..d7563cfee 100644 --- a/docs/pages/telemetry.mdx +++ b/docs/pages/advanced-topics/telemetry.mdx @@ -3,27 +3,27 @@ title: Telemetry sidebar_label: Telemetry --- -import TelemetryOptOutSegment from './fragments/telemetry-opt-out.mdx' +import TelemetryOptOutSegment from '../fragments/telemetry-opt-out.mdx' -In this section, you will find a description of the vcluster telemetry - why we are collecting telemetry, what data points are we gathering, where we are sending the data, and how to opt-out. +In this section, you will find a description of the vCluster telemetry - why we are collecting telemetry, what data points are we gathering, where we are sending the data, and how to opt-out. ### Why do we collect telemetry -Because vcluster is a freely available open source project, we as maintainers have a very limited idea of how the project is being used, and very limited possibilities to gather this information from the users. Without reliable information, it is difficult to make decisions about the prioritization of features, test automation or bug fixes. Deprecation of the flags and features turns into guesswork, and removal becomes nearly impossible. +Because vCluster is a freely available open source project, we as maintainers have a very limited idea of how the project is being used, and very limited possibilities to gather this information from the users. Without reliable information, it is difficult to make decisions about the prioritization of features, test automation or bug fixes. Deprecation of the flags and features turns into guesswork, and removal becomes nearly impossible. To get to the next step in maturing the project, and ensure long-term maintainability, we will be making decisions about feature deprecation, prioritizing test coverage, etc., and we want these decisions to be data-driven. ### What are we collecting and how -First of all, we want to emphasize that we are not interested in collecting data about individuals that are using vcluster, we are collecting data about how it is being used. This entails information about the configuration of vcluster, and the environment where it is deployed (e.g. Kubernetes version, CPU architecture, etc.). 
+First of all, we want to emphasize that we are not interested in collecting data about individuals that are using vCluster, we are collecting data about how it is being used. This entails information about the configuration of vCluster, and the environment where it is deployed (e.g. Kubernetes version, CPU architecture, etc.). -Each vcluster is deployed with a "syncer" component that contains all controllers that make a virtual cluster function. This component will be collecting the data and uploading it to our backend at regular intervals (once every 1-5 minutes). We provide a documented example of the telemetry payload that would be uploaded, and of course, the source code responsible for this is fully available in [our repo](https://github.com/loft-sh/vcluster). The telemetry backend is hosted at this address - "https://admin.loft.sh/analytics/v1/vcluster/". The ingestion service is written and maintained by the vcluster maintainers. The data is saved into a relational database with strict access control. +Each vCluster is deployed with a "syncer" component that contains all controllers that make a virtual cluster function. This component will be collecting the data and uploading it to our backend at regular intervals (once every 1-5 minutes). We provide a documented example of the telemetry payload that would be uploaded, and of course, the source code responsible for this is fully available in [our repo](https://github.com/loft-sh/vcluster). The telemetry backend is hosted at this address - "https://admin.loft.sh/analytics/v1/vcluster/". The ingestion service is written and maintained by the vCluster maintainers. The data is saved into a relational database with strict access control. ### Telemetry payload example -Below you can find an example of the payload that vcluster syncer component would send to our telemetry backend. Some fields are self-explanatory, and some are explained below the example. +Below you can find an example of the payload that vCluster syncer component would send to our telemetry backend. Some fields are self-explanatory, and some are explained below the example. ```json { @@ -78,14 +78,14 @@ Below you can find an example of the payload that vcluster syncer component woul } ``` -- `instanceProperties.uid` - is a unique identifier of a particular instance. It is used to deduplicate data sent over time. The `.metadata.uid` value of the vcluster PVC or Service resource is used as value. -- `instanceProperties.instanceCreatorUID` - is a machine identifier attached by the vcluster CLI to an instance during creation. We use [machineid](https://github.com/denisbrodbeck/machineid) library to get the identifier, and then hash it for privacy. +- `instanceProperties.uid` - is a unique identifier of a particular instance. It is used to deduplicate data sent over time. The `.metadata.uid` value of the vCluster PVC or Service resource is used as value. +- `instanceProperties.instanceCreatorUID` - is a machine identifier attached by the vCluster CLI to an instance during creation. We use [machineid](https://github.com/denisbrodbeck/machineid) library to get the identifier, and then hash it for privacy. - `instanceProperties.syncerFlags` - contains a JSON payload that can have two fields: `setFlags` - list of the non-default syncer flags that were set, but the values are not collected, we only set `true` as value for each key; `controllers` - list of the resource sync controllers that have been enabled in addition to the default one. 
- `events` - an array of events for which we want to track duration and outcome (success/failure). We are sending just the GVK of the resource, but never any content.
-- `token` - this is a token generated in memory from a static key that is part of the vcluster binary. It is used to validate that the payload is being received from a real vcluster binary.
+- `token` - this is a token generated in memory from a static key that is part of the vCluster binary. It is used to validate that the payload is being received from a real vCluster binary.

### Telemetry opt-out process

-Below, you can find the instructions for disabling the telemetry based on the tool that you use to install or upgrade your vcluster instances.
+Below, you can find the instructions for disabling the telemetry based on the tool that you use to install or upgrade your vCluster instances.
\ No newline at end of file
diff --git a/docs/pages/architecture/control_plane/control_plane.mdx b/docs/pages/architecture/control_plane/control_plane.mdx
new file mode 100644
index 000000000..16e677a8c
--- /dev/null
+++ b/docs/pages/architecture/control_plane/control_plane.mdx
@@ -0,0 +1,12 @@
+---
+title: vCluster Control Plane
+sidebar_label: vCluster Control Plane
+---
+
+The vCluster control plane container contains the API server, the controller manager and a connection (or mount) of the data store. By default, vClusters use sqlite as data store and run the API server and controller manager of k3s, which is a certified Kubernetes distribution and CNCF sandbox project. You can also use a [different data store, such as etcd, mysql or postgresql](../../deploying-vclusters/persistence.mdx). You are also able to use another Kubernetes distribution as backing virtual cluster, such as [k0s or vanilla k8s](./k8s_distros.mdx).
+
+Each vCluster has its own control plane consisting of:
+- **Kubernetes API** server (point your kubectl requests to this vCluster API server)
+- **Data store** (where the API stores all resources, real clusters run with etcd)
+- **Controller Manager** (creates pod objects in the data store according to the replica number in ReplicaSets etc.)
+- **(Optional) Scheduler** (schedules workloads inside the virtual cluster. See [scheduling](../scheduling.mdx) for more info)
\ No newline at end of file
diff --git a/docs/pages/operator/other-distributions.mdx b/docs/pages/architecture/control_plane/k8s_distros.mdx
similarity index 73%
rename from docs/pages/operator/other-distributions.mdx
rename to docs/pages/architecture/control_plane/k8s_distros.mdx
index 17df13353..71974af80 100644
--- a/docs/pages/operator/other-distributions.mdx
+++ b/docs/pages/architecture/control_plane/k8s_distros.mdx
@@ -1,19 +1,18 @@
---
-title: Other Kubernetes distributions
-sidebar_label: Other Kubernetes distributions
+title: Kubernetes distributions
+sidebar_label: Kubernetes distributions
---
-import HighAvailabilityK8s from '../fragments/high-availability-k8s.mdx';

-By default, vcluster will use [k3s](https://github.com/k3s-io/k3s) as virtual Kubernetes cluster, which is a highly available, certified Kubernetes distribution designed for production workloads in unattended, resource-constrained, remote locations or inside IoT appliances.
+By default, vCluster will use [k3s](https://github.com/k3s-io/k3s) as virtual Kubernetes cluster, which is a highly available, certified Kubernetes distribution designed for production workloads in unattended, resource-constrained, remote locations or inside IoT appliances.
-However, vcluster is not tied to a specific distribution and should work with all certified Kubernetes distributions. By default, we recommend to use k3s, because it has a small footprint and widely adopted, but if your use case requires a different k8s distribution, vcluster currently also supports k0s or vanilla k8s. If that is also not enough, you can also add your custom Kubernetes distribution as outlined below. +However, vCluster is not tied to a specific distribution and should work with all certified Kubernetes distributions. By default, we recommend to use k3s, because it has a small footprint and widely adopted, but if your use case requires a different k8s distribution, vCluster currently also supports k0s or vanilla k8s. If that is also not enough, you can also add your custom Kubernetes distribution as outlined below. ## k0s -[k0s](https://github.com/k0sproject/k0s) is an all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster and packaged as a single binary for ease of use. vcluster supports k0s as backing virtual Kubernetes cluster. +[k0s](https://github.com/k0sproject/k0s) is an all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster and packaged as a single binary for ease of use. vCluster supports k0s as backing virtual Kubernetes cluster. -In order to use k0s as backing cluster, create a vcluster with the following command: +In order to use k0s as backing cluster, create a vCluster with the following command: ``` vcluster create my-vcluster --distro k0s @@ -29,15 +28,15 @@ Behind the scenes a different helm chart will be deployed (`vcluster-k0s`), that ## Vanilla k8s -When choosing this option, vcluster will deploy a separate etcd cluster, kubernetes controller manager and api server alongside the vcluster hypervisor. +When choosing this option, vCluster will deploy a separate etcd cluster, kubernetes controller manager and api server alongside the vCluster hypervisor. -In order to use vanilla k8s as backing cluster, create a vcluster with the following command: +In order to use vanilla k8s as backing cluster, create a vCluster with the following command: ``` vcluster create my-vcluster --distro k8s ``` -Connect to the vcluster and start using it: +Connect to the vCluster and start using it: ``` kubectl get ns ... @@ -45,14 +44,10 @@ kubectl get ns Behind the scenes a different helm chart will be deployed (`vcluster-k8s`), that holds specific configuration to support vanilla k8s. Check the [github repository](https://github.com/loft-sh/vcluster/tree/main/charts/k8s) for all available chart options. -### High Available Vanilla k8s - - - ## Other Distributions -vcluster has no dependencies on any specific Kubernetes distribution, so you should be able to run it with most certified Kubernetes distributions. -One requirement vcluster has, is that the distribution can be deployed without a scheduler and kubelet, meaning that vcluster just requires the api server, controller manager and data storage of the distribution. +vCluster has no dependencies on any specific Kubernetes distribution, so you should be able to run it with most certified Kubernetes distributions. +One requirement vCluster has, is that the distribution can be deployed without a scheduler and kubelet, meaning that vCluster just requires the api server, controller manager and data storage of the distribution. 
For single binary distributions, such as k3s or k0s, extra bundled components can usually be disabled through flags, for multi binary distributions, such as vanilla k8s, you just need to deploy the virtual control plane with api server, controller manager and usually etcd. Most multi binary distributions work by just overriding the images of the k8s chart in a `values.yaml`, e.g.: @@ -66,18 +61,18 @@ etcd: image: custom-domain.com/custom-etcd:v3.4.16 ``` -And then deploy vcluster with: +And then deploy vCluster with: ``` vcluster create my-vcluster -n test --distro k8s -f values.yaml ``` If you want to create a separate chart for the Kubernetes distribution, a good starting point is to copy one of [our distro charts](https://github.com/loft-sh/vcluster/tree/main/charts) and then modifying it to work with your distribution. -vcluster only needs the following information from the virtual Kubernetes distribution to function properly: +vCluster only needs the following information from the virtual Kubernetes distribution to function properly: 1. The api server central authority certificate (usually found at `/pki/ca.crt`) 2. The api server central authority key (usually found at `/pki/ca.key`) 3. An admin kube config to contact the virtual Kubernetes control plane (usually found at `/pki/admin.conf`) -For multi binary distributions, vcluster can even create those with a pre-install hook as found in the [k8s chart](https://github.com/loft-sh/vcluster/tree/main/charts/k8s/templates). +For multi binary distributions, vCluster can even create those with a pre-install hook as found in the [k8s chart](https://github.com/loft-sh/vcluster/tree/main/charts/k8s/templates). -In general, if you need vcluster to support another Kubernetes distribution, we are always happy to help you or accept a pull request in our github repository. +In general, if you need vCluster to support another Kubernetes distribution, we are always happy to help you or accept a pull request in our github repository. diff --git a/docs/pages/architecture/networking.mdx b/docs/pages/architecture/networking.mdx deleted file mode 100644 index b017391dd..000000000 --- a/docs/pages/architecture/networking.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Network & DNS -sidebar_label: Network & DNS ---- - -
-[Figure: vcluster - Networking]
- -By default, resources such as `Service` and `Ingress` are synced from the virtual cluster to the host cluster in order to enable correct network functionality for the vcluster. - - -## Pod-To-Pod Traffic -Since pods are synchronized by the [syncer component](./scheduling.mdx) of the vcluster, they actually run inside the host namespace of the underlying cluster. That means that these pods have regular cluster-internal IP addresses and can communicate with each other via IP-based networking. - -## Pod-To-Service Traffic -By default, the vcluster also synchronizes Services (while stripping away unnecessary information from the resource) to allow pods to communicate with services. However, instead of using the DNS names of the services inside the host cluster, the vcluster has its own DNS service which allows the vcluster pods to use much more intuitive DNS mappings just as in a regular cluster. - -## Mapping Services between vcluster and Host Cluster - -Each vcluster has its own DNS service (CoreDNS by default) which allows pods in the vcluster to get the IP addresses of services that are also running in this vcluster. -The vcluster syncer ensures that the intuitive naming logic of Kubernetes DNS names for services applies and users can connect to these DNS names which in fact map to the IP address of the synchronized services that are present in the underlying host cluster. - -However, this also means that you cannot directly access host services inside the virtual cluster via DNS as well as host pods can only access virtual cluster services by their synced name. vcluster offers a feature to map services from the virtual cluster to the host cluster and vice versa. - -### Map Host Cluster Service to vcluster Service - -For example, to map a service `my-host-service` in the namespace `my-host-namespace` to the virtual cluster service `my-virtual-service` in the virtual cluster namespace `my-virtual-namespace`, you can use the following config in your `values.yaml`: - -```yaml -mapServices: - fromHost: - - from: my-host-namespace/my-host-service - to: my-virtual-namespace/my-virtual-service -``` - -With this configuration, vcluster will manage a service called `my-virtual-service` inside the virtual cluster that points to the host service `my-host-service` in namespace `my-host-namespace`. So pods inside the vcluster will be able to access the host service via e.g. `curl http://my-virtual-service.my-virtual-namespace`. - -### Map vcluster Service to Host Cluster Service - -It is also possible to map a virtual cluster service to an host cluster service. This is especially useful if you want to expose an application that runs inside the virtual cluster to other workloads running in the host cluster. This makes it also easier to share services across vcluster's. -For example, to map a virtual service `my-virtual-service` in the namespace `my-virtual-namespace` to the vcluster host namespace service `my-host-service`, you can use the following config in your `values.yaml`: - -```yaml -mapServices: - fromVirtual: - - from: my-virtual-namespace/my-virtual-service - to: my-host-service -``` - -With this configuration, vcluster will manage a service called `my-host-service` inside the namespace where the vcluster workloads are synced to, which points to the virtual service `my-virtual-service` in namespace `my-virtual-namespace` inside the vcluster. So pods in the host cluster will be able to access the virtual service via e.g. `curl http://my-host-service`. 
- -### Fallback to host DNS -If enabled, will fallback to host dns for resolving domains. This is useful if using istio or dapr in the host cluster and sidecar containers cannot connect to the central instance. Its also useful if you want to access host cluster services from within the vcluster. We can enable this feature with -```yaml -fallbackHostDns: true -``` - -## Ingress Controller Traffic -The vcluster has the option to enable Ingress resources synchronization. That means that you can create an ingress in a vcluster to make a service in this vcluster available via a hostname/domain. However, instead of having to run a separate ingress controller in each vcluster, the ingress resource will be synchronized to the underlying cluster (when enabled) which means that the vcluster can use a shared ingress controller that is running in the host cluster. This helps to share resources across different vclusters and is easier for users of vclusters because otherwise, they would need to install an ingress controller and manually configure DNS for each vcluster. - -:::info -Before the v0.12.0 release of vcluster, the Ingress synchronization was enabled by default. -::: - - -### Enable Ingress Sync -If you want to use an ingress controller from the underlying cluster by synchronizing the Ingress resources, set the following in your `values.yaml`: -``` -sync: - ingresses: - enabled: true -``` -then create or upgrade the vcluster with: - -``` -vcluster create my-vcluster --upgrade -f values.yaml -``` - -By default, when Ingress sync is enabled, the IngressClasses will be synced from the underlying cluster to the vcluster. This can be disabled by setting the following in your `values.yaml`: -``` -sync: - ingressclasses: - enabled: false -``` - -### SSL Certificates -Because the syncer keeps typical SSL provisioning related annotations for ingresses, you may also set the cert-manager ingress annotations on an ingress in your vclusters to use the cert-manager of the underlying host cluster to automatically provision SSL certificates from Let's Encrypt. - -## Network Policies -Kubernetes has a [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) resource type that allows creation of the rules that govern how pods communicate with each other. - -By default, vcluster ignores these resources. However, once you enable synchronization of the Network Policies, vcluster will ensure correct policies are created in the host cluster to achieve the desired traffic behaviour. - -:::info -Network Policies in vcluster rely on the support for this feature in the host cluster. Make sure that your host cluster satisfies the [Network Policy prerequisites](https://kubernetes.io/docs/concepts/services-networking/network-policies/#prerequisites). -::: - -### Enable Network Policy Sync -To enable the synchronization of the Network Policy resources add the following to your `values.yaml`: -``` -sync: - networkpolicies: - enabled: true -``` -then create or upgrade the vcluster with: - -``` -vcluster create my-vcluster --upgrade -f values.yaml -``` diff --git a/docs/pages/architecture/nodes.mdx b/docs/pages/architecture/nodes.mdx index e9c662b7a..ecfa38523 100644 --- a/docs/pages/architecture/nodes.mdx +++ b/docs/pages/architecture/nodes.mdx @@ -3,16 +3,16 @@ title: Nodes sidebar_label: Nodes --- -By default, vcluster will create fake nodes for every pod `spec.nodeName` it encounters within the virtual cluster. 
Those fake nodes are created because vcluster has no RBAC permissions by default to view the real nodes in the host cluster, as this would need a cluster role and cluster role binding. It will also create a fake kubelet endpoint for each node that will forward requests to the actual node or rewrite them to preserve virtual cluster names. +By default, vCluster will create fake nodes for every pod `spec.nodeName` it encounters within the virtual cluster. Those fake nodes are created because vCluster has no RBAC permissions by default to view the real nodes in the host cluster, as this would need a cluster role and cluster role binding. It will also create a fake kubelet endpoint for each node that will forward requests to the actual node or rewrite them to preserve virtual cluster names. ## Node Syncing Modes -vcluster supports multiple modes to customize node syncing behaviour: -- **Fake Nodes** (default): vcluster will create fake nodes for each `spec.nodeName`. If there are no more pods on a node, the fake node will be deleted (no cluster role for vcluster is needed) -- **Real Nodes** : vcluster will copy and sync real nodes information for each `spec.nodeName`. If there are no more pods on a node within vcluster, the virtual cluster node will be deleted. This mode requires helm value `.sync.nodes.enabled: true`, as described below. -- **Real Nodes All** : vcluster will always sync all nodes from the host cluster to the vcluster, no matter where pods are running. This is useful if you want to use DaemonSets within the vcluster. This mode requires following helm values: `.sync.nodes.enabled: true` and `.sync.nodes.syncAllNodes: true`. -- **Real Nodes Label Selector** vcluster will only sync nodes that match the given label selector. This mode requires following helm values: `.sync.nodes.enabled: true` and `.sync.nodes.nodeSelector: "label1=value1"`. You can also specify `--enforce-node-selector` to enforce scheduling only on these nodes. -- **Real Nodes + Label Selector** vcluster will sync nodes that match the given label selector as well as the real nodes information for each `spec.nodeName`. This mode requires following helm values: `.sync.nodes.enabled: true` and `.sync.nodes.nodeSelector: "label1=value1"` and the flag `--enforce-node-selector=false`. +vCluster supports multiple modes to customize node syncing behaviour: +- **Fake Nodes** (default): vCluster will create fake nodes for each `spec.nodeName`. If there are no more pods on a node, the fake node will be deleted (no cluster role for vCluster is needed) +- **Real Nodes** : vCluster will copy and sync real nodes information for each `spec.nodeName`. If there are no more pods on a node within vCluster, the virtual cluster node will be deleted. This mode requires helm value `.sync.nodes.enabled: true`, as described below. +- **Real Nodes All** : vCluster will always sync all nodes from the host cluster to the vCluster, no matter where pods are running. This is useful if you want to use DaemonSets within the vCluster. This mode requires following helm values: `.sync.nodes.enabled: true` and `.sync.nodes.syncAllNodes: true`. +- **Real Nodes Label Selector** vCluster will only sync nodes that match the given label selector. This mode requires following helm values: `.sync.nodes.enabled: true` and `.sync.nodes.nodeSelector: "label1=value1"`. You can also specify `--enforce-node-selector` to enforce scheduling only on these nodes. 
+- **Real Nodes + Label Selector** vCluster will sync nodes that match the given label selector as well as the real nodes information for each `spec.nodeName`. This mode requires following helm values: `.sync.nodes.enabled: true` and `.sync.nodes.nodeSelector: "label1=value1"` and the flag `--enforce-node-selector=false`. To set the `.sync.nodes.enabled: true` helm value add the following to your `values.yaml` file: ``` @@ -20,19 +20,19 @@ sync: nodes: enabled: true ``` -Then you can create the vcluster with: +Then you can create the vCluster with: ```yaml vcluster create my-vcluster -f values.yaml ``` :::info DaemonSets -If you want to use DaemonSets within vcluster, we recommend to either use the *Real Nodes All* or *Real Nodes Label Selector* option, as this will hard delete the nodes that are not there anymore from vcluster. If you are using fake nodes or just the used real nodes option, daemon sets will essentially never let vcluster delete an unused node as it will always be occupied by a daemon set pod. +If you want to use DaemonSets within vCluster, we recommend to either use the *Real Nodes All* or *Real Nodes Label Selector* option, as this will hard delete the nodes that are not there anymore from vCluster. If you are using fake nodes or just the used real nodes option, daemon sets will essentially never let vCluster delete an unused node as it will always be occupied by a daemon set pod. ::: ### Example Sync All Nodes -For example, if you want to create a vcluster that syncs all nodes from the host cluster, you can create a file `values.yaml`: +For example, if you want to create a vCluster that syncs all nodes from the host cluster, you can create a file `values.yaml`: ```yaml sync: @@ -41,7 +41,7 @@ sync: syncAllNodes: true ``` -Then you can create the vcluster with: +Then you can create the vCluster with: ```yaml vcluster create my-vcluster -f values.yaml diff --git a/docs/pages/architecture/basics.mdx b/docs/pages/architecture/overview.mdx similarity index 50% rename from docs/pages/architecture/basics.mdx rename to docs/pages/architecture/overview.mdx index 1994cbe7c..2ffd182cb 100644 --- a/docs/pages/architecture/basics.mdx +++ b/docs/pages/architecture/overview.mdx @@ -1,6 +1,6 @@ --- -title: Basics -sidebar_label: Basics +title: Overview +sidebar_label: Overview --- Virtual clusters are Kubernetes clusters that run on top of other Kubernetes clusters. Compared to fully separate "real" clusters, virtual clusters do not have their own node pools or networking. Instead, they are scheduling workloads inside the underlying cluster while having their own control plane. @@ -12,70 +12,60 @@ Virtual clusters are Kubernetes clusters that run on top of other Kubernetes clu ## Components -By default, vclusters run as a single pod (scheduled by a StatefulSet) that consists of 2 containers: -- [**Control Plane**](#vcluster-control-plane): This container contains API server, controller manager and a connection (or mount) of the data store. By default, vclusters use sqlite as data store and run the API server and controller manager of k3s, which is a certified Kubernetes distribution and CNCF sandbox project. You can also use a [different data store, such as etcd, mysql or postgresql](../operator/external-datastore.mdx). You are also able to use another Kubernetes distribution as backing virtual cluster, such as [k0s or vanilla k8s](../operator/other-distributions.mdx). 
-- [**Syncer**](#vcluster-syncer): What makes a vcluster virtual is the fact that it does not have actual worker nodes or network. Instead, it uses a so-called syncer which copies the pods that are created within the vcluster to the underlying host cluster. Then, the host cluster will actually schedule the pod and the vcluster will keep the vcluster pod and host cluster pod in sync. +By default, vClusters run as a single pod (scheduled by a StatefulSet) that consists of 2 containers: +- [**Control Plane**](./control_plane/control_plane.mdx) +- [**Syncer**](./syncer/syncer.mdx) -### vcluster Control Plane -Each vcluster has its own control plane consisting of: -- **Kubernetes API** server (point your kubectl requests to this vcluster API server) -- **Data store** (where the API stores all resources, real clusters run with etcd) -- **Controller Manager** (creates pods objects in the data store according to replica number in ReplicaSets etc.) -- **(Optional) Scheduler** (schedules workloads inside the virtual cluster. See [scheduling](./scheduling.mdx) for more info) +## Host Cluster & Namespace +Every vCluster runs on top of another Kubernetes cluster, called host cluster. Each vCluster runs as a regular StatefulSet inside a namespace of the host cluster. This namespace is called host namespace. Everything that you create inside the vCluster lives either inside the vCluster itself or inside the host namespace. -### vcluster Syncer -The vcluster uses a so-called syncer which copies the pods that are created within the vcluster to the underlying host cluster. Then, the host cluster will schedule the pod and the vcluster will keep the vcluster pod and host cluster pod in sync. - -### Host Cluster & Namespace -Every vcluster runs on top of another Kubernetes cluster, called host cluster. Each vcluster runs as a regular StatefulSet inside a namespace of the host cluster. This namespace is called host namespace. Everything that you create inside the vcluster lives either inside the vcluster itself or inside the host namespace. - -It is possible to run multiple vclusters inside the same namespace and you can even run vclusters inside another vcluster (vcluster nesting). +It is possible to run multiple vClusters inside the same namespace and you can even run vClusters inside another vCluster (vcluster nesting). ## Kubernetes Resources The core idea of virtual clusters is to provision isolated Kubernetes control planes (e.g. API servers) that run on top of "real" Kubernetes clusters. When working with the virtual cluster's API server, resources first only exist in the virtual cluster. However, some low-level Kubernetes resources need to be synchronized to the underlying cluster. ### High-Level = Purely Virtual -Generally, all Kubernetes resource objects that you create using the vcluster API server are stored in the data store of the vcluster (sqlite by default, see [external datastore](../operator/external-datastore.mdx) for more options). That applies in particular to all higher level Kubernetes resources, such as Deployments, StatefulSets, CRDs, etc. These objects only exist inside the virtual cluster and never reach the API server or data store (etcd) of the underlying host cluster. +Generally, all Kubernetes resource objects that you create using the vCluster API server are stored in the data store of the vCluster (sqlite by default, see [external datastore](../deploying-vclusters/persistence.mdx) for more options). 
That applies in particular to all higher level Kubernetes resources, such as Deployments, StatefulSets, CRDs, etc. These objects only exist inside the virtual cluster and never reach the API server or data store (etcd) of the underlying host cluster. ### Low-Level = Sync'd Resources -To be able to actually start containers, the vcluster synchronizes certain low-level resources (e.g. Pods, ConfigMaps mounted in Pods) to the underlying host namespace, so that the scheduler of the underlying host cluster can schedule these pods. +To be able to actually start containers, the vCluster synchronizes certain low-level resources (e.g. Pods, ConfigMaps mounted in Pods) to the underlying host namespace, so that the scheduler of the underlying host cluster can schedule these pods. ## Design Principles -vcluster has been designed following these principles: +vCluster has been designed following these principles: ### 1. Lightweight / Low-Overhead -vclusters should be as lightweight as possible to minimize resource overhead inside the underlying [host cluster](#host-cluster--namespace). +vClusters should be as lightweight as possible to minimize resource overhead inside the underlying [host cluster](#host-cluster--namespace). -**Implementation:** This is mainly achieved by bundling the vcluster inside a single Pod using k3s as a control plane. +**Implementation:** This is mainly achieved by bundling the vCluster inside a single Pod using k3s as a control plane. ### 2. No Performance Degradation -Workloads running inside a vcluster (even inside [nested vclusters](#host-cluster--namespace)) should run with the same performance as workloads which are running directly on the underlying host cluster. The computing power, the access to underlying persistent storage as well as the network performance should not be degraded at all. +Workloads running inside a vCluster (even inside [nested vClusters](#host-cluster--namespace)) should run with the same performance as workloads which are running directly on the underlying host cluster. The computing power, the access to underlying persistent storage as well as the network performance should not be degraded at all. -**Implementation:** This is mainly achieved by synchonizing pods which means that the pods are actually being scheduled and started just like regular pods of the underlying host cluster, i.e. if you run a pod inside the vcluster and you run the same pod directly on the host cluster will be exactly the same in terms of computing power, storage access and networking. +**Implementation:** This is mainly achieved by synchronizing pods, which means that the pods are actually being scheduled and started just like regular pods of the underlying host cluster, i.e. a pod that you run inside the vCluster and the same pod run directly on the host cluster will be exactly the same in terms of computing power, storage access and networking. ### 3. Reduce Requests On Host Cluster -vclusters should greatly reduce the number of requests to the Kubernetes API server of the underlying [host cluster](#host-cluster--namespace) by ensuring that all high-level resources remain in the virtual cluster only without ever reaching the underlying host cluster.
-**Implementation:** This is mainly achieved by using a separate API server which handles all requests to the vcluster and a separate data store which stores all objects inside the vcluster. Only the syncer synchronizes very few low-level resources to the underlying cluster which requires very few API server requests. All of this happens in an asynchronous, non-blocking fashion (as pretty much everything in Kubernetes is desgined to be). +**Implementation:** This is mainly achieved by using a separate API server which handles all requests to the vCluster and a separate data store which stores all objects inside the vCluster. Only the syncer synchronizes very few low-level resources to the underlying cluster, which requires very few API server requests. All of this happens in an asynchronous, non-blocking fashion (as pretty much everything in Kubernetes is designed to be). ### 4. Flexible & Easy Provisioning -vcluster should not make any assumptions about how it is being provisioned. Users should be able to create vclusters on top of any Kubernetes cluster without requiring the installation of any server-side component to provision the vclusters, i.e. provisioning should be possible with any client-only deployment tool (vcluster CLI, helm, kubectl, kustomize, ...). An operator or CRDs may be added to manage vclusters (e.g. using Argo to provision vclusters) but a server-side management plane should never be required for spinning up a vcluster. +vCluster should not make any assumptions about how it is being provisioned. Users should be able to create vClusters on top of any Kubernetes cluster without requiring the installation of any server-side component to provision the vClusters, i.e. provisioning should be possible with any client-only deployment tool (vcluster CLI, helm, kubectl, kustomize, ...). An operator or CRDs may be added to manage vClusters (e.g. using Argo to provision vClusters) but a server-side management plane should never be required for spinning up a vCluster. -**Implementation:** This is mainly achieved by making vcluster basically run as a simple StatefulSet + Service (see kubectl deployment method for details) which can be deployed using any Kubernetes tool. +**Implementation:** This is mainly achieved by making vCluster basically run as a simple StatefulSet + Service (see kubectl deployment method for details) which can be deployed using any Kubernetes tool. ### 5. No Admin Privileges Required -To provision a vcluster, a user should never be required to have any cluster-wide permissions. If a user has the RBAC permissions to deploy a simple web application to a namespace, they should also be able to deploy vclusters to this namespace. +To provision a vCluster, a user should never be required to have any cluster-wide permissions. If a user has the RBAC permissions to deploy a simple web application to a namespace, they should also be able to deploy vClusters to this namespace. -**Implementation:** This is mainly achieved by making vcluster basically run as a simple StatefulSet + Service (see kubectl deployment method for details) which typically every user has the privilege to run if they have any Kubernetes access at all. +**Implementation:** This is mainly achieved by making vCluster basically run as a simple StatefulSet + Service (see kubectl deployment method for details) which typically every user has the privilege to run if they have any Kubernetes access at all. ### 6.
Single Namespace Encapsulation -Each vcluster and all the workloads and data inside the vcluster should be encapsulated into a single namespace. Even if the vcluster has hundreds of namespaces, in the underlying [host cluster](#host-cluster--namespace), everything will be encapsulated into a single [host namespace](#host-cluster--namespace). +Each vCluster and all the workloads and data inside the vCluster should be encapsulated into a single namespace. Even if the vCluster has hundreds of namespaces, in the underlying [host cluster](#host-cluster--namespace), everything will be encapsulated into a single [host namespace](#host-cluster--namespace). -**Implementation:** This is mainly achieved by using a separate API server and data store and by the design of the syncer which synchronizes everything to a single underlying host namespace while renaming resources during the sync to prevent naming conflicts when mapping from multiple namespaces inside the vcluster to a single namespace in the host cluster. +**Implementation:** This is mainly achieved by using a separate API server and data store and by the design of the syncer which synchronizes everything to a single underlying host namespace while renaming resources during the sync to prevent naming conflicts when mapping from multiple namespaces inside the vCluster to a single namespace in the host cluster. ### 7. Easy Cleanup -vclusters should not have any hard wiring with the underlying cluster. Deleting a vcluster or merely deleting the vcluster's [host namespace](#host-cluster--namespace) should always be possible without any negative impacts on the underlying cluster (no namespaces stuck in terminating state or anything comparable) and should always guarantee that all vcluster-related resources are being deleted cleanly and immediately without leaving any orphan resources behind. +vClusters should not have any hard wiring with the underlying cluster. Deleting a vCluster or merely deleting the vCluster's [host namespace](#host-cluster--namespace) should always be possible without any negative impacts on the underlying cluster (no namespaces stuck in terminating state or anything comparable) and should always guarantee that all vCluster-related resources are being deleted cleanly and immediately without leaving any orphan resources behind. -**Implementation:** This is mainly achieved by not adding any control plane or server-side elements to the provisioning of vclusters. A vcluster is just a StatefulSet and few other Kubernetes resources. All synchronized resources in the host namespace have an appropriate owner reference, that means if you delete the vcluster itself, everything that belongs to the vcluster will be automatically deleted by Kubernetes as well (this is a similar mechanism as Deployments and StatefulSets use to clean up their Pods). +**Implementation:** This is mainly achieved by not adding any control plane or server-side elements to the provisioning of vClusters. A vCluster is just a StatefulSet and a few other Kubernetes resources. All synchronized resources in the host namespace have an appropriate owner reference, which means that if you delete the vCluster itself, everything that belongs to the vCluster will be automatically deleted by Kubernetes as well (this is a similar mechanism to the one Deployments and StatefulSets use to clean up their Pods).
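To make principles 6 and 7 more concrete, the sketch below shows roughly what a pod synced from the virtual cluster could look like in the host namespace. The rewritten name follows the `<name>-x-<namespace>-x-<vcluster>` pattern used as an example elsewhere in these docs; the host namespace, the owner object (here a Service named after the vCluster) and all other names are illustrative assumptions rather than the literal output of the syncer.

```yaml
# Hypothetical view of a synced pod in the host namespace (names and owner object are assumed)
apiVersion: v1
kind: Pod
metadata:
  # "logstash" from the vCluster namespace "logging" in a vCluster named "vc"
  name: logstash-x-logging-x-vc
  # single host namespace that encapsulates the whole vCluster (principle 6)
  namespace: vcluster-vc
  ownerReferences:
    # deleting the vCluster lets Kubernetes garbage-collect everything it owns (principle 7)
    - apiVersion: v1
      kind: Service          # assumed owner kind, for illustration only
      name: vc
      uid: "<uid of the owning object>"
```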
diff --git a/docs/pages/architecture/scheduling.mdx b/docs/pages/architecture/scheduling.mdx index f3b9921ce..e0dc3a27e 100644 --- a/docs/pages/architecture/scheduling.mdx +++ b/docs/pages/architecture/scheduling.mdx @@ -8,20 +8,20 @@ sidebar_label: Pod Scheduling
vcluster - Pod Scheduling
-Vcluster runs your workloads by replicating pods from the virtual cluster to the host cluster. We call this process synchronization or sync for short. This process is executed by the "syncer" component of the vcluster. -To control how vcluster pods are scheduled on the host cluster, you may need to pass additional arguments to the syncer, or set certain helm chart values during vcluster installation and upgrade. Some of these options are described in the chapters below. +vCluster runs your workloads by replicating pods from the virtual cluster to the host cluster. We call this process synchronization or sync for short. This process is executed by the "syncer" component of the vCluster. +To control how vCluster pods are scheduled on the host cluster, you may need to pass additional arguments to the syncer, or set certain helm chart values during vCluster installation and upgrade. Some of these options are described in the chapters below. -## Separate vcluster Scheduler +## Separate vCluster Scheduler -By default, vcluster will reuse the scheduler of the host cluster to schedule workloads. This saves computing resources, but also has some limitations: +By default, vCluster will reuse the scheduler of the host cluster to schedule workloads. This saves computing resources, but also has some limitations: 1. Labeling nodes inside the virtual cluster has no effect on scheduling 2. Draining or tainting nodes inside the virtual cluster has no effect on scheduling -3. You cannot use custom schedulers inside the vcluster +3. You cannot use custom schedulers inside the vCluster -Sometimes you want to label a node inside the vcluster to modify workload scheduling through features such as [affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) or [topology spreading](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). vcluster supports running a scheduler inside the virtual cluster instead of reusing the host cluster's scheduler. -vcluster will then only sync pods that already have a node assigned to the host cluster. +Sometimes you want to label a node inside the vCluster to modify workload scheduling through features such as [affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) or [topology spreading](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). vCluster supports running a scheduler inside the virtual cluster instead of reusing the host cluster's scheduler. +vCluster will then only sync pods that already have a node assigned to the host cluster. -You can enable the virtual scheduler via the `values.yaml` of vcluster: +You can enable the virtual scheduler via the `values.yaml` of vCluster: ```yaml sync: nodes: @@ -31,7 +31,7 @@ sync: syncAllNodes: true ``` -Then create or upgrade a vcluster with: +Then create or upgrade a vCluster with: ``` vcluster create my-vcluster -f values.yaml ``` @@ -44,7 +44,7 @@ If the `persistentvolumeclaims` syncer is also enabled, relevant `csistoragecapa ## Reuse Host Scheduler -If you don't want to use a separate scheduler inside the vcluster, you can also customize to a certain degree how the host scheduler will schedule your virtual cluster workloads. +If you don't want to use a separate scheduler inside the vCluster, you can also customize to a certain degree how the host scheduler will schedule your virtual cluster workloads.
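Before moving on to the host-scheduler options below, the hedged sketch here ties together the separate-scheduler values from the section above, since the hunk only shows fragments of that block. Only `sync.nodes.enabled` and `sync.nodes.syncAllNodes` are visible in the diff context; the `enableScheduler` key is an assumption added for illustration and should be checked against the chart's current values.

```yaml
# Sketch of a values.yaml enabling the scheduler inside the vCluster (enableScheduler is assumed)
sync:
  nodes:
    # sync real nodes so the virtual scheduler has nodes to place pods on
    enabled: true
    # sync all host nodes, as shown in the hunk above
    syncAllNodes: true
    # assumed toggle for running a scheduler inside the virtual cluster
    enableScheduler: true
```

As with the other examples on this page, the vCluster would then be created or upgraded with `vcluster create my-vcluster --upgrade -f values.yaml`.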
### Using priority classes @@ -54,24 +54,24 @@ sync: priorityclasses: enabled: true ``` -then create or upgrade the vcluster with: +then create or upgrade the vCluster with: ``` vcluster create my-vcluster --upgrade -f values.yaml ``` -This will pass the necessary flags to the "syncer" container and create or update the ClusterRole used by vcluster to include necessary permissions. +This will pass the necessary flags to the "syncer" container and create or update the ClusterRole used by vCluster to include necessary permissions. ### Limiting pod scheduling to selected nodes -Vcluster allows you to limit on which nodes the pods synced by vcluster will run. +vCluster allows you to limit on which nodes the pods synced by vCluster will run. You can achieve this by combining `--node-selector` and `--enforce-node-selector` syncer flags. The `--enforce-node-selector` flag is enabled by default. When `--enforce-node-selector` flag is disabled, and a `--node-selector` is specified nodes will be synced based on the selector, as well as nodes running pod workloads. -When using vcluster helm chart or CLI, there are two options for setting the `--node-selector` flag. +When using the vCluster helm chart or CLI, there are two options for setting the `--node-selector` flag. This first option is recommended if you are not enabling node synchronization, and use [the fake nodes](./nodes.mdx), which are enabled by default. In such case, you would write a string representation of your node selector(e.g. "nodeLabel=labelValue") and set it as the value of `--node-selector` argument for syncer in your `values.yaml`: ``` @@ -79,7 +79,7 @@ syncer: extraArgs: - --node-selector=nodeLabel=labelValue ``` -then create or upgrade the vcluster with: +then create or upgrade the vCluster with: ``` vcluster create my-vcluster --upgrade -f values.yaml @@ -92,20 +92,20 @@ sync: enabled: true nodeSelector: "nodeLabel=labelValue" ``` -then create or upgrade the vcluster with: +then create or upgrade the vCluster with: ``` vcluster create my-vcluster --upgrade -f values.yaml ``` :::info -When sync of the real nodes is enabled and nodeSelector is set, all nodes that match the selector will be synced into vcluster. Read more about Node sync modes on the [Nodes documentation page](./nodes.mdx). +When sync of the real nodes is enabled and nodeSelector is set, all nodes that match the selector will be synced into vCluster. Read more about Node sync modes on the [Nodes documentation page](./nodes.mdx). ::: -### Automatically applying tolerations to all pods synced by vcluster +### Automatically applying tolerations to all pods synced by vCluster -Kubernetes has a concept of [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), which is used for controlling scheduling. If you have a use case requiring all pods synced by vcluster to have a toleration set automatically, then you can achieve this with the `--enforce-toleration` syncer flag. +Kubernetes has a concept of [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), which is used for controlling scheduling. If you have a use case requiring all pods synced by vCluster to have a toleration set automatically, then you can achieve this with the `--enforce-toleration` syncer flag.
You can pass multiple `--enforce-toleration` flags with different toleration expressions, and syncer will add them to every new pod that gets synced by vCluster. This is how toleration is set in yaml format: ``` @@ -130,6 +130,6 @@ syncer: ``` :::info -vcluster does not support setting the `tolerationSeconds` field of a toleration through the syntax that the `--enforce-toleration` flag uses. If your use case requires this, please raise an issue in [the vcluster repo on GitHub](https://github.com/loft-sh/vcluster/issues). +vCluster does not support setting the `tolerationSeconds` field of a toleration through the syntax that the `--enforce-toleration` flag uses. If your use case requires this, please raise an issue in [the vCluster repo on GitHub](https://github.com/loft-sh/vcluster/issues). ::: diff --git a/docs/pages/architecture/synced-resources.mdx b/docs/pages/architecture/synced-resources.mdx deleted file mode 100644 index 1809207a1..000000000 --- a/docs/pages/architecture/synced-resources.mdx +++ /dev/null @@ -1,328 +0,0 @@ ---- -title: Synced Resources -sidebar_label: Synced Resources ---- - -This section lists all resources that can be synced or mirrored by vcluster currently in the table below. Those resources can be activated or deactivated via the `values.yaml` as described below, or with the `--sync` flag of the syncer. By default, certain resources are already activated and you can either disable the default resources or tell vcluster to sync other supported resources as well. - -| Resource | Description | Default Enabled | -| ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | -| services | Mirrors services between host and virtual cluster | Yes | -| endpoints | Mirrors endpoints between host and virtual cluster | Yes | -| configmaps | Mirrors used configmaps by pods between host and virtual cluster | Yes | -| secrets | Mirrors used secrets by ingresses or pods between host and virtual cluster | Yes | -| events | Syncs events from host cluster to virtual cluster | Yes | -| pods | Mirrors pods between host and virtual cluster | Yes | -| persistentvolumeclaims | Mirrors persistent volume claims between host and virtual cluster | Yes | -| fake-nodes | Creates fake nodes based on spec.nodeName fields of synced pods. Requires no cluster role | Yes | -| fake-persistentvolumes | Creates fake persistent volumes based on spec.volumeName of persistent volume claims. Requires no cluster role | Yes | -| ingresses | Mirrors ingresses between host and virtual cluster. Automatically tries to detect the supported ingress version (networking.k8s.io/v1 or networking.k8s.io/v1beta1) | No | -| ingressclasses | Syncs IngressClasses from host cluster to virtual cluster. This is automatically enabled when Ingresses sync is enabled. | No _*_ | -| nodes | Syncs real nodes from host cluster to virtual cluster. If enabled, implies that fake-nodes is disabled. For more information see [nodes](./nodes.mdx). | No | -| persistentvolumes | Mirrors persistent volumes from vcluster to host cluster and dynamically created persistent volumes from host cluster to virtual cluster. If enabled, implies that fake-persistentvolumes is disabled. 
For more information see [storage](./storage.mdx). | No | -| storageclasses | Syncs created storage classes from virtual cluster to host cluster | No | -| hoststorageclasses | Syncs real storage classes from host cluster to virtual cluster. This is only needed if you require to be able to get/list StorageClasses from vcluster API server. Host storage classes can be used in PersistentVolumes and PersistentVolumeClaims without syncing them to the virtual cluster. This option was formerly named "legacy-storageclasses". | No | -| priorityclasses | Syncs created priority classes from virtual cluster to host cluster | No | -| networkpolicies | Syncs created network policies from virtual cluster to host cluster | No | -| volumesnapshots | Enables volumesnapshot, volumesnapshotcontents and volumesnapshotclasses support. Syncing behaves similar to persistentvolumeclaims, persistentvolumes and storage classes. For more information see [storage](./storage.mdx). | No | -| poddisruptionbudgets | Syncs created poddisruptionbudgets from virtual cluster to host cluster | No | -| serviceaccounts | Syncs created service accounts from virtual cluster to host cluster. This is useful for using [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) with vcluster | No | -| csidrivers | Mirrors CSIDriver objects from host cluster to vcluster. Enabled automatically when [virtual scheduler](./scheduling.mdx#separate-vcluster-scheduler) is enabled. Disabling this syncer while using virtual scheduler may result in incorrect pod scheduling. | No _*_ | -| csinodes | Mirrors CSINode objects from host cluster to vcluster. Enabled automatically when [virtual scheduler](./scheduling.mdx#separate-vcluster-scheduler) is enabled. Disabling this syncer while using virtual scheduler may result in incorrect pod scheduling. | No _*_ | -| csistoragecapacities | Mirrors CSIStorageCapacity Objects from host cluster to vcluster if the .nodeTopology matches a synced node. Enabled automatically when [virtual scheduler](./scheduling.mdx#separate-vcluster-scheduler) is enabled. Disabling this syncer while using virtual scheduler may result in incorrect pod scheduling. | No _*_ | - -_\* refer to the description column for claryfying information about default behavior._ - -By default, vcluster runs with a minimal set of RBAC permissions to allow execution in restricted environments. Certain resources require extra permissions, which will be automatically given to the vcluster ServiceAccount if you enable the resource sync with the associated helm value. - -## Enable or disable synced resources - -To enable a resource syncronization, for example persistent volumes, and automatically create the necessary RBAC permissions, add the following to your `values.yaml`: -``` -sync: - persistentvolumes: - enabled: true -``` -then create or upgrade the vcluster with: - -``` -vcluster create my-vcluster --upgrade -f values.yaml -``` - -To disable a resource that is synced by default, for example if you don't want to sync services, set the following in your `values.yaml`: -``` -sync: - services: - enabled: false -``` -then create or upgrade the vcluster with: - -``` -vcluster create my-vcluster --upgrade -f values.yaml -``` - -:::warning Correct Cluster Functionality -Disabling certain resources such as services, endpoints or pods can lead to a non-functional virtual Kubernetes cluster, so be careful with what resources you are deactivating. 
-::: - -## Sync all Secrets and Configmaps -With the new generic sync, vcluster currently only knows about a couple of resources that actually use secrets / configmaps and will try to sync only those into the host cluster, but this allows syncing of all secrets and configmaps to avoid the problem that needed secrets / configmaps are not synced to the host cluster. -To enable this, simply add the following values to the helm chart / vcluster cli options: -```yaml -sync: - secrets: - all: true - configmaps: - all: true -``` - -## Extra Pod Options - -By default [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/) and [readiness gates](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate) will not be synced by vcluster, as they require additional permissions. To enable those, please activate those within your values.yaml: - -``` -sync: - pods: - enabled: true - # Sync ephemeralContainers to host cluster - ephemeralContainers: true - # Sync readiness gates to host cluster - status: true -``` - -## Sync other resources - -Syncing other resources such as deployments, statefulsets and namespaces is usually not needed as those just control lower level resources and since those lower level resources are synced the cluster can function correctly. - -However, there might be cases though where custom syncing of resources might be needed or beneficial. In order to accomplish this, vcluster provides an [SDK](https://github.com/loft-sh/vcluster-sdk) to develop your own resource syncers as plugins. To find out more, please take a look at the [plugins documentation](../plugins/overview.mdx). - -### Generic sync -Besides the plugins, vcluster provides a way to define additional resources that should be synced in a generic and declarative way with just a few lines of a YAML configuration. This feature is a successor to the [vcluster-generic-crd-sync-plugin](https://github.com/loft-sh/vcluster-generic-crd-sync-plugin) project and is included since v0.14.0 release. The full range of the generic sync features is available only in the vcluster created in the "multi-namespace mode", see the ["Multi-namespace mode" chapter](#multi-namespace-mode) for details. - -You will need to declare which CRD Kinds you would like to sync from the virtual cluster to the host cluster, or vice versa, and the vcluster will automatically copy the CRD definition from the host cluster into vcluster at the start. Then it will take care of watching the resources of the predefined Kinds and execute the synchronization logic based on the configuration provided to it. The vcluster may automatically transform the resource metadata(such as name, namespace, labels, etc.) as is common for resources synced by vcluster. In addition to the implicit metadata transformations, you can configure transformations that will be performed on other fields of the resource, and these will depend on the meaning of those fields. You may also declare which fields will be copied in the opposite direction, from the synced resource to the original one. -Many controllers create Kubernetes resources as a result of custom resources, for example, cert-manager creates Secrets based on Certificate custom resources, and this feature will allow you to sync these resources from the host cluster into the virtual one. The following chapters describe the configuration options in more detail. 
- -:::tip -You may find configuration examples in the ["generic-sync-examples" folder in the vcluster repo](https://github.com/loft-sh/vcluster/tree/main/generic-sync-examples). -::: - -:::warning Alpha feature -Generic sync feature is currently in an alpha state. This is an advanced feature that requires more permissions in the host cluster, and as a result, it can potentially cause significant disruption in the host cluster. -::: - -#### Configuration syntax -The helm values snippet below shows an example of the generic sync configuration and related RBAC roles. There you can notice some key fields nested under `.sync.generic` value: -- the RBAC namespaced `role` and cluster scoped `clusterRole` required for the plugin - these would be adjusted to fit the needs of your use case and the configuration that you define. Note that when the ["Multi-namespace mode"](#multi-namespace-mode) is used, the namespaced role will become ClusterRole. -- the `config` field, which will populate the `CONFIG` environment variable of the vcluster syncer container - this must be a string with valid YAML formatting. It uses a custom syntax to define the behavior of the plugin. - -```yaml -sync: - generic: - clusterRole: - extraRules: - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "list", "watch"] - role: - extraRules: - # Example for Cert Manager - - apiGroups: ["cert-manager.io"] - resources: ["issuers", "certificates", "certificaterequests"] - verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] - config: |- - version: v1beta1 - export: - # ... -``` - - -#### Virtual to Host sync -We use the top-level `export` field in the configuration to declare which virtual resources we want to sync to the host cluster. Each item in the `export` array defines the resource via `apiVersion` and `kind` strings. Each `apiVersion` and `kind` pair can have only one entry in the `export` array. The `patches` field allows you to define how are certain fields of the synced resource modified before its creation(or update) in the host cluster. -The `reversePatches` field allows you to declare how changes to certain fields(implicitly this is done for the `status`) of the synced resource(the one created in the host cluster) are propagated back to the original resource in the virtual cluster. Besides the status, only the fields referenced in the `copyFromObject` reverse patch operations are propagated. -Both these fields follow the same syntax, as documented in [the "Patch syntax" chapter of this doc](#patch-syntax). - - -Example: -```yaml -sync: - generic: - config: |- - version: v1beta1 - export: - - apiVersion: cert-manager.io/v1 - kind: Certificate - patches: - - op: rewriteName - path: spec.issuerRef.name - - op: rewriteName - path: spec.secretName - reversePatches: - # Implicit reverse patch for status would be declared like so: - # - op: copyFromObject - # fromPath: status - # path: status -``` - -:::info -Only the namespaced resources are supported at this time. -::: - -**Selector for a generic Virtual to Host sync** -You can limit which resources will be synced from the virtual cluster by configuring the `selector` array. The virtual resource is synced when it matches one or more selectors, or when the `selector` field is empty. Supported selector types are: -`labelSelector` - the `key: value` map of the resource labels. All of the defined label key and values should match on the resource in the virtual cluster to be synced. 
Example: -```yaml -sync: - generic: - config: |- - version: v1beta1 - export: - - apiVersion: cert-manager.io/v1 - kind: Certificate - selector: - labelSelector: - "label-key": "label-value" -``` - - -#### Host to Virtual sync -We use the top-level `import` field in the configuration to declare which host resources we want to sync to the virtual cluster. Each item in the `import` array defines the resource via `apiVersion` and `kind` strings. Each `apiVersion` and `kind` pair can have only one entry in the `import` array. The `patches` field allows you to define how are certain fields of the synced resource modified before its creation(or update) in the virtual cluster. -The `reversePatches` field allows you to declare how changes to certain fields of the synced resource(in this case, the one created in the virtual cluster) are propagated back to the original resource in the host cluster. Only the fields referenced in the `copyFromObject` reverse patch operations are propagated. -Both these fields follow the same syntax, as documented in [the "Patch syntax" chapter of this doc](#patch-syntax). - - -Example: -```yaml -sync: - generic: - config: |- - version: v1beta1 - import: - - kind: Secret - apiVersion: v1 - - kind: IngressClass - apiVersion: networking.k8s.io/v1 -``` - -:::info -The sync from Host to Virtual cluster is supported only in ["Multi-namespace mode"](#multi-namespace-mode) -::: - -#### Patch syntax -The patch defines how will the vcluster behave when syncing each resource to and from the host cluster. Generally, a patch is defined by the field `path` and `op`(operation) that should be performed on said field. -An array of `conditions` may also be set, and in such case, the field value will be modified by a patch only if the field value matches all the [conditions](#patch-conditions). -Some operation types may utilize additional fields, and these will be explained in the next chapter. - - - -**Patch operations** - -| op | Support | Description | -| -------------------------------------- | :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| copyFromObject | all | Copy value of the field referenced in the `fromPath` from the originating object to the `path` field of the destination object. The `fromPath` can be omitted, in such case, it will default to the same field path as referenced in the `path`. | -| add | all | Add contents of the `value` into the `path` field. The `value` can be either scalar or a complex object. | -| replace | all | Replace the contents of the `path` field with the contents of the `value`. The `value` can be either scalar or a complex object. | -| remove | all | Remove the contents of the `path` field | -| rewriteName | V->H | Replaces the contents of the `path` field with transformed content based on the namespace of the synced resource. 
This is typically done on the fields that refer to a resource name, and on the `.metadata.name` as well(implicit). This is done to avoid naming collisions when syncing resources to the host cluster, but it is not necessary when using the ["Multi-namespace mode"](#multi-namespace-mode).
As an example, the "logstash" value of a resource in the "logging" namespace of the vcluster named "vc" is rewritten to "logstash-x-logging-x-vc". If the resulting length of the value would be over 63 characters, the last 10 characters will be replaced with a hash of the full value. | -| rewriteName + namePath + namespacePath | V->H | Similar to `rewriteName`, but with an addition of the `namePath` and/or `namespacePath`. This is used when a field of the synced resource is referencing a different resource via namespace and name via two separate fields. When using this option you would set the `path` to reference a field that is a common parent of both `namePath` and `namespacePath`, and these two fields would then contain just the relative path. For example, `path: spec.includes` + `namePath: name` + `namespacePath: namespace` for a resource that contains name in `spec.includes.name` and namespace in `spec.includes.namespace`. | -| rewriteName + regex | V->H | Similar to `rewriteName`, but with an addition of the `regex` option for the patch. This is used when a string contains not just the resource name, but optionally a namespace, and other characters. For example, a string containing "namespace/name" can be correctly rewritten with the addition of this configuration option - `regex: "$NAMESPACE/$NAME"`. The vcluster uses Go regular expressions to recognize the name part with the "NAME" capture group (can be written as `$NAME`), and the namespace with the "NAMESPACE" capture group (can be written as `$NAMESPACE`). | -| rewriteLabelKey | V->H | The keys of the `.metadata.labels` of the synced resources are rewritten by vcluster and plugins. This patch type allows you to rewrite the key references in the same way, so the fields that are referencing labels will still reference correct labels in their rewritten form. For example, the label key-value pair "app: curl" is rewritten to "vcluster.loft.sh/label-vcluster-x-a172cedcae: curl", so with this patch operation you can rewrite a field that contains "app" to "vcluster.loft.sh/label-vcluster-x-a172cedcae, and the controllers operating on the synced resources will work with this label just as expected.
This is not necessary when using the ["Multi-namespace mode"].(#multi-namespace-mode). | -| rewriteLabelSelector | V->H | This operation exists for the same reasons as described for the rewriteLabelKey operation. It is intended to be used for the key-value map fields that represent a label selector. This patch operation will rewrite all keys in the field referenced by `path` to the expected format for the label keys, and it will also add additional key-value pairs(with virtual namespace and vcluster name) to avoid naming conflicts.
This is not necessary when using the ["Multi-namespace mode"]. | -| rewriteLabelExpressionsSelector | V->H | Similar to the `rewriteLabelSelector`, but expects `path` reference a field with the `matchLabels` and `matchExpressions` sub-fields, which will have the label keys rewritten just as described for `rewriteLabelKey`.
This is not necessary when using the ["Multi-namespace mode"]. | - - -_V->H - patch operation is supported only for patches, or reverse patches, that are executed in the virtual to host direction._ - - - -**Example of various patch operations** -The example below is a useful configuration for pushing service metrics into a Prometheus instance installed in the host cluster. -You can refer to [ServiceMonitor spec documentation](https://prometheus-operator.dev/docs/operator/api/#monitoring.coreos.com/v1.ServiceMonitorSpec) to understand the example better. -```yaml -sync: - generic: - config: |- - version: v1beta1 - export: - - apiVersion: monitoring.coreos.com/v1 - kind: ServiceMonitor - patches: - - op: add - path: .metadata.labels - value: - prometheus-instance: default-instance - - op: copyFromObject - fromPath: .metadata.labels['prometheus-instance'] # could be omitted when equal to path - path: .metadata.labels['prometheus-instance'] - - op: replace - path: .spec.namespaceSelector - value: - any: false - matchNames: [] - - op: rewriteName - path: .spec.endpoints[*] - - op: rewriteLabelKey - path: .spec.jobLabel - - op: rewriteLabelKey - path: .spec.targetLabels[*] - - op: rewriteLabelKey - path: .spec.podTargetLabels[*] - - op: rewriteLabelExpressionsSelector - path: .spec.selector - # A regex example, e.g. if we have a field in "namespace/name" string - - op: rewriteName - path: .metadata.annotations['custom-one-namespace-slash-name'] - regex: > - ^$NAMESPACE/$NAME$ -``` - -**Patch conditions** -A patch can be applied conditionally by populating the `conditions` field of the patch definition. The condition will be checked either on a field referenced relative to the patch `path` via the `subPath` field of the condition or referenced as an absolute path in the synced resource via the `path` field of the condition. -A condition can have one of these three fields set to check the value respectively: -- `equal` - either a scalar value or a complex object can be used -- `notEqual` - either a scalar value or a complex object can be used -- `empty` - a boolean (true or false) value -Examples: -```yaml -... -patches: - - op: add - path: .metadata.labels - value: - prometheus-instance: default-instance - conditions: - - path: .metadata.labels['prometheus-instance'] - equal: "forbidden-instance" - - op: copyFromObject - fromPath: .metadata.labels['prometheus-instance'] # could be omitted when equal to path - path: .metadata.labels['prometheus-instance'] - conditions: - - subPath: "." - notEqual: "forbidden-instance" - - op: add - path: .metadata.labels - value: - mandatory: setIt - conditions: - - path: .metadata.labels['mandatory'] - empty: true - -``` - -#### More Examples -A list of sample configurations can be found here - [vcluster generic-sync-examples](https://github.com/loft-sh/vcluster/tree/main/generic-sync-examples) - -## Multi-namespace mode -In this mode vcluster diverges from the [architecture described previously](./basics.mdx). By default, all namespaced resources that need to be synced to the host cluster are created in the namespace where vcluster is installed. But in multi-namespace mode vcluster will create a namespace in the host cluster for each namespace in the virtual cluster. The namespace name is modified to avoid conflicts between multiple vcluster instances in the same host, but the synced namespaced resources are created with the same name as in the virtual cluster. 
To enable this mode use the following helm value: - -```yaml -multiNamespaceMode: - enabled: true -``` - -:::warning This mode must be enabled during vcluster creation. -Enabling, or disabling, it on an existing vcluster instance will force it into an inconsistent state. -::: - -:::warning Alpha feature -Multi-namespace mode is currently in an alpha state. This is an advanced feature that requires more permissions in the host cluster, and as a result, it can potentially cause significant disruption in the host cluster. -::: diff --git a/docs/pages/architecture/syncer/single_vs_multins.mdx b/docs/pages/architecture/syncer/single_vs_multins.mdx new file mode 100644 index 000000000..65937fcc0 --- /dev/null +++ b/docs/pages/architecture/syncer/single_vs_multins.mdx @@ -0,0 +1,24 @@ +--- +title: Single vs Multi-Namespace Sync +sidebar_label: Single vs Multi-Namespace Sync +--- + +
+ vcluster Multi-Namespace Architecture +
vcluster Multi-Namespace Architecture
+
+ +In this mode vCluster diverges from the [architecture described previously](../overview.mdx). By default, all namespaced resources that need to be synced to the host cluster are created in the namespace where vCluster is installed. But in multi-namespace mode vCluster will create a namespace in the host cluster for each namespace in the virtual cluster. The namespace name is modified to avoid conflicts between multiple vCluster instances in the same host, but the synced namespaced resources are created with the same name as in the virtual cluster. To enable this mode use the following helm value: + +```yaml +multiNamespaceMode: + enabled: true +``` + +:::warning This mode must be enabled during vCluster creation. +Enabling, or disabling, it on an existing vCluster instance will force it into an inconsistent state. +::: + +:::warning Alpha feature +Multi-namespace mode is currently in an alpha state. This is an advanced feature that requires more permissions in the host cluster, and as a result, it can potentially cause significant disruption in the host cluster. +::: \ No newline at end of file diff --git a/docs/pages/architecture/syncer/syncer.mdx b/docs/pages/architecture/syncer/syncer.mdx new file mode 100644 index 000000000..adb0d0abc --- /dev/null +++ b/docs/pages/architecture/syncer/syncer.mdx @@ -0,0 +1,6 @@ +--- +title: What is the Syncer? +sidebar_label: What is the Syncer? +--- + +What makes a vCluster virtual is the fact that it does not have actual worker nodes or network. Instead, it uses a so-called syncer which copies the pods that are created within the vCluster to the underlying host cluster. Then, the host cluster will actually schedule the pod and the vCluster will keep the vCluster pod and host cluster pod in sync. \ No newline at end of file diff --git a/docs/pages/cli.md b/docs/pages/cli.md new file mode 100644 index 000000000..80838ad1f --- /dev/null +++ b/docs/pages/cli.md @@ -0,0 +1,31 @@ +--- +title: "vcluster --help" +sidebar_label: vcluster +--- + +## vcluster + +Welcome to vcluster! + +### Synopsis + +vcluster root command + +### Options + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + -h, --help help for vcluster + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + +``` + +``` + + +## Flags +## Global & Inherited Flags \ No newline at end of file diff --git a/docs/pages/cli/vcluster_connect.md b/docs/pages/cli/vcluster_connect.md new file mode 100644 index 000000000..16180cb25 --- /dev/null +++ b/docs/pages/cli/vcluster_connect.md @@ -0,0 +1,61 @@ +--- +title: "vcluster connect --help" +sidebar_label: vcluster connect +--- + + +Connect to a virtual cluster + +## Synopsis + + +``` +vcluster connect VCLUSTER_NAME [flags] +``` + +``` +####################################################### +################## vcluster connect ################### +####################################################### +Connect to a virtual cluster + +Example: +vcluster connect test --namespace test +# Open a new bash with the vcluster KUBECONFIG defined +vcluster connect test -n test -- bash +vcluster connect test -n test -- kubectl get ns +####################################################### +``` + + +## Flags + +``` + --address string The local address to start port forwarding under + --background-proxy If specified, vcluster will create the background proxy in docker [its mainly used for vclusters with no nodeport service.] + --cluster-role string If specified, vcluster will create the service account if it does not exist and also add a cluster role binding for the given cluster role to it. Requires --service-account to be set + -h, --help help for connect + --insecure If specified, vcluster will create the kube config with insecure-skip-tls-verify + --kube-config string Writes the created kube config to this file (default "./kubeconfig.yaml") + --kube-config-context-name string If set, will override the context name of the generated virtual cluster kube config with this name + --local-port int The local port to forward the virtual cluster to. If empty, vcluster will use a random unused port + --pod string The pod to connect to + --print When enabled prints the context to stdout + --project string [PRO] The pro project the vcluster is in + --server string The server to connect to + --service-account string If specified, vcluster will create a service account token to connect to the virtual cluster instead of using the default client cert / key. Service account must exist and can be used as namespace/name. + --token-expiration int If specified, vcluster will create the service account token for the given duration in seconds. Defaults to eternal + --update-current If true updates the current kube config (default true) +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_create.md b/docs/pages/cli/vcluster_create.md new file mode 100644 index 000000000..bfa33b98c --- /dev/null +++ b/docs/pages/cli/vcluster_create.md @@ -0,0 +1,67 @@ +--- +title: "vcluster create --help" +sidebar_label: vcluster create +--- + + +Create a new virtual cluster + +## Synopsis + + +``` +vcluster create VCLUSTER_NAME [flags] +``` + +``` +####################################################### +################### vcluster create ################### +####################################################### +Creates a new virtual cluster + +Example: +vcluster create test --namespace test +####################################################### +``` + + +## Flags + +``` + --chart-name string The virtual cluster chart name to use (default "vcluster") + --chart-repo string The virtual cluster chart repo to use (default "https://charts.loft.sh") + --chart-version string The virtual cluster chart version to use (e.g. v0.9.1) + --cluster string [PRO] The vCluster.Pro connected cluster to use + --connect If true will run vcluster connect directly after the vcluster was created (default true) + --create-namespace If true the namespace will be created if it does not exist (default true) + --disable-pro If true vcluster will not try to create a vCluster.Pro. You can also use 'vcluster logout' to prevent vCluster from creating any pro clusters + --distro string Kubernetes distro to use for the virtual cluster. Allowed distros: k3s, k0s, k8s, eks (default "k3s") + --expose If true will create a load balancer service to expose the vcluster endpoint + --extra-values strings DEPRECATED: use --values instead + -h, --help help for create + --isolate If true vcluster and its workloads will run in an isolated environment + --kube-config-context-name string If set, will override the context name of the generated virtual cluster kube config with this name + --kubernetes-version string The kubernetes version to use (e.g. v1.20). Patch versions are not supported + --link stringArray [PRO] A link to add to the vCluster. E.g. --link 'prod=http://exampleprod.com' + --params string [PRO] If a template is used, this can be used to use a file for the parameters. E.g. --params path/to/my/file.yaml + --project string [PRO] The vCluster.Pro project to use + --set stringArray Set values for helm. E.g. --set 'persistence.enabled=true' + --set-param stringArray [PRO] If a template is used, this can be used to set a specific parameter. E.g. --set-param 'my-param=my-value' + --template string [PRO] The vCluster.Pro template to use + --template-version string [PRO] The vCluster.Pro template version to use + --update-current If true updates the current kube config (default true) + --upgrade If true will try to upgrade the vcluster instead of failing if it already exists + -f, --values stringArray Path where to load extra helm values from +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_delete.md b/docs/pages/cli/vcluster_delete.md new file mode 100644 index 000000000..b3e595bf7 --- /dev/null +++ b/docs/pages/cli/vcluster_delete.md @@ -0,0 +1,49 @@ +--- +title: "vcluster delete --help" +sidebar_label: vcluster delete +--- + + +Deletes a virtual cluster + +## Synopsis + + +``` +vcluster delete VCLUSTER_NAME [flags] +``` + +``` +####################################################### +################### vcluster delete ################### +####################################################### +Deletes a virtual cluster + +Example: +vcluster delete test --namespace test +####################################################### +``` + + +## Flags + +``` + --auto-delete-namespace If enabled, vcluster will delete the namespace of the vcluster if it was created by vclusterctl. In the case of multi-namespace mode, will also delete all other namespaces created by vcluster (default true) + --delete-namespace If enabled, vcluster will delete the namespace of the vcluster. In the case of multi-namespace mode, will also delete all other namespaces created by vcluster + -h, --help help for delete + --keep-pvc If enabled, vcluster will not delete the persistent volume claim of the vcluster + --project string [PRO] The pro project the vcluster is in + --wait If enabled, vcluster will wait until the vcluster is deleted (default true) +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_disconnect.md b/docs/pages/cli/vcluster_disconnect.md new file mode 100644 index 000000000..dc8496469 --- /dev/null +++ b/docs/pages/cli/vcluster_disconnect.md @@ -0,0 +1,46 @@ +--- +title: "vcluster disconnect --help" +sidebar_label: vcluster disconnect +--- + + +Disconnects from a virtual cluster + +## Synopsis + + +``` +vcluster disconnect [flags] +``` + +``` +####################################################### +################# vcluster disconnect ################# +####################################################### +Disconnect switches back the kube context if +vcluster connect --update-current was used + +Example: +vcluster connect --update-current +vcluster disconnect +####################################################### +``` + + +## Flags + +``` + -h, --help help for disconnect +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_get.md b/docs/pages/cli/vcluster_get.md new file mode 100644 index 000000000..9f0801a9f --- /dev/null +++ b/docs/pages/cli/vcluster_get.md @@ -0,0 +1,35 @@ +--- +title: "vcluster get --help" +sidebar_label: vcluster get +--- + + +Gets cluster related information + +## Synopsis + + +``` +####################################################### +#################### vcluster get ##################### +####################################################### +``` + + +## Flags + +``` + -h, --help help for get +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_get_service-cidr.md b/docs/pages/cli/vcluster_get_service-cidr.md new file mode 100644 index 000000000..277fe2706 --- /dev/null +++ b/docs/pages/cli/vcluster_get_service-cidr.md @@ -0,0 +1,45 @@ +--- +title: "vcluster get service-cidr --help" +sidebar_label: vcluster get service-cidr +--- + + +Prints Service CIDR of the cluster + +## Synopsis + + +``` +vcluster get service-cidr [flags] +``` + +``` +####################################################### +############### vcluster get service-cidr ############ +####################################################### +Prints Service CIDR of the cluster + +Ex: +vcluster get service-cidr +10.96.0.0/12 +####################################################### +``` + + +## Flags + +``` + -h, --help help for service-cidr +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_import.md b/docs/pages/cli/vcluster_import.md new file mode 100644 index 000000000..6c694f74d --- /dev/null +++ b/docs/pages/cli/vcluster_import.md @@ -0,0 +1,48 @@ +--- +title: "vcluster import --help" +sidebar_label: vcluster import +--- + + +Imports a vcluster into a vCluster.Pro project + +## Synopsis + +``` +vcluster import VCLUSTER_NAME [flags] +``` + +``` +######################################################## +################### vcluster import #################### +######################################################## +Imports a vcluster into a vCluster.Pro project. + +Example: +vcluster import my-vcluster --cluster connected-cluster \ +--namespace vcluster-my-vcluster --project my-project --importname my-vcluster +####################################################### +``` + + +## Flags + +``` + --cluster string Cluster name of the cluster the virtual cluster is running on + --disable-upgrade If true, will disable auto-upgrade of the imported vcluster to vCluster.Pro + -h, --help help for import + --importname string The name of the vcluster under projects. 
If unspecified, will use the vcluster name + --project string The project to import the vcluster into +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_list.md b/docs/pages/cli/vcluster_list.md new file mode 100644 index 000000000..ab86a35e0 --- /dev/null +++ b/docs/pages/cli/vcluster_list.md @@ -0,0 +1,47 @@ +--- +title: "vcluster list --help" +sidebar_label: vcluster list +--- + + +Lists all virtual clusters + +## Synopsis + + +``` +vcluster list [flags] +``` + +``` +####################################################### +#################### vcluster list #################### +####################################################### +Lists all virtual clusters + +Example: +vcluster list +vcluster list --output json +vcluster list --namespace test +####################################################### +``` + + +## Flags + +``` + -h, --help help for list + --output string Choose the format of the output. [table|json] (default "table") +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_login.md b/docs/pages/cli/vcluster_login.md new file mode 100644 index 000000000..1cb9e81d9 --- /dev/null +++ b/docs/pages/cli/vcluster_login.md @@ -0,0 +1,47 @@ +--- +title: "vcluster login --help" +sidebar_label: vcluster login +--- + + +Login to a vCluster.Pro instance + +## Synopsis + +``` +vcluster login [VCLUSTER_PRO_HOST] [flags] +``` + +``` +######################################################## +#################### vcluster login #################### +######################################################## +Login into vCluster.Pro + +Example: +vcluster login https://my-vcluster-pro.com +vcluster login https://my-vcluster-pro.com --access-key myaccesskey +######################################################## +``` + + +## Flags + +``` + --access-key string The access key to use + --docker-login If true, will log into the docker image registries the user has image pull secrets for (default true) + -h, --help help for login + --insecure Allow login into an insecure Loft instance (default true) +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_logout.md b/docs/pages/cli/vcluster_logout.md new file mode 100644 index 000000000..4df9589a6 --- /dev/null +++ b/docs/pages/cli/vcluster_logout.md @@ -0,0 +1,43 @@ +--- +title: "vcluster logout --help" +sidebar_label: vcluster logout +--- + + +Log out of a vCluster.Pro instance + +## Synopsis + +``` +vcluster logout [flags] +``` + +``` +######################################################## +################### vcluster logout #################### +######################################################## +Log out of vCluster.Pro + +Example: +vcluster logout +######################################################## +``` + + +## Flags + +``` + -h, --help help for logout +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_pause.md b/docs/pages/cli/vcluster_pause.md new file mode 100644 index 000000000..91823fccc --- /dev/null +++ b/docs/pages/cli/vcluster_pause.md @@ -0,0 +1,52 @@ +--- +title: "vcluster pause --help" +sidebar_label: vcluster pause +--- + + +Pauses a virtual cluster + +## Synopsis + + +``` +vcluster pause VCLUSTER_NAME [flags] +``` + +``` +####################################################### +################### vcluster pause #################### +####################################################### +Pause will stop a virtual cluster and free all its used +computing resources. + +Pause will scale down the virtual cluster and delete +all workloads created through the virtual cluster. Upon resume, +all workloads will be recreated. Other resources such +as persistent volume claims, services etc. will not be affected. + +Example: +vcluster pause test --namespace test +####################################################### +``` + + +## Flags + +``` + -h, --help help for pause + --prevent-wakeup vcluster resume vcluster [PRO] The amount of seconds this vcluster should sleep until it can be woken up again (use 0 for infinite sleeping). During this time the space can only be woken up by vcluster resume vcluster, manually deleting the annotation on the namespace or through the loft UI (default -1) + --project string [PRO] The pro project the vcluster is in +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_pro.md b/docs/pages/cli/vcluster_pro.md new file mode 100644 index 000000000..b12a29c57 --- /dev/null +++ b/docs/pages/cli/vcluster_pro.md @@ -0,0 +1,39 @@ +--- +title: "vcluster pro --help" +sidebar_label: vcluster pro +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +`vcluster pro` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. +::: + + +vCluster.Pro subcommands + +## Synopsis + +``` +####################################################### +#################### vcluster pro ##################### +####################################################### +``` + + +## Flags + +``` + -h, --help help for pro +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_pro_generate.md b/docs/pages/cli/vcluster_pro_generate.md new file mode 100644 index 000000000..01648acdd --- /dev/null +++ b/docs/pages/cli/vcluster_pro_generate.md @@ -0,0 +1,39 @@ +--- +title: "vcluster pro generate --help" +sidebar_label: vcluster pro generate +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +`vcluster pro generate` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. +::: + + +Generate configuration + +## Synopsis + +``` +######################################################## +################## vcluster pro generate ################## +######################################################## +``` + + +## Flags + +``` + -h, --help help for generate +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_pro_generate_admin-kube-config.md b/docs/pages/cli/vcluster_pro_generate_admin-kube-config.md new file mode 100644 index 000000000..c46908acd --- /dev/null +++ b/docs/pages/cli/vcluster_pro_generate_admin-kube-config.md @@ -0,0 +1,51 @@ +--- +title: "vcluster pro generate admin-kube-config --help" +sidebar_label: vcluster pro generate admin-kube-config +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +`vcluster pro generate admin-kube-config` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. 
+::: + + +Generates a new kube config for connecting a cluster + +## Synopsis + + +``` +vcluster pro generate admin-kube-config [flags] +``` + +``` +####################################################### +######### vcluster pro generate admin-kube-config ########### +####################################################### +Creates a new kube config that can be used to connect +a cluster to vCluster.Pro + +Example: +vcluster pro generate admin-kube-config +####################################################### +``` + + +## Flags + +``` + -h, --help help for admin-kube-config + --namespace string The namespace to generate the service account in. The namespace will be created if it does not exist (default "loft") + --service-account string The service account name to create (default "loft-admin") +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_pro_login.md b/docs/pages/cli/vcluster_pro_login.md new file mode 100644 index 000000000..5b3063328 --- /dev/null +++ b/docs/pages/cli/vcluster_pro_login.md @@ -0,0 +1,40 @@ +--- +title: "vcluster pro login --help" +sidebar_label: vcluster pro login +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +`vcluster pro login` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. +::: + +## vcluster pro login + +Log in to the vcluster.pro server + +``` +vcluster pro login [flags] +``` + +### Options + +``` + -h, --help help for login +``` + +### Options inherited from parent commands + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + +``` + +``` + + +## Flags +## Global & Inherited Flags \ No newline at end of file diff --git a/docs/pages/cli/vcluster_pro_reset.md b/docs/pages/cli/vcluster_pro_reset.md new file mode 100644 index 000000000..b77ff3c12 --- /dev/null +++ b/docs/pages/cli/vcluster_pro_reset.md @@ -0,0 +1,39 @@ +--- +title: "vcluster pro reset --help" +sidebar_label: vcluster pro reset +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +`vcluster pro reset` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. +::: + + +Reset configuration + +## Synopsis + +``` +######################################################## +################## vcluster pro reset ################## +######################################################## +``` + + +## Flags + +``` + -h, --help help for reset +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_pro_reset_password.md b/docs/pages/cli/vcluster_pro_reset_password.md new file mode 100644 index 000000000..8b360a11a --- /dev/null +++ b/docs/pages/cli/vcluster_pro_reset_password.md @@ -0,0 +1,53 @@ +--- +title: "vcluster pro reset password --help" +sidebar_label: vcluster pro reset password +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +`vcluster pro reset password` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. +::: + + +Resets the password of a user + +## Synopsis + +``` +vcluster pro reset password [flags] +``` + +``` +######################################################## +############## vcluster pro reset password ############# +######################################################## +Resets the password of a user. + +Example: +vcluster pro reset password +vcluster pro reset password --user admin +####################################################### +``` + + +## Flags + +``` + --create Creates the user if it does not exist + --force If user had no password will create one + -h, --help help for password + --password string The new password to use + --user string The name of the user to reset the password (default "admin") +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_pro_start.md b/docs/pages/cli/vcluster_pro_start.md new file mode 100644 index 000000000..9f285cc81 --- /dev/null +++ b/docs/pages/cli/vcluster_pro_start.md @@ -0,0 +1,71 @@ +--- +title: "vcluster pro start --help" +sidebar_label: vcluster pro start +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +`vcluster pro start` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. +::: + + +Start a vCluster.Pro instance and connect via port-forwarding + +## Synopsis + +``` +vcluster pro start [flags] +``` + +``` +######################################################## +################## vcluster pro start ################## +######################################################## + +Starts a vCluster.Pro instance in your Kubernetes cluster +and then establishes a port-forwarding connection. + +Please make sure you meet the following requirements +before running this command: + +1. Current kube-context has admin access to the cluster +2. Helm v3 must be installed +3. 
kubectl must be installed + +######################################################## +``` + + +## Flags + +``` + --chart-name string The chart name to deploy vCluster.Pro (default "vcluster-control-plane") + --chart-path string The vCluster.Pro chart path to deploy vCluster.Pro + --chart-repo string The chart repo to deploy vCluster.Pro (default "https://charts.loft.sh/") + --context string The kube context to use for installation + --email string The email to use for the installation + -h, --help help for start + --host string Provide a hostname to enable ingress and configure its hostname + --local-port string The local port to bind to if using port-forwarding + --namespace string The namespace to install vCluster.Pro into (default "vcluster-pro") + --no-login If true, vCluster.Pro will not login to a vCluster.Pro instance on start + --no-port-forwarding If true, vCluster.Pro will not do port forwarding after installing it + --no-tunnel If true, vCluster.Pro will not create a loft.host tunnel for this installation + --no-wait If true, vCluster.Pro will not wait after installing it + --password string The password to use for the admin account. (If empty this will be the namespace UID) + --reset If true, an existing loft instance will be deleted before installing vCluster.Pro + --reuse-values Reuse previous vCluster.Pro helm values on upgrade (default true) + --upgrade If true, vCluster.Pro will try to upgrade the release + --values string Path to a file for extra vCluster.Pro helm chart values + --version string The vCluster.Pro version to install (default "latest") +``` + + +## Global & Inherited Flags + +``` + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_resume.md b/docs/pages/cli/vcluster_resume.md new file mode 100644 index 000000000..24ae573ae --- /dev/null +++ b/docs/pages/cli/vcluster_resume.md @@ -0,0 +1,47 @@ +--- +title: "vcluster resume --help" +sidebar_label: vcluster resume +--- + + +Resumes a virtual cluster + +## Synopsis + + +``` +vcluster resume VCLUSTER_NAME [flags] +``` + +``` +####################################################### +################### vcluster resume ################### +####################################################### +Resume will start a vcluster after it was paused. +vcluster will recreate all the workloads after it has +started automatically. + +Example: +vcluster resume test --namespace test +####################################################### +``` + + +## Flags + +``` + -h, --help help for resume + --project string [PRO] The pro project the vcluster is in +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_telemetry.md b/docs/pages/cli/vcluster_telemetry.md new file mode 100644 index 000000000..b5ecad379 --- /dev/null +++ b/docs/pages/cli/vcluster_telemetry.md @@ -0,0 +1,40 @@ +--- +title: "vcluster telemetry --help" +sidebar_label: vcluster telemetry +--- + + +Sets your vcluster telemetry preferences + +## Synopsis + + +``` +####################################################### +################## vcluster telemetry ################# +####################################################### +Sets your vcluster telemetry preferences. +Default: enabled. + +More information about the collected telmetry is in the +docs: https://www.vcluster.com/docs/telemetry +``` + + +## Flags + +``` + -h, --help help for telemetry +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_telemetry_disable.md b/docs/pages/cli/vcluster_telemetry_disable.md new file mode 100644 index 000000000..439aa1263 --- /dev/null +++ b/docs/pages/cli/vcluster_telemetry_disable.md @@ -0,0 +1,45 @@ +--- +title: "vcluster telemetry disable --help" +sidebar_label: vcluster telemetry disable +--- + + +Disables collection of anonymized vcluster telemetry + +## Synopsis + + +``` +vcluster telemetry disable [flags] +``` + +``` +####################################################### +############## vcluster telemetry disable ############# +####################################################### +Disables collection of anonymized vcluster telemetry. + +More information about the collected telmetry is in the +docs: https://www.vcluster.com/docs/telemetry + +####################################################### +``` + + +## Flags + +``` + -h, --help help for disable +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. 
Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_telemetry_enable.md b/docs/pages/cli/vcluster_telemetry_enable.md new file mode 100644 index 000000000..9cf1b5c28 --- /dev/null +++ b/docs/pages/cli/vcluster_telemetry_enable.md @@ -0,0 +1,45 @@ +--- +title: "vcluster telemetry enable --help" +sidebar_label: vcluster telemetry enable +--- + + +Enables collection of anonymized vcluster telemetry + +## Synopsis + + +``` +vcluster telemetry enable [flags] +``` + +``` +####################################################### +############### vcluster telemetry enable ############# +####################################################### +Enables collection of anonymized vcluster telemetry + +More information about the collected telmetry is in the +docs: https://www.vcluster.com/docs/telemetry + +####################################################### +``` + + +## Flags + +``` + -h, --help help for enable +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_ui.md b/docs/pages/cli/vcluster_ui.md new file mode 100644 index 000000000..45ad9eaf2 --- /dev/null +++ b/docs/pages/cli/vcluster_ui.md @@ -0,0 +1,43 @@ +--- +title: "vcluster ui --help" +sidebar_label: vcluster ui +--- + + +Start the web UI + +## Synopsis + +``` +vcluster ui [flags] +``` + +``` +######################################################## +##################### vcluster ui ###################### +######################################################## +Open the vCluster.Pro web UI + +Example: +vcluster ui +######################################################## +``` + + +## Flags + +``` + -h, --help help for ui +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_upgrade.md b/docs/pages/cli/vcluster_upgrade.md new file mode 100644 index 000000000..867ed7461 --- /dev/null +++ b/docs/pages/cli/vcluster_upgrade.md @@ -0,0 +1,42 @@ +--- +title: "vcluster upgrade --help" +sidebar_label: vcluster upgrade +--- + + +Upgrade the vcluster CLI to the newest version + +## Synopsis + + +``` +vcluster upgrade [flags] +``` + +``` +####################################################### +################## vcluster upgrade ################### +####################################################### +Upgrades the vcluster CLI to the newest version +####################################################### +``` + + +## Flags + +``` + -h, --help help for upgrade + --version string The version to update vcluster to. 
Defaults to the latest stable version available +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/cli/vcluster_version.md b/docs/pages/cli/vcluster_version.md new file mode 100644 index 000000000..2aeb4c1eb --- /dev/null +++ b/docs/pages/cli/vcluster_version.md @@ -0,0 +1,36 @@ +--- +title: "vcluster version --help" +sidebar_label: vcluster version +--- + + +Print the version number of vcluster + +## Synopsis + +``` +vcluster version [flags] +``` + +``` +All software has versions. This is Vcluster's. +``` + + +## Flags + +``` + -h, --help help for version +``` + + +## Global & Inherited Flags + +``` + --context string The kubernetes config context to use + --debug Prints the stack trace if an error occurs + --log-output string The log format to use. Can be either plain, raw or json (default "plain") + -n, --namespace string The kubernetes namespace to use + -s, --silent Run in silent mode and prevents any vcluster log output except panics & fatals +``` + diff --git a/docs/pages/config-reference.mdx b/docs/pages/config-reference.mdx index 48f575957..865d1aeb9 100644 --- a/docs/pages/config-reference.mdx +++ b/docs/pages/config-reference.mdx @@ -10,13 +10,13 @@ Before using any particular flag mentioned below, we recommend making yourself f --bind-address string The address to bind the server to (default "0.0.0.0") --client-ca-cert string The path to the client ca certificate (default "/data/server/tls/client-ca.crt") --cluster-domain string The cluster domain ending that should be used for the virtual cluster (default "cluster.local") - --default-image-registry string This address will be prepended to all deployed system images by vcluster + --default-image-registry string This address will be prepended to all deployed system images by vCluster --disable-fake-kubelets If disabled, the virtual cluster will not create fake kubelet endpoints to support metrics-servers - --disable-plugins If enabled, vcluster will not load any plugins + --disable-plugins If enabled, vCluster will not load any plugins --enable-scheduler If enabled, will expect a scheduler running in the virtual cluster --enforce-node-selector If enabled and --node-selector is set then the virtual cluster will ensure that no pods are scheduled outside of the node selector (default true) - --enforce-pod-security-standard string This can be set to privileged, baseline, restricted and vcluster would make sure during translation that these policies are enforced. - --enforce-toleration strings If set will apply the provided tolerations to all pods in the vcluster + --enforce-pod-security-standard string This can be set to privileged, baseline, restricted and vCluster would make sure during translation that these policies are enforced. 
+ --enforce-toleration strings If set will apply the provided tolerations to all pods in the vCluster -h, --help help for start --host-metrics-bind-address string If set, metrics for the controller manager for the resources managed in the host cluster will be exposed at this address --kube-config string The path to the virtual cluster admin kube config (default "/data/server/cred/admin.kubeconfig") @@ -26,11 +26,11 @@ Before using any particular flag mentioned below, we recommend making yourself f --map-host-service strings Maps a given service inside the host cluster to a service inside the virtual cluster. E.g. other-namespace/my-service=my-vcluster-namespace/my-service --map-virtual-service strings Maps a given service inside the virtual cluster to a service inside the host cluster. E.g. default/test=physical-service --name string The name of the virtual cluster - --node-selector string If nodes sync is enabled, nodes with the given node selector will be synced to the virtual cluster. If fake nodes are used, and --enforce-node-selector flag is set, then vcluster will ensure that no pods are scheduled outside of the node selector. + --node-selector string If nodes sync is enabled, nodes with the given node selector will be synced to the virtual cluster. If fake nodes are used, and --enforce-node-selector flag is set, then vCluster will ensure that no pods are scheduled outside of the node selector. --out-kube-config-secret string If specified, the virtual cluster will write the generated kube config to the given secret --out-kube-config-secret-namespace string If specified, the virtual cluster will write the generated kube config in the given namespace --out-kube-config-server string If specified, the virtual cluster will use this server for the generated kube config (e.g. https://my-vcluster.domain.com) - --override-hosts If enabled, vcluster will override a containers /etc/hosts file if there is a subdomain specified for the pod (spec.subdomain). (default true) + --override-hosts If enabled, vCluster will override a containers /etc/hosts file if there is a subdomain specified for the pod (spec.subdomain). (default true) --override-hosts-container-image string The image for the init container that is used for creating the override hosts file. (default "library/alpine:3.13.1") --plugin-listen-address string The plugin address to listen to. If this is changed, you'll need to configure your plugins to connect to the updated port (default "localhost:10099") --plugins strings The plugins to wait for during startup @@ -41,12 +41,12 @@ Before using any particular flag mentioned below, we recommend making yourself f --server-ca-cert string The path to the server ca certificate (default "/data/server/tls/server-ca.crt") --server-ca-key string The path to the server ca key (default "/data/server/tls/server-ca.key") --service-account string If set, will set this host service account on the synced pods - --service-name string The service name where the vcluster proxy will be available + --service-name string The service name where the vCluster proxy will be available --service-account-token-secrets bool Create secrets for pod service account tokens instead of injecting it as annotations --set-owner If true, will set the same owner the currently running syncer pod has on the synced resources (default true) --sync strings A list of sync controllers to enable. 
'foo' enables the sync controller named 'foo', '-foo' disables the sync controller named 'foo' --sync-all-nodes If enabled and --fake-nodes is false, the virtual cluster will sync all nodes instead of only the needed ones - --sync-labels strings The specified labels will be synced to physical resources, in addition to their vcluster translated versions. + --sync-labels strings The specified labels will be synced to physical resources, in addition to their vCluster translated versions. --sync-node-changes If enabled and --fake-nodes is false, the virtual cluster will proxy node updates from the virtual cluster to the host cluster. This is not recommended and should only be used if you know what you are doing. --target-namespace string The namespace to run the virtual cluster in (defaults to current namespace) --tls-san strings Add additional hostname or IP as a Subject Alternative Name in the TLS cert @@ -54,7 +54,7 @@ Before using any particular flag mentioned below, we recommend making yourself f --virtual-metrics-bind-address string If set, metrics for the controller manager for the resources managed in the virtual cluster will be exposed at this address ``` -All of these syncer flags can be set and configured through the values override file when creating the vcluster. In particular these syncer flags go into the `extraArgs` section of the values +All of these syncer flags can be set and configured through the values override file when creating the vCluster. In particular these syncer flags go into the `extraArgs` section of the values ``` syncer: extraArgs: diff --git a/docs/pages/deploying-vclusters/high-availability.mdx b/docs/pages/deploying-vclusters/high-availability.mdx new file mode 100644 index 000000000..87cb29b63 --- /dev/null +++ b/docs/pages/deploying-vclusters/high-availability.mdx @@ -0,0 +1,381 @@ +--- +title: High Availability +sidebar_label: High Availability +--- +By default, vCluster runs one instance of each of its components. That’s fine for many use cases, like ones that are very ephemeral (dev environments, CI/CD, etc.). But suppose your situation requires virtual clusters with more redundancy. In that case, you can use vCluster’s High Availability feature to run multiple copies of the vCluster components so that the cluster is more resistant to partial failures. + +# Prerequisites +For this tutorial, you will need a multi-node Kubernetes cluster. This can be a local minikube or kind cluster, or any full Kubernetes cluster. You just need more than one node in the cluster. + +You also need your kube config pointing to that cluster (you can connect and run kubectl commands against it). + +# 1. Install the vCluster client +If you’re on a Mac and using Homebrew, you can install the vCluster CLI with this command: + +``` +brew install vcluster +``` +For other platforms, see [the installation instructions](../getting-started/setup.mdx). + +# 2. Understanding vCluster distros +A distro in vcluster is the Kubernetes distribution that runs inside the virtual cluster. There are four supported distros at the time of this writing: + +- k3s (the default distro) +- k0s +- eks (the Kubernetes that is installed in AWS EKS clusters) +- k8s (a Kubernetes with etcd) +The vcluster HA feature is only supported by the k3s and k8s distros. You need to choose one of those to take advantage of HA. + +For this tutorial, we will use the k8s distro. +If you're interested in enabling HA in rootless mode, or using the k3s distro, see some examples at the bottom of this page. + +# 3. 
Create a values.yaml file +The values.yaml file is used to specify configuration options for the virtual cluster. In the case of the HA feature, we will specify the number of replicas we want to run for each vCluster component. + +Create the file called values.yaml on the computer that the vcluster client is installed on with these contents: + +``` +# Enable HA mode +enableHA: true + +# Scale up syncer replicas +syncer: + replicas: 3 + +# Scale up etcd +etcd: + replicas: 3 + +# Scale up controller manager +controller: + replicas: 3 + +# Scale up api server +api: + replicas: 3 + +# Scale up DNS server +coredns: + replicas: 3 +``` +That first line enables the HA feature, and the rest specify the number of copies you want to run of each component. Set the number of replicas for each to equal the number of compute nodes your cluster has. In this example I’m using a four node Minikube cluster (one control plane node and three compute nodes), so I set the number of replicas to three in values.yaml. + +``` +kubectl get nodes +``` +``` +NAME STATUS ROLES AGE VERSION +minikube Ready control-plane 2m5s v1.26.3 +minikube-m02 Ready 105s v1.26.3 +minikube-m03 Ready 93s v1.26.3 +minikube-m04 Ready 83s v1.26.3 +``` + +# 4. Create the HA virtual cluster + +To create a virtual cluster using the vcluster CLI, we run the vcluster create command. To enable HA, we’ll need to specify the distro and the values.yaml file to use. + +``` +vcluster create ha-tutorial --connect=false --distro k8s -f values.yaml +``` +We’ve named the virtual cluster ha-tutorial. By default, the vcluster create command connects to the virtual cluster, but for the purposes of this tutorial, we’ve disabled that with the --connect=false flag. And we’ve specified the distro and the values.yaml file to use when creating the virtual cluster. + +You should see output like this: +``` +info Creating namespace vcluster-ha-tutorial +info failed to find IPv6 service CIDR: couldn't find host cluster Service CIDR ("Service "test-service-tm4c9" is invalid: spec.clusterIPs[0]: Invalid value: []string{"2001:DB8::1"}: IPv6 is not configured on this cluster") +info Detected local kubernetes cluster minikube. Will deploy vcluster with a NodePort & sync real nodes +info Create vcluster ha-tutorial... +info execute command: helm upgrade ha-tutorial /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/vcluster-k8s-0.15.2.tgz-1797632188 --kubeconfig /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/3126958598 --namespace vcluster-ha-tutorial --install --repository-config='' --values /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/2770602786 --values values.yaml +done √ Successfully created virtual cluster ha-tutorial in namespace vcluster-ha-tutorial. +- Use 'vcluster connect ha-tutorial --namespace vcluster-ha-tutorial' to access the virtual cluster +``` +Some of your output may differ depending on whether you use a local or remote cluster. + +As you can see, vcluster has created a namespace called vcluster-ha-tutorial. The virtual cluster lives inside that namespace on the host cluster. Next, let’s see what pods are running in that namespace. 
+ +``` +kubectl get pods -n vcluster-ha-tutorial +``` +``` +NAME READY STATUS RESTARTS AGE +ha-tutorial-7c5c5844c5-27j2v 0/1 Running 0 20s +ha-tutorial-7c5c5844c5-gb2sm 0/1 Running 0 20s +ha-tutorial-7c5c5844c5-pwn7k 0/1 Running 0 20s +ha-tutorial-api-74f8665656-jhjnj 0/1 Running 0 20s +ha-tutorial-api-74f8665656-t5wcp 0/1 Running 0 20s +ha-tutorial-api-74f8665656-z5xl8 0/1 Running 0 20s +ha-tutorial-controller-75fb977dc5-pw5sb 0/1 Running 0 20s +ha-tutorial-controller-75fb977dc5-qzxgm 0/1 Running 0 20s +ha-tutorial-controller-75fb977dc5-wzf5v 0/1 Running 0 20s +ha-tutorial-etcd-0 0/1 Running 0 20s +ha-tutorial-etcd-1 0/1 Running 0 20s +ha-tutorial-etcd-2 0/1 Running 0 20s +``` +There are now three replicas of each component of the virtual cluster running. If one API server pod were down, the virtual cluster would continue functioning. + +If you’d like more information about how the vcluster pods were scheduled, add the -o wide flag to that previous command. + +``` +kubectl get pods -n vcluster-ha-tutorial -o wide +``` +The hostnames of the nodes will be listed in the NODES column. + +# 5. Connect to the virtual cluster + +We can connect to the vcluster using the vcluster connect command. + +``` +vcluster connect ha-tutorial +``` +``` +info Starting proxy container... +done √ Switched active kube context to vcluster_ha-tutorial_vcluster-ha-tutorial_minikube +- Use `vcluster disconnect` to return to your previous kube context +- Use `kubectl get namespaces` to access the vcluster +``` +vcluster connect automatically switches our kube context for kubectl to the virtual cluster. Now we can see the namespaces inside of the virtual cluster by running this command: +``` +kubectl get namespaces +Copy +NAME STATUS AGE +default Active 31s +kube-node-lease Active 33s +kube-public Active 33s +kube-system Active 33s +``` +Our virtual cluster only contains the default namespaces that are created by Kubernetes. + +Now let’s disconnect from the virtual cluster. + +``` +vcluster disconnect +``` +This will switch your kube context back to the host cluster. + +# 6. Cleanup + +One of the great things about vcluster is that it’s very fast and easy to clean up the virtual clusters when you’re done using them. + +``` +vcluster delete ha-tutorial +``` +That will delete the vcluster and the namespace it was in. + +# Other Examples + +### Enabling High Availability with k3s + +In order to run vCluster with k3s as Kubernetes distribution in high availability mode, the following steps are required: + +* create and use an [external datastore](./persistence.mdx) (as opposed to the embedded SQLite datastore used in single-server setups) +* run two or more k3s pods that will serve the Kubernetes API and run other control plane services + +First create a `values.yaml` in the following form and make sure to change the connection string in `K3S_DATASTORE_ENDPOINT`: + +``` +# Enable HA mode +enableHA: true + +# Scale up k3s replicas +replicas: 2 + +# Set external datastore endpoint +vcluster: + env: + - name: K3S_DATASTORE_ENDPOINT + value: mysql://username:password@tcp(hostname:3306)/database-name + +# Disable persistent storage as all data (including bootstrap data) is stored in external datastore +storage: + persistence: false + +# Scale up CoreDNS replicas +coredns: + replicas: 2 +``` + +Then create the vCluster with the following command: + +``` +vcluster create ... 
--connect=false -f values.yaml +``` + +Check that vCluster including the control plane is running correctly: + +``` +kubectl get pods -n vcluster +NAME READY STATUS RESTARTS AGE +coredns-66ffcc6b58-bhk4s-x-kube-system-x-vcluster 1/1 Running 0 21s +coredns-66ffcc6b58-n7npd-x-kube-system-x-vcluster 1/1 Running 0 21s +vcluster-54fb5dd76-92szq 2/2 Running 0 3m1s +vcluster-54fb5dd76-ntbrh 2/2 Running 0 3m1s +``` + +Now connect to the vCluster: + +``` +vcluster connect vcluster -n vcluster + +# Then execute in a new terminal +export KUBECONFIG=kubeconfig.yaml +kubectl get ns +... +``` + + +Check the [GitHub repository](https://github.com/loft-sh/vcluster/tree/main/charts/k3s) for all available chart options. + + +### Enabling High Availability with Vanilla k8s + +In order to run vCluster in high availability mode, create a `values.yaml` in the following form: + +``` +# Enable HA mode +enableHA: true + +# Scale up syncer replicas +syncer: + replicas: 3 + +# Scale up etcd +etcd: + replicas: 3 + +# Scale up controller manager +controller: + replicas: 3 + +# Scale up api server +api: + replicas: 3 + +# Scale up DNS server +coredns: + replicas: 3 +``` + +Then create the vCluster with the following command: +``` +vcluster create ... --connect=false --distro k8s -f values.yaml +``` + +Check that vCluster including the control plane are running correctly: +``` +kubectl get po -n vcluster +NAME READY STATUS RESTARTS AGE +coredns-6ff7df994d-m5pcd-x-kube-system-x-vcluster 1/1 Running 0 23m +coredns-6ff7df994d-dfgjb-x-kube-system-x-vcluster 1/1 Running 0 23m +coredns-6ff7df994d-weuir-x-kube-system-x-vcluster 1/1 Running 0 23m +vcluster-9d88f577-m55xf 1/1 Running 0 30m +vcluster-9d88f577-drsxz 1/1 Running 0 30m +vcluster-9d88f577-maslo 1/1 Running 0 30m +vcluster-api-66bfc4cf94-cp28t 1/1 Running 0 30m +vcluster-api-66bfc4cf94-drnll 1/1 Running 0 30m +vcluster-api-66bfc4cf94-jfbnn 1/1 Running 0 30m +vcluster-controller-b4cd55bb6-9mvc4 1/1 Running 0 30m +vcluster-controller-b4cd55bb6-bmfdj 1/1 Running 0 30m +vcluster-controller-b4cd55bb6-kcxr7 1/1 Running 0 30m +vcluster-etcd-0 1/1 Running 0 30m +vcluster-etcd-1 1/1 Running 0 29m +vcluster-etcd-2 1/1 Running 0 29m +``` + +Now connect to the vCluster: +``` +vcluster connect vcluster-1 -n host-namespace-1 + +# Then execute in a new terminal +export KUBECONFIG=kubeconfig.yaml +kubectl get ns +... +``` + +### Enable HA in rootless mode +Rootless mode means running vCluster without root user privileges in container, making host k8s cluster more secure. +You can find more about rootless mode [here](../security/rootless-mode.mdx). + +Below is HA configuration for running rootless vCluster with vanilla Kubernetes distribution. 
+``` +# Enable HA mode +enableHA: true + +# Scale up syncer replicas +syncer: + replicas: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 12345 + runAsNonRoot: true + runAsUser: 12345 + seccompProfile: + type: RuntimeDefault + +# Scale up etcd +etcd: + replicas: 3 + fsGroup: 12345 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 12345 + runAsNonRoot: true + runAsUser: 12345 + seccompProfile: + type: RuntimeDefault + +# Scale up controller manager +controller: + replicas: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 12345 + runAsNonRoot: true + runAsUser: 12345 + seccompProfile: + type: RuntimeDefault + +# Scale up api server +api: + replicas: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 12345 + runAsNonRoot: true + runAsUser: 12345 + seccompProfile: + type: RuntimeDefault + +# Scale up DNS server +coredns: + replicas: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsGroup: 12345 + runAsNonRoot: true + runAsUser: 12345 + seccompProfile: + type: RuntimeDefault +``` + +Check the [github repository](https://github.com/loft-sh/vcluster/tree/main/charts/k8s) for all available chart options. diff --git a/docs/pages/operator/init-manifests.mdx b/docs/pages/deploying-vclusters/init-charts.mdx similarity index 56% rename from docs/pages/operator/init-manifests.mdx rename to docs/pages/deploying-vclusters/init-charts.mdx index dff289ef8..38022f9e7 100644 --- a/docs/pages/operator/init-manifests.mdx +++ b/docs/pages/deploying-vclusters/init-charts.mdx @@ -1,36 +1,10 @@ --- -title: Applying manifests on initialization -sidebar_label: Applying manifests and charts on init +title: Helm Charts +sidebar_label: Helm Charts --- -## Applying manifests on initialization -Starting in version 0.8.0, vcluster allows users to apply manifests as soon as a virtual cluster is started. This can be useful for users configuring internal vcluster resources. These manifests are applied before applying the helm charts. - -This can be configured via `helm` values: - -```yaml -init: - manifests: |- - apiVersion: v1 - kind: Service - ... - --- - apiVersion: v1 - kind: ConfigMap - ... - manifestsTemplate: |- - --- - apiVersion: v1 - kind: ConfigMap - data: - example: {{ .Release.Name }} - ... -``` - -The contents of `init.manifests` will be applied as-is, while the contents of `init.manifestsTemplate` will be templated using helm to allow you to use helm values inside, e.g.: `{{ .Release.Name }}`. `init.manifests` and `init.manifestsTemplate` will be concatenated to form a single config map. - -## Applying charts on vcluster initialization -vcluster now supports applying helm charts while initializing a new vcluster. Currently 2 methods of applying charts are supported: +## Applying charts on vCluster initialization +vCluster now supports applying helm charts while initializing a new vCluster. Currently 2 methods of applying charts are supported: 1. [Upstream Mode](#upstream-mode) 2. 
[Bundle Mode](#bundle-mode) @@ -66,8 +40,8 @@ init: namespace: my-release-namespace ``` -### Chart Bundle Mode -If you're interested in applying a local chart directory, or a chart pulled from a repo that exists locally as a `tar` archive, then bundle mode can come handy. Let's say we have a chart named `my-chart.tar.gz` in the current directory, we can use the bundle mode to deploy it during vcluster initialization as follows: +### Bundle Mode +If you're interested in applying a local chart directory, or a chart pulled from a repo that exists locally as a `tar` archive, then bundle mode can come handy. Let's say we have a chart named `my-chart.tar.gz` in the current directory, we can use the bundle mode to deploy it during vCluster initialization as follows: ``` # copy the chart to your clipboard cat my-chart.tar.gz | base64 | pbcopy diff --git a/docs/pages/deploying-vclusters/init-manifests.mdx b/docs/pages/deploying-vclusters/init-manifests.mdx new file mode 100644 index 000000000..26f6c442f --- /dev/null +++ b/docs/pages/deploying-vclusters/init-manifests.mdx @@ -0,0 +1,30 @@ +--- +title: Manifests +sidebar_label: Manifests +--- + +## Applying manifests on initialization +Starting in version 0.8.0, vCluster allows users to apply manifests as soon as a virtual cluster is started. This can be useful for users configuring internal vCluster resources. These manifests are applied before applying the helm charts. + +This can be configured via `helm` values: + +```yaml +init: + manifests: |- + apiVersion: v1 + kind: Service + ... + --- + apiVersion: v1 + kind: ConfigMap + ... + manifestsTemplate: |- + --- + apiVersion: v1 + kind: ConfigMap + data: + example: {{ .Release.Name }} + ... +``` + +The contents of `init.manifests` will be applied as-is, while the contents of `init.manifestsTemplate` will be templated using helm to allow you to use helm values inside, e.g.: `{{ .Release.Name }}`. `init.manifests` and `init.manifestsTemplate` will be concatenated to form a single config map. diff --git a/docs/pages/deploying-vclusters/integrations-openshift.mdx b/docs/pages/deploying-vclusters/integrations-openshift.mdx new file mode 100644 index 000000000..077d69a85 --- /dev/null +++ b/docs/pages/deploying-vclusters/integrations-openshift.mdx @@ -0,0 +1,21 @@ +--- +title: OpenShift +sidebar_label: OpenShift +--- + +import NonRootSegment from '../fragments/non-root-vcluster.mdx' +import OpenshiftSegment from '../fragments/deploy-to-openshift.mdx' + + +By default, OpenShift doesn't allow running containers with the root user, but it assigns a random UID from the allowed range automatically, which means that you can skip the steps described in the [Running as non-root user](../security/rootless-mode.mdx) section of this document and your vCluster should run as non-root user by default. + +OpenShift also imposes some restrictions that are not common to other Kubernetes distributions. +When deploying vCluster to OpenShift you will need to follow these additional steps: + + + +:::info Additional permission when running on OpenShift +vCluster requires `create` permission for the `endpoints/restricted` resource in the default group when running on OpenShift. +This permission is required because OpenShift has additional built-in admission controller for the Endpoint resources, which denies creation of the endpoints pointing into the cluster network or service network CIDR ranges, unless this additional permission is given. 
+Following the steps outline above ensures that the vCluster Role includes this permission, as it is necessary for certain networking features. +::: diff --git a/docs/pages/operator/external-datastore.mdx b/docs/pages/deploying-vclusters/persistence.mdx similarity index 89% rename from docs/pages/operator/external-datastore.mdx rename to docs/pages/deploying-vclusters/persistence.mdx index 9813c2d39..5f4ad08af 100644 --- a/docs/pages/operator/external-datastore.mdx +++ b/docs/pages/deploying-vclusters/persistence.mdx @@ -1,12 +1,12 @@ --- -title: External Datastorage (etcd etc.) -sidebar_label: External Data Storage (etcd etc.) +title: Persisting vCluster data +sidebar_label: Persisting vCluster data --- import Tabs from '@theme/Tabs' import TabItem from '@theme/TabItem' -vcluster uses k3s as Kubernetes distribution for the virtual control plane. The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to vcluster operators. The available datastore options allow you to select a datastore that best fits your use case. +vCluster uses k3s as Kubernetes distribution for the virtual control plane. The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to vCluster operators. The available datastore options allow you to select a datastore that best fits your use case. K3s supports the following datastore options: * [Embedded SQLite](https://www.sqlite.org/index.html) (default with Persistent Volume) @@ -19,7 +19,7 @@ For more information, please take a look at the [k3s documentation](https://ranc ## Embedded SQLite without Persistent Volume -By default vcluster will deploy k3s to use a persistent volume claim to store the data in. You can also instead use an [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) to store the virtual cluster data. +By default vCluster will deploy k3s to use a persistent volume claim to store the data in. You can also instead use an [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) to store the virtual cluster data. In order to use an emptyDir to store the data instead of a persistent volume, please create a `values.yaml` with the following contents: @@ -28,19 +28,19 @@ storage: persistence: false ``` -Then upgrade or recreate the vcluster with: +Then upgrade or recreate the vCluster with: ``` vcluster create my-vcluster -n my-vcluster --upgrade -f values.yaml ``` :::warning Potential Data Loss -This method should only be used for testing purposes, as data will be lost upon pod recreation. If you are having problems with k3s in general, please consider [using another Kubernetes distribution such as k0s or vanilla k8s](./other-distributions.mdx) +This method should only be used for testing purposes, as data will be lost upon pod recreation. If you are having problems with k3s in general, please consider [using another Kubernetes distribution such as k0s or vanilla k8s](./supported-distros.mdx) ::: ## Datastore Options -If you want to use an external datastore such as PostgreSQL, MySQL, or etcd you must set the `K3S_DATASTORE_ENDPOINT` environment variable of the vcluster container so that K3s knows how to connect to it. You may also specify environment variables to configure the authentication and encryption of the connection. 
The following environment variables are available: +If you want to use an external datastore such as PostgreSQL, MySQL, or etcd you must set the `K3S_DATASTORE_ENDPOINT` environment variable of the vCluster container so that K3s knows how to connect to it. You may also specify environment variables to configure the authentication and encryption of the connection. The following environment variables are available: * **K3S_DATASTORE_ENDPOINT**: Specify a PostgresSQL, MySQL, or etcd connection string. This is a string used to describe the connection to the datastore. The structure of this string is specific to each backend and is detailed below. * **K3S_DATASTORE_CAFILE**: TLS Certificate Authority (CA) file used to help secure communication with the datastore. If your datastore serves requests over TLS using a certificate signed by a custom certificate authority, you can specify that CA using this parameter so that the K3s client can properly verify the certificate. @@ -100,7 +100,7 @@ If you specify a database name and it does not exist, the server will attempt to ### Postgres Example -The following example could be used to launch a vcluster that connects to a PostgresSQL database named k3s. Create a `values.yaml` with: +The following example could be used to launch a vCluster that connects to a PostgresSQL database named k3s. Create a `values.yaml` with: ```yaml vcluster: @@ -109,14 +109,14 @@ vcluster: value: postgres://username:password@hostname:5432/k3s ``` -Create the vcluster with: +Create the vCluster with: ``` vcluster create my-vcluster -n my-vcluster -f values.yaml ``` ### MySQL Example -The following example could be used to connect the vcluster to a MySQL database using client certificate authentication. Create a `values.yaml` with: +The following example could be used to connect the vCluster to a MySQL database using client certificate authentication. Create a `values.yaml` with: ```yaml vcluster: @@ -143,7 +143,7 @@ volumes: path: client.crt ``` -Create the vcluster with: +Create the vCluster with: ``` vcluster create my-vcluster -n my-vcluster -f values.yaml ``` diff --git a/docs/pages/deploying-vclusters/supported-distros.mdx b/docs/pages/deploying-vclusters/supported-distros.mdx new file mode 100644 index 000000000..c307449f3 --- /dev/null +++ b/docs/pages/deploying-vclusters/supported-distros.mdx @@ -0,0 +1,113 @@ +--- +title: Supported distributions +sidebar_label: Supported distributions +--- + +By default, vCluster will use [k3s](https://github.com/k3s-io/k3s) as the virtual Kubernetes cluster. However, it is not tied to a specific distribution and should work with all certified Kubernetes distributions. By default, we recommend to use k3s, because it has a small footprint and widely adopted, but if your use case requires a different k8s distribution, vCluster currently also supports k0s or vanilla k8s. If that is also not enough, you can also add your custom Kubernetes distribution as outlined below. + +## k3s + +[k3s](https://github.com/k3s-io/k3s) is a highly available, certified Kubernetes distribution designed for production workloads in unattended, resource-constrained, remote locations or inside IoT appliances. + +In order to use k3s as backing cluster, create a vCluster with the following command: + +``` +vcluster create my-vcluster +``` + +Start using it: +``` +kubectl get ns +... +``` + +Behind the scenes the default helm chart will be deployed, that holds specific configuration to support k3s. 
Check the [github repository](https://github.com/loft-sh/vcluster/tree/main/charts/k3s) for all available chart options. + +## k0s + +[k0s](https://github.com/k0sproject/k0s) is an all-inclusive Kubernetes distribution, which is configured with all of the features needed to build a Kubernetes cluster and packaged as a single binary for ease of use. vCluster supports k0s as backing virtual Kubernetes cluster. + +In order to use k0s as backing cluster, create a vCluster with the following command: + +``` +vcluster create my-vcluster --distro k0s +``` + +Start using it: +``` +kubectl get ns +... +``` + +Behind the scenes a different helm chart will be deployed (`vcluster-k0s`), which holds specific configuration to support k0s. Check the [github repository](https://github.com/loft-sh/vcluster/tree/main/charts/k0s) for all available chart options. + +## Vanilla k8s + +When choosing this option, vCluster will deploy a separate etcd cluster, kubernetes controller manager and api server alongside the vCluster hypervisor. + +In order to use vanilla k8s as backing cluster, create a vCluster with the following command: + +``` +vcluster create my-vcluster --distro k8s +``` + +Connect to the vCluster and start using it: +``` +kubectl get ns +... +``` + +Behind the scenes a different helm chart will be deployed (`vcluster-k8s`), which holds specific configuration to support vanilla k8s. Check the [github repository](https://github.com/loft-sh/vcluster/tree/main/charts/k8s) for all available chart options. + + +## eks + +When choosing this option, vCluster will deploy a separate etcd cluster, kubernetes controller manager and api server alongside the vCluster hypervisor based on eks images. + +In order to use eks as backing cluster, create a vCluster with the following command: + +``` +vcluster create my-vcluster --distro eks +``` + +Connect to the vCluster and start using it: +``` +kubectl get ns +... +``` + +Behind the scenes a different helm chart will be deployed (`vcluster-eks`), which holds specific configuration to support eks. Check the [github repository](https://github.com/loft-sh/vcluster/tree/main/charts/eks) for all available chart options. + + +## Other Distributions + +vCluster has no dependencies on any specific Kubernetes distribution, so you should be able to run it with most certified Kubernetes distributions. +One requirement vCluster has is that the distribution can be deployed without a scheduler and kubelet, meaning that vCluster just requires the api server, controller manager and data storage of the distribution. + +For single binary distributions, such as k3s or k0s, extra bundled components can usually be disabled through flags. For multi binary distributions, such as vanilla k8s, you just need to deploy the virtual control plane with api server, controller manager and usually etcd. +Most multi binary distributions work by just overriding the images of the k8s chart in a `values.yaml`, e.g.: + +```yaml +api: + image: custom-domain.com/custom-kube-apiserver:v1.21.5 +controller: + image: custom-domain.com/custom-kube-controller-manager:v1.21.5 +etcd: + image: custom-domain.com/custom-etcd:v3.4.16 +``` + +And then deploy vCluster with: + +``` +vcluster create my-vcluster -n test --distro k8s -f values.yaml +``` + +If you want to create a separate chart for the Kubernetes distribution, a good starting point is to copy one of [our distro charts](https://github.com/loft-sh/vcluster/tree/main/charts) and then modify it to work with your distribution.
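For orientation, a minimal sketch of that workflow might look like the following (the local chart path and release name are illustrative and not part of the official charts):

```bash
# Copy an existing distro chart as a starting point for a custom distribution (illustrative paths)
git clone https://github.com/loft-sh/vcluster.git
cp -r vcluster/charts/k8s ./my-distro-chart

# Adjust the images, templates and values.yaml in ./my-distro-chart for your distribution,
# then deploy the virtual cluster directly from the local chart with helm
helm upgrade --install my-vcluster ./my-distro-chart \
  --namespace vcluster-my-vcluster --create-namespace \
  -f values.yaml
```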
+vCluster only needs the following information from the virtual Kubernetes distribution to function properly: +1. The api server central authority certificate (usually found at `/pki/ca.crt`) +2. The api server central authority key (usually found at `/pki/ca.key`) +3. An admin kube config to contact the virtual Kubernetes control plane (usually found at `/pki/admin.conf`) + +For multi binary distributions, vCluster can even create those with a pre-install hook as found in the [k8s chart](https://github.com/loft-sh/vcluster/tree/main/charts/k8s/templates). + +In general, if you need vCluster to support another Kubernetes distribution, we are always happy to help you or accept a pull request in our github repository. diff --git a/docs/pages/fragments/connect.mdx b/docs/pages/fragments/connect.mdx index a7fc25d53..13e925dd6 100644 --- a/docs/pages/fragments/connect.mdx +++ b/docs/pages/fragments/connect.mdx @@ -6,5 +6,5 @@ vcluster connect vcluster-1 --service-account viewer --cluster-role view ``` :::info -By default vcluster will try to detect the Kubernetes distribution and either use port-forwarding for remote clusters or it tries to configure the local Kubernetes environment to access the vcluster directly. For remote clusters you can also use `--expose` to create a LoadBalancer to access the vcluster directly or one of the external connection flows. +By default vCluster will try to detect the Kubernetes distribution and either use port-forwarding for remote clusters or it tries to configure the local Kubernetes environment to access the vCluster directly. For remote clusters you can also use `--expose` to create a LoadBalancer to access the vCluster directly or one of the external connection flows. ::: \ No newline at end of file diff --git a/docs/pages/fragments/delete-vcluster.mdx b/docs/pages/fragments/delete-vcluster.mdx index e57ed0dd3..d585b1d57 100644 --- a/docs/pages/fragments/delete-vcluster.mdx +++ b/docs/pages/fragments/delete-vcluster.mdx @@ -15,7 +15,7 @@ import TabItem from '@theme/TabItem' # switch the kube context back vcluster disconnect -# OR: switch context back and delete vcluster +# OR: switch context back and delete vCluster vcluster delete my-vcluster ``` @@ -34,7 +34,7 @@ The easiest option to delete a virtual cluster using `kubectl` is to delete the kubectl delete namespace vcluster-my-vcluster ``` -In case you have multiple vclusters or any other resources in this namespace, you can also just delete the vcluster-related resources: +In case you have multiple vClusters or any other resources in this namespace, you can also just delete the vCluster-related resources: ```bash kubectl delete -n vcluster-my-vcluster serviceaccount vcluster-1 kubectl delete -n vcluster-my-vcluster role vcluster-1 diff --git a/docs/pages/fragments/deploy-to-openshift.mdx b/docs/pages/fragments/deploy-to-openshift.mdx index d94a00ddd..39e8adc56 100644 --- a/docs/pages/fragments/deploy-to-openshift.mdx +++ b/docs/pages/fragments/deploy-to-openshift.mdx @@ -18,7 +18,7 @@ openshift: enable: true ``` -Then create the vcluster with the following command: +Then create the vCluster with the following command: ``` vcluster create my-vcluster -f values.yaml ``` diff --git a/docs/pages/fragments/deploy-vcluster.mdx b/docs/pages/fragments/deploy-vcluster.mdx index b42c22324..e52595b3a 100644 --- a/docs/pages/fragments/deploy-vcluster.mdx +++ b/docs/pages/fragments/deploy-vcluster.mdx @@ -12,19 +12,19 @@ import TabItem from '@theme/TabItem' ```bash -# Create a new vcluster in namespace 
vcluster-my-vcluster +# Create a new vCluster in namespace vCluster-my-vcluster vcluster create my-vcluster -# OR: Use --expose to create a vcluster in a remote cluster with an externally accessible LoadBalancer +# OR: Use --expose to create a vCluster in a remote cluster with an externally accessible LoadBalancer vcluster create my-vcluster --expose -# OR: Use -f to use an additional helm values.yaml with extra chart options to deploy vcluster +# OR: Use -f to use an additional helm values.yaml with extra chart options to deploy vCluster vcluster create my-vcluster -f values.yaml # OR: Use --distro to specify either k0s or vanilla k8s as backing virtual cluster vcluster create my-vcluster --distro k8s -# OR: Use --isolate to create an isolated environment for the vcluster workloads +# OR: Use --isolate to create an isolated environment for the vCluster workloads vcluster create my-vcluster --isolate ``` diff --git a/docs/pages/fragments/high-availability-k3s.mdx b/docs/pages/fragments/high-availability-k3s.mdx index 62958e4e0..126e11c20 100644 --- a/docs/pages/fragments/high-availability-k3s.mdx +++ b/docs/pages/fragments/high-availability-k3s.mdx @@ -1,8 +1,8 @@ ### Enabling High Availability -In order to run vcluster with k3s as Kubernetes distribution in high availability mode, the following steps are required: +In order to run vCluster with k3s as Kubernetes distribution in high availability mode, the following steps are required: -* create and use an [external datastore](../operator/external-datastore.mdx) (as opposed to the embedded SQLite datastore used in single-server setups) +* create and use an [external datastore](../deploying-vclusters/persistence.mdx) (as opposed to the embedded SQLite datastore used in single-server setups) * run two or more k3s pods that will serve the Kubernetes API and run other control plane services First create a `values.yaml` in the following form and make sure to change the connection string in `K3S_DATASTORE_ENDPOINT`: @@ -29,13 +29,13 @@ coredns: replicas: 2 ``` -Then create the vcluster with the following command: +Then create the vCluster with the following command: ``` vcluster create ... --connect=false -f values.yaml ``` -Check that vcluster including the control plane is running correctly: +Check that vCluster including the control plane is running correctly: ``` kubectl get pods -n vcluster @@ -46,7 +46,7 @@ vcluster-54fb5dd76-92szq 2/2 Running 0 vcluster-54fb5dd76-ntbrh 2/2 Running 0 3m1s ``` -Now connect to the vcluster: +Now connect to the vCluster: ``` vcluster connect vcluster -n vcluster diff --git a/docs/pages/fragments/high-availability-k8s.mdx b/docs/pages/fragments/high-availability-k8s.mdx index e55c7ff35..62af0c8c6 100644 --- a/docs/pages/fragments/high-availability-k8s.mdx +++ b/docs/pages/fragments/high-availability-k8s.mdx @@ -1,6 +1,6 @@ ### Enabling High Availability -In order to run vcluster in high availability mode, create a `values.yaml` in the following form: +In order to run vCluster in high availability mode, create a `values.yaml` in the following form: ``` # Enable HA mode @@ -27,12 +27,12 @@ coredns: replicas: 3 ``` -Then create the vcluster with the following command: +Then create the vCluster with the following command: ``` vcluster create ... 
--connect=false --distro k8s -f values.yaml ``` -Check that vcluster including the control plane are running correctly: +Check that vCluster including the control plane is running correctly: ``` kubectl get po -n vcluster NAME READY STATUS RESTARTS AGE @@ -53,7 +53,7 @@ vcluster-etcd-1 1/1 Running 0 vcluster-etcd-2 1/1 Running 0 29m ``` -Now connect to the vcluster: +Now connect to the vCluster: ``` vcluster connect vcluster-1 -n host-namespace-1 @@ -64,10 +64,10 @@ kubectl get ns ``` ### Enable HA in rootless mode -Rootless mode means running vcluster without root user privileges in container, making host k8s cluster more secure. -You can find more about rootless mode [here](../operator/restricted-hosts.mdx). +Rootless mode means running vCluster without root user privileges in the container, making the host k8s cluster more secure. +You can find more about rootless mode [here](../security/rootless-mode.mdx). -Below is HA configuration for running rootless vcluster with vanilla Kubernetes distribution. +Below is an HA configuration for running a rootless vCluster with the vanilla Kubernetes distribution. ``` # Enable HA mode enableHA: true diff --git a/docs/pages/fragments/install/cli.mdx b/docs/pages/fragments/install/cli.mdx index ee97a55ec..6dc3a1c3e 100644 --- a/docs/pages/fragments/install/cli.mdx +++ b/docs/pages/fragments/install/cli.mdx @@ -1,11 +1,11 @@ -import Tabs from '@theme/Tabs' -import TabItem from '@theme/TabItem' - +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + + +```bash +brew install loft-sh/tap/vcluster +``` + + ```bash @@ -63,7 +70,7 @@ You may need to reboot your computer to use the CLI due to changes to the PATH v ::: :::info Check Environment Variable $PATH -Line 4 of this install script adds the install directory `%APPDATA%\vcluster` to the `$PATH` environment variable. This is only effective for the current Powershell session, i.e. when opening a new terminal window, `vcluster` may not be found. +Line 4 of this install script adds the install directory `%APPDATA%\vcluster` to the `$PATH` environment variable. This is only effective for the current Powershell session, i.e. when opening a new terminal window, `vcluster` may not be found. **Make sure to add the folder `%APPDATA%\vcluster` to the `PATH` environment variable after installing vcluster CLI via Powershell. Afterward, a reboot might be necessary.** ::: @@ -73,7 +80,8 @@ Line 4 of this install script adds the install directory `%APPDATA%\vcluster` to Alternatively, you can download the binary for your platform from the [GitHub Releases](https://github.com/loft-sh/vcluster/releases) page and add this binary to your PATH.
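As a rough sketch, a manual install on Linux could look like the following (the release asset name is an assumption here; pick the binary matching your platform from the releases page):

```bash
# Download the vcluster binary (asset name assumed; choose the one for your OS/architecture)
curl -L -o vcluster "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-linux-amd64"

# Make it executable and move it into a directory on your PATH
chmod +x vcluster
sudo mv vcluster /usr/local/bin/vcluster

# Verify the installation
vcluster --version
```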
-To confirm that vcluster CLI is successfully installed, test via: +To confirm that vCluster CLI is successfully installed, test via: + ```bash vcluster --version ``` diff --git a/docs/pages/fragments/non-root-vcluster.mdx b/docs/pages/fragments/non-root-vcluster.mdx index 970f0a807..27bf414e0 100644 --- a/docs/pages/fragments/non-root-vcluster.mdx +++ b/docs/pages/fragments/non-root-vcluster.mdx @@ -22,7 +22,7 @@ securityContext: ``` -Then create the vcluster with the following command: +Then create the vCluster with the following command: ``` vcluster create my-vcluster -f values.yaml ``` diff --git a/docs/pages/fragments/telemetry-opt-out.mdx b/docs/pages/fragments/telemetry-opt-out.mdx index 823dd97b4..4f874f07b 100644 --- a/docs/pages/fragments/telemetry-opt-out.mdx +++ b/docs/pages/fragments/telemetry-opt-out.mdx @@ -18,7 +18,7 @@ vcluster telemetry disable ``` Your decision will be saved in `$HOME/.vcluster/config.json`. -All subsequently created or upgraded vcluster instances managed by the vcluster CLI will have the telemetry disabled. +All subsequently created or upgraded vCluster instances managed by the vCluster CLI will have the telemetry disabled. To re-enable telemetry, you can execute the following command: diff --git a/docs/pages/getting-started/cleanup.mdx b/docs/pages/getting-started/cleanup.mdx index 2d19549ae..9f1443e34 100644 --- a/docs/pages/getting-started/cleanup.mdx +++ b/docs/pages/getting-started/cleanup.mdx @@ -1,5 +1,5 @@ --- -title: Delete vclusters +title: Delete vClusters sidebar_label: 4. Cleanup --- @@ -7,6 +7,6 @@ import DeleteFragment from '../fragments/delete-vcluster.mdx' -:::caution Resources inside vclusters -Deleting a vcluster will also delete all objects within and all state related to the vcluster. +:::caution Resources inside vClusters +Deleting a vCluster will also delete all objects within and all state related to the vCluster. ::: diff --git a/docs/pages/getting-started/connect.mdx b/docs/pages/getting-started/connect.mdx index d024ec9f6..022d05c01 100644 --- a/docs/pages/getting-started/connect.mdx +++ b/docs/pages/getting-started/connect.mdx @@ -1,37 +1,37 @@ --- -title: Connect to and use vcluster -sidebar_label: 3. Use vcluster +title: Connect to and use vCluster +sidebar_label: 3. Use vCluster --- -Now that we deployed a vcluster, let's connect to it, run a couple of `kubectl` commands inside of it and then understand what happens behind the scenes inside our vcluster's host namespace that is part of the underlying host cluster. +Now that we deployed a vCluster, let's connect to it, run a couple of `kubectl` commands inside of it and then understand what happens behind the scenes inside our vCluster's host namespace that is part of the underlying host cluster. -## Connection to the vcluster +## Connection to the vCluster -By default, vcluster CLI will connect to the virtual cluster either directly (on local Kubernetes distributions) or via port-forwarding for remote clusters. +By default, vCluster CLI will connect to the virtual cluster either directly (on local Kubernetes distributions) or via port-forwarding for remote clusters. -If you want to use vcluster without port-forwarding, you can take a look at [other supported exposing methods](../operator/external-access.mdx). +If you want to use vCluster without port-forwarding, you can take a look at [other supported exposing methods](../using-vclusters/access.mdx). ## Run kubectl commands -A virtual cluster behaves the same way as a regular Kubernetes cluster. 
That means you can run any `kubectl` command and since you are admin of this vcluster, you can even run commands like these: +A virtual cluster behaves the same way as a regular Kubernetes cluster. That means you can run any `kubectl` command and since you are admin of this vCluster, you can even run commands like these: ```bash kubectl get namespace kubectl get pods -n kube-system ``` -Let's create a namespace and a demo nginx deployment to understand how vclusters work: +Let's create a namespace and a demo nginx deployment to understand how vClusters work: ```bash kubectl create namespace demo-nginx kubectl create deployment nginx-deployment -n demo-nginx --image=nginx ``` -You can check that this demo deployment will create pods inside the vcluster: +You can check that this demo deployment will create pods inside the vCluster: ```bash kubectl get pods -n demo-nginx ``` ## What happens in the host cluster? -The first thing to understand is that **most** resources inside your vcluster will only exist in your vcluster and **not** make it to the underlying host cluster / host namespace. +The first thing to understand is that **most** resources inside your vCluster will only exist in your vCluster and **not** make it to the underlying host cluster / host namespace. ### 1. Use Host Cluster Kube-Context Let's verify this and switch our kube-context back to the host cluster: @@ -53,7 +53,7 @@ kube-node-lease Active 11d kube-public Active 11d kube-system Active 11d ``` -You will notice that there is **no namespace `demo-nginx`** because this namespace only exists inside the vcluster. Everything that belongs to the vcluster will always remain inside the vcluster's host namespace `vcluster-my-vcluster`. +You will notice that there is **no namespace `demo-nginx`** because this namespace only exists inside the vCluster. Everything that belongs to the vCluster will always remain inside the vCluster's host namespace `vcluster-my-vcluster`. ### 3. Check Deployments So, let's check to see if our deployment `nginx-deployment` has made it to the underlying host cluster: @@ -61,7 +61,7 @@ So, let's check to see if our deployment `nginx-deployment` has made it to the u kubectl get deployments -n vcluster-my-vcluster ``` ```bash -No resources found in vcluster-my-vcluster namespace. +No resources found in vCluster-my-vcluster namespace. ``` You will see that there is **no deployment `nginx-deployment`** because it also just lives inside the virtual cluster. @@ -81,10 +81,10 @@ my-vcluster-0 2/2 Running And there it is! The pod that has been scheduled for our `nginx-deployment` has actually made it to the underlying host cluster. -The reason for this is that vclusters do **not** have separate nodes\*. Instead, they have a **syncer** which synchronizes resources from the vcluster to the underlying host namespace to actually get the pods of the vcluster running on the host cluster's nodes and the containers started inside the underlying host namespace. +The reason for this is that vClusters do **not** have separate nodes\*. Instead, they have a **syncer** which synchronizes resources from the vCluster to the underlying host namespace to actually get the pods of the vCluster running on the host cluster's nodes and the containers started inside the underlying host namespace. :::info Renaming -As you can see above in line 3, the names of pods get rewritten during the sync process since we are mapping pods from X namespaces inside the vcluster into one single host namespace in the underlying host cluster. 
+As you can see above in line 3, the names of pods get rewritten during the sync process since we are mapping pods from X namespaces inside the vCluster into one single host namespace in the underlying host cluster. ::: ## Benefits of Virtual Clusters @@ -94,24 +94,24 @@ Virtual clusters provide immense benefits for large-scale Kubernetes deployments - Taint and label nodes without any influence on the host cluster. - Reuse and share services across multiple virtual clusters with ease. - **Cost Savings:** - - You can create lightweight vclusters that share the underlying host cluster instead of creating separate "real" clusters. - - vclusters are just deployments, so they can be easily auto-scaled, purged, snapshotted and moved. + - You can create lightweight vClusters that share the underlying host cluster instead of creating separate "real" clusters. + - vClusters are just deployments, so they can be easily auto-scaled, purged, snapshotted and moved. - **Low Overhead:** - - vclusters are super lightweight and only reside in a single namespace. - - vclusters run with k3s, a super low-footprint k8s distribution, but they can also run with "real" k8s. - - The control plane of a vcluster runs inside a single pod (+1 CoreDNS pod for vcluster-internal DNS capabilities). + - vClusters are super lightweight and only reside in a single namespace. + - vClusters run with k3s, a super low-footprint k8s distribution, but they can also run with "real" k8s. + - The control plane of a vCluster runs inside a single pod (+1 CoreDNS pod for vCluster-internal DNS capabilities). - **No Network Degradation:** - - Since the pods and services inside a vcluster are actually being synchronized down to the host cluster\*, they are effectively using the underlying cluster's pod and service networking and are therefore not a bit slower than any other pods in the underlying host cluster. + - Since the pods and services inside a vCluster are actually being synchronized down to the host cluster\*, they are effectively using the underlying cluster's pod and service networking and are therefore not a bit slower than any other pods in the underlying host cluster. - **API Server Compatibility:** - - vclusters run with the k3s API server which is certified k8s distro which ensures 100% Kubernetes API server compliance. - - vcluster have their own API server, controller-manager and a separate, isolated data store (sqlite for easiest option but this is configurable, you can also deploy a full-blown etcd if needed). + - vClusters run with the k3s API server, which is a certified k8s distro and therefore ensures 100% Kubernetes API server compliance. + - vClusters have their own API server, controller-manager and a separate, isolated data store (SQLite is the easiest option, but this is configurable; you can also deploy a full-blown etcd if needed). - **Security:** - - vcluster users need much fewer permissions in the underlying host cluster / host namespace. - - vcluster users can manage their own CRDs independently and can even mess with RBAC inside their own vclusters. - - vclusters provide an extra layer of isolation because each vcluster has its own API server and control plane (much fewer requests to the underlying cluster that need to be secured\*). + - vCluster users need much fewer permissions in the underlying host cluster / host namespace. + - vCluster users can manage their own CRDs independently and can even mess with RBAC inside their own vClusters.
+ - vClusters provide an extra layer of isolation because each vCluster has its own API server and control plane (much fewer requests to the underlying cluster that need to be secured\*). - **Scalability:** - Less pressure / fewer requests on the k8s API server in large-scale cluster\* - - Higher scalability of cluster via cluster sharding / API server sharding into smaller vclusters + - Higher scalability of cluster via cluster sharding / API server sharding into smaller vClusters - No need for cluster admins to worry about conflicting CRDs or CRD versions with growing number of users and deployments -\* Only very few resources and API server requests actually reach the underlying Kubernetes API server. Only workload-related resources (e.g. Pod) and networking-related resources (e.g. Service) need to be synchronized down to the host cluster since the vcluster does **not** have any nodes or network itself. +\* Only very few resources and API server requests actually reach the underlying Kubernetes API server. Only workload-related resources (e.g. Pod) and networking-related resources (e.g. Service) need to be synchronized down to the host cluster since the vCluster does **not** have any nodes or network itself. diff --git a/docs/pages/getting-started/deployment.mdx b/docs/pages/getting-started/deployment.mdx index 6352a39b6..5726e92e8 100644 --- a/docs/pages/getting-started/deployment.mdx +++ b/docs/pages/getting-started/deployment.mdx @@ -1,6 +1,6 @@ --- -title: Deploy vcluster -sidebar_label: 2. Deploy vclusters +title: Deploy vCluster +sidebar_label: 2. Deploy vClusters --- import DeploySegment from '../fragments/deploy-vcluster.mdx' @@ -14,17 +14,17 @@ The host namespace will be generated if it does not yet exist. No matter which n ::: :::caution Air-gapped Clusters -If you want to deploy vclusters in an air-gapped environment, you can set the following option in the `values.yaml` used to deploy vcluster: +If you want to deploy vClusters in an air-gapped environment, you can set the following option in the `values.yaml` used to deploy vCluster: ``` defaultImageRegistry: my-private-registry:5000/vcluster/ ``` -This will tell vcluster to prepend the above image registry to all images used by vcluster, such as syncer, k3s, coredns etc. So for example `rancher/k3s:v1.22.2-k3s1` will become `my-private-registry:5000/vcluster/rancher/k3s:v1.22.2-k3s1` +This will tell vCluster to prepend the above image registry to all images used by vCluster, such as syncer, k3s, coredns etc. So for example `rancher/k3s:v1.22.2-k3s1` will become `my-private-registry:5000/vcluster/rancher/k3s:v1.22.2-k3s1` -You can find a list of all needed images by vcluster in the file `vcluster-images.txt` at the [releases page](https://github.com/loft-sh/vcluster/releases), as well as two scripts (download-images.sh & push-images.sh) to pull and push those to your private registry. +You can find a list of all needed images by vCluster in the file `vcluster-images.txt` at the [releases page](https://github.com/loft-sh/vcluster/releases), as well as two scripts (download-images.sh & push-images.sh) to pull and push those to your private registry. ::: ## Admin Permissions Required? -One of the biggest benefits of vcluster compared to other virtual cluster approaches is that it does not require any special permissions. Even if you are not cluster-admin and only have access to deploy applications to one specific namespace, you will very likely be able to spin up a virtual cluster. 
+One of the biggest benefits of vCluster compared to other virtual cluster approaches is that it does not require any special permissions. Even if you are not cluster-admin and only have access to deploy applications to one specific namespace, you will very likely be able to spin up a virtual cluster. Check out the `kubectl` tab above to see what `vcluster create` is actually deploying to the host-namespace. diff --git a/docs/pages/getting-started/setup.mdx b/docs/pages/getting-started/setup.mdx index 3a44235ce..91a2155c4 100644 --- a/docs/pages/getting-started/setup.mdx +++ b/docs/pages/getting-started/setup.mdx @@ -1,12 +1,12 @@ --- -title: Install vcluster CLI +title: Install vCluster CLI sidebar_label: 1. Download CLI --- import InstallCLIFragment from '../fragments/install/cli.mdx' :::note About This Guide -**Goal**: Install vcluster CLI, deploy virtual clusters and learn how they work +**Goal**: Install vCluster CLI, deploy virtual clusters and learn how they work **Estimated time**: 10 minutes **Requirements:** - `kubectl` (check via `kubectl version`) @@ -15,8 +15,8 @@ import InstallCLIFragment from '../fragments/install/cli.mdx' ::: -## Download vcluster CLI -Use one of the following commands to download the vcluster CLI binary from GitHub: +## Download vCluster CLI +Use one of the following commands to download the vCluster CLI binary from GitHub: diff --git a/docs/pages/help&tutorials/bootstrapping.mdx b/docs/pages/help&tutorials/bootstrapping.mdx new file mode 100644 index 000000000..27a6a8b57 --- /dev/null +++ b/docs/pages/help&tutorials/bootstrapping.mdx @@ -0,0 +1,166 @@ +--- +title: Bootstrapping vClusters +sidebar_label: Bootstrapping vClusters +--- + +In this hands-on tutorial, we’ll look at how to configure newly provisioned virtual clusters with init manifests (Kubernetes YAML) and Helm charts that are applied automatically after the virtual cluster is created. This feature lets you configure your virtual clusters fully or install other tools that will configure them, depending on your needs. + +If you’re new to vcluster, you may want to go through our introductory [hands-on tutorial](https://loft.sh/blog/intro-to-vcluster-tutorial/?utm_medium=reader&utm_source=other&utm_campaign=blog_bootstrapping-virtual-kubernetes-clusters-with-vcluster) first. + +# Prerequisites +The only prerequisite is a Kubernetes cluster that you can run the virtual cluster in. Any Kubernetes cluster, local or remote, should work. + +# Install the vcluster CLI +If you use a Mac with Homebrew, you can install the CLI with this command: + +``` +brew install vcluster +``` +For other platforms, see [the installation docs](../getting-started/setup.mdx). The CLI is supported on macOS, Linux, and Windows. + +# Apply a manifest on initialization +The manifests we’re referring to are any valid Kubernetes YAML that you could apply with `kubectl apply -f`. Being able to apply Kubernetes YAML immediately after the cluster is created allows us to automate just about anything we’d want to do when creating or configuring a cluster. + +Let’s look at an example. 
Create a file called values.yaml with the following contents: +``` +init: + manifests: |- + apiVersion: v1 + kind: Namespace + metadata: + name: nginx + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: nginx-deployment + namespace: nginx + spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.25.1 + ports: + - containerPort: 80 +``` + +Let’s break down what’s happening here. + +The first two lines specify that this is the `init.manifest` section of the vcluster YAML file and that what follows are the manifests to be applied. + +The next four lines create a namespace in the virtual cluster called nginx. + +Then there is a separator, which needs to be present between each manifest. + +The rest of the file creates a deployment to launch two NGINX pods in the nginx namespace that was just created. It will launch two pods in the virtual cluster with a label of nginx. + +Next, let’s create a virtual cluster that will automatically apply those manifests. The manifests will be applied in the order they appear in the file. + +Make sure you are pointed at the kube context for the host cluster you use. Then create the virtual cluster. + +``` +vcluster create init-tutorial -f values.yaml +``` +This will create a virtual cluster with the name init-tutorial, and a namespace to run it in. + +You should see output like: +``` +info Creating namespace vcluster-init-tutorial +info failed to find IPv6 service CIDR: couldn't find host cluster Service CIDR ("Service "test-service-9wpzn" is invalid: spec.clusterIPs[0]: Invalid value: []string{"2001:DB8::1"}: IPv6 is not configured on this cluster") +info Detected local kubernetes cluster docker-desktop. Will deploy vcluster with a NodePort & sync real nodes +info Create vcluster init-tutorial... +info execute command: helm upgrade init-tutorial /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/vcluster-0.15.2.tgz-86186483 --kubeconfig /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/3759283775 --namespace vcluster-init-tutorial --install --repository-config='' --values /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/1562320995 --values values.yaml +done √ Successfully created virtual cluster init-tutorial in namespace vcluster-init-tutorial +info Waiting for vcluster to come up... +done √ Switched active kube context to vcluster_init-tutorial_vcluster-init-tutorial_docker-desktop +- Use `vcluster disconnect` to return to your previous kube context +- Use `kubectl get namespaces` to access the vcluster +``` +The exact output will depend a bit on how your host cluster is set up. + +If your host cluster is local (Docker Desktop, Minikube, etc.), vcluster will configure it to use a NodePort and connect to it automaticaly. + +For remote clusters, vcluster will connect using port forwarding. + +Now that we’re connected to the virtual cluster, we can run kubectl commands against it like any other cluster. Let’s look at the pods that are running. +``` +kubectl get pods -n nginx +``` +You should see output like: +``` +NAMESPACE NAME READY STATUS RESTARTS AGE +nginx nginx-deployment-775b6549b5-nr8jh 1/1 Running 0 68s +nginx nginx-deployment-775b6549b5-vcx7w 1/1 Running 0 68s +``` +In this example the two NGINX pods are running in the nginx namespace. The pods and the namespace were created automatically as soon as the vcluster was created. + +Now let’s disconnect from the virtual cluster and delete it to clean up. 
+ +``` +vcluster disconnect +``` +Disconnecting will switch you back to the context of the host cluster. Then run: +``` +vcluster delete init-tutorial +``` + +# Apply a Helm chart on initialization +Now let’s create an NGINX namespace and deployment again, but this time instead of creating the deployment manually, we’ll install NGINX with a public Helm chart. + +First, edit the values.yaml. Remove the previous contents and set them to: +``` +init: + helm: + - chart: + name: nginx + repo: https://charts.bitnami.com/bitnami + # optional field + values: |- + replicaCount: 2 + release: + name: nginx + namespace: nginx +``` +As you can see, we specify the chart name and repo first. Then we can optionally include Helm values for the chart inline (we’ve set the replica count to 2). And last, the release section contains the name (the pods will use this name) and the namespace that’s being used. + +Create the vcluster: + +``` +vcluster create init-tutorial-2 -f values.yaml +``` +Once again you should be automatically connected to the vcluster. And we can view the NGINX pods with: +``` +kubectl get pods -n nginx +``` +Your output should look like: + +``` +NAME READY STATUS RESTARTS AGE +nginx-769c898b4f-g922n 1/1 Running 0 108s +nginx-769c898b4f-vwh6w 1/1 Running 0 108s +``` +And that’s the process for automatically applying a Helm chart when the vcluster is initialized. + +We can clean up once again with: +``` +vcluster disconnect +vcluster delete init-tutorial-2 +``` +You can also apply private Helm charts when vclusters are initialized from a private repo or your local filesystem. For more info, see the docs. + +# Troubleshooting +A quick note on troubleshooting. When a virtual cluster is initialized with manifests or Helm charts, you won’t see any resulting errors in the output of the `vcluster create` command. You can see more information in the host cluster’s logs, though. + +First, if you are connected to the virtual cluster, disconnect with the vcluster disconnect command. Then run this command against the host cluster: +``` +kubectl logs -n vcluster-NAME -l app=vcluster,release=NAME -c syncer +``` +Substitute the name you gave your virtual cluster in the vcluster connect command for NAME in both places. By default, vcluster prepends the string vcluster- to the name of the virtual cluster’s namespace. \ No newline at end of file diff --git a/docs/pages/help&tutorials/helm-provisioning.mdx b/docs/pages/help&tutorials/helm-provisioning.mdx new file mode 100644 index 000000000..8d953cb6b --- /dev/null +++ b/docs/pages/help&tutorials/helm-provisioning.mdx @@ -0,0 +1,162 @@ +--- +title: Provision with Helm +sidebar_label: Provision with Helm +--- + +The vCluster CLI is excellent for interactively provisioning virtual clusters, but if you want to create virtual clusters using automation, vCluster’s Helm install is probably a better option for you. + +In this hands-on tutorial, we’ll look at how to provision virtual clusters using Helm and how to change vCluster’s behavior using a Helm values file. +If you’re new to vCluster, we recommend going through [the introductory tutorial first](https://loft.sh/blog/intro-to-vcluster-tutorial/?utm_medium=reader&utm_source=other&utm_campaign=blog_creating-virtual-kubernetes-clusters-with-vcluster-and-helm). + +# Prerequisites +For this tutorial, you will need: + +- A Kubernetes cluster to use as the host cluster and a kube context that points at it. vCluster works both with local and remote Kubernetes clusters. +- The Helm Command Line Interface (CLI).
You can find installation instructions [here](https://helm.sh/docs/intro/install/?_gl=1*158hplf*_ga*MjEzODEwMTQ1My4xNjY3MjA5OTgx*_ga_4RQQZ3WGE9*MTY5NjMyMTgwMy4xMzcuMS4xNjk2MzI0MDAzLjE2LjAuMA..*_gcl_au*MTYzMDM2NDc5NC4xNjkwMzczNjIyLjIwMTE4NTM2MTIuMTY5NTA0NjA0Ny4xNjk1MDQ2MDQ2/). +- The vCluster CLI. If you use a Mac with Homebrew, you can install it with this command: brew install vcluster. For other platforms, see [the installation docs](../getting-started/setup.mdx). +# 1. Provision a vCluster with Helm +If you have previously used the vCluster CLI to create a virtual cluster, you may have noticed that the vcluster create command uses Helm. Here’s some example output: + +``` +info Creating namespace vcluster-my-vcluster +info failed to find IPv6 service CIDR: couldn't find host cluster Service CIDR ("Service "test-service-kltbh" is invalid: spec.clusterIPs[0]: Invalid value: []string{"2001:DB8::1"}: IPv6 is not configured on this cluster") +info Detected local kubernetes cluster docker-desktop. Will deploy vcluster with a NodePort & sync real nodes +info Create vcluster my-vcluster... +info execute command: helm upgrade my-vcluster /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/vcluster-0.15.5.tgz-3388530343 --kubeconfig /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/1101813885 --namespace vcluster-my-vcluster --install --repository-config='' --values /var/folders/gy/d3_c4t1x731_hl8qtrfkhr_h0000gn/T/2950643978 +done √ Successfully created virtual cluster my-vcluster in namespace vcluster-my-vcluster +info Waiting for vcluster to come up... +done √ Switched active kube context to vcluster_my-vcluster_vcluster-my-vcluster_docker-desktop +- Use `vcluster disconnect` to return to your previous kube context +- Use `kubectl get namespaces` to access the vcluster +``` +Notice the line that begins with info execute command: helm upgrade. +But instead of using the vCluster CLI, in this tutorial, we’ll run the Helm command directly. + +First, let’s create a namespace for the virtual cluster. The default pattern is to name it vcluster- followed by the name of the virtual cluster. + +``` +kubectl create namespace vcluster-my-vcluster +``` +Then run the helm upgrade command: + +``` +helm upgrade --install my-vcluster vcluster \ + --repo https://charts.loft.sh \ + --namespace vcluster-my-vcluster \ + --repository-config='' +``` +You should see output like this: + +``` +Release "my-vcluster" does not exist. Installing it now. +NAME: my-vcluster +LAST DEPLOYED: Tue Aug 22 13:46:13 2023 +NAMESPACE: vcluster-my-vcluster +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +Thank you for installing vcluster. + +Your vcluster is named my-vcluster in namespace vcluster-my-vcluster. + +To connect to the vcluster, use vcluster CLI (https://www.vcluster.com/docs/getting-started/setup): + $ vcluster connect my-vcluster -n vcluster-my-vcluster + $ vcluster connect my-vcluster -n vcluster-my-vcluster -- kubectl get ns +``` + +You can verify the virtual cluster was created correctly using the vcluster list command. + +``` +vcluster list +``` +Here’s my example output: + +``` + NAME NAMESPACE STATUS CONNECTED CREATED AGE CONTEXT + my-vcluster vcluster-my-vcluster Running 2023-08-22 13:46:13 -0700 PDT 4m14s docker-desktop +``` +To interact with the virtual cluster, we use the connect command. 
+ +``` +vcluster connect my-vcluster +``` +The output will look like: + +``` +done √ Switched active kube context to vcluster_my-vcluster_vcluster-my-vcluster_docker-desktop +warn Since you are using port-forwarding to connect, you will need to leave this terminal open +- Use CTRL+C to return to your previous kube context +- Use `kubectl get namespaces` in another terminal to access the vcluster +Forwarding from 127.0.0.1:12168 -> 8443 +Forwarding from [::1]:12168 -> 8443 +``` +At this point, the virtual cluster is up and running, and we can interact with it just like any other Kubernetes cluster. + +Hit CTRL+C as the output instructed to disconnect the port forwarding. Then delete the virtual cluster using Helm. + +``` +helm delete my-vcluster -n vcluster-my-vcluster --repository-config='' +``` +You should see: + +``` +release "my-vcluster" uninstalled +``` + +# 2. Create a vCluster using a Helm values file +One of the big differences between creating a virtual cluster with the vCluster CLI and creating one with Helm is that with the CLI we can pass command line flags to control vCluster’s behavior. That’s not the case with Helm. To make those modifications when we install with Helm we use a values file. + +Create a file called vcluster.yaml in your current directory with these contents: + +``` +vcluster: + image: rancher/k3s:v1.27.4-k3s1 +``` +By placing this entry in the file, we’ll pin the version of Kubernetes that runs in the virtual cluster to this specific version (1.27.4). + +Now let’s create a vCluster with that file. + +``` +helm upgrade --install my-vcluster vcluster \ + --values vcluster.yaml \ + --repo https://charts.loft.sh \ + --namespace vcluster-my-vcluster \ + --repository-config='' +``` +Then run vcluster list. Your output should look something like this: + +``` +NAME NAMESPACE STATUS CONNECTED CREATED AGE CONTEXT + my-vcluster vcluster-my-vcluster Running 2023-08-23 10:44:54 -0700 PDT 29s docker-desktop +``` +Next, let’s check that we got the version of Kubernetes that we wanted. We can run the vcluster connect command and pass in a kubectl command directly instead of port forwarding. + +``` +vcluster connect my-vcluster -n vcluster-my-vcluster -- kubectl version +``` +You should get output like: +``` +Client Version: v1.28.0 +Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3 +Server Version: v1.27.4+k3s1 +``` +That’s the server version we specified in the values file, 1.27.4. + +Run these commands to clean up: + +``` +helm delete my-vcluster -n vcluster-my-vcluster --repository-config='' +kubectl delete namespace vcluster-my-vcluster +``` + +# 3. Examine the default Helm values on GitHub +We can see the complete list of values that can be modified and their default settings by looking in the [GitHub repo for vCluster](https://github.com/loft-sh/vcluster) . Inside the vCluster repo is a subdirectory called charts, and inside of that is a subdirectory for each vCluster distro. + +If you’re not familiar with what a vCluster distro is, the distro is the distribution of Kubernetes that runs inside of the virtual cluster. The default is k3s, and the other supported distros are k0s, eks, and k8s (a larger Kubernetes install that uses etcd). + +We’ve used the default distro for this tutorial, k3s. To view its default values, we’d want to examine the file vcluster/charts/k3s/values.yaml. 
+You can view the [entire file here](https://github.com/loft-sh/vcluster/blob/main/charts/k3s/values.yaml?_gl=1*8boa70*_ga*MjEzODEwMTQ1My4xNjY3MjA5OTgx*_ga_4RQQZ3WGE9*MTY5NjMyMTgwMy4xMzcuMS4xNjk2MzIzMzkwLjYwLjAuMA..*_gcl_au*MTYzMDM2NDc5NC4xNjkwMzczNjIyLjIwMTE4NTM2MTIuMTY5NTA0NjA0Ny4xNjk1MDQ2MDQ2/). + +You’ll see the list of all the values supported for that distro and their defaults. To override those default values, put the option and the new value in your vcluster.yaml file that you use for the Helm install, as we did in the previous step. + diff --git a/docs/pages/troubleshooting.mdx b/docs/pages/help&tutorials/troubleshooting.mdx similarity index 63% rename from docs/pages/troubleshooting.mdx rename to docs/pages/help&tutorials/troubleshooting.mdx index 95797ecec..3c3514232 100644 --- a/docs/pages/troubleshooting.mdx +++ b/docs/pages/help&tutorials/troubleshooting.mdx @@ -3,28 +3,28 @@ title: Troubleshooting sidebar_label: Troubleshooting --- -In this section you will find common problems and their solutions or workarounds. In general, it's always a good start to check the vcluster and syncer logs via kubectl: +In this section you will find common problems and their solutions or workarounds. In general, it's always a good start to check the vCluster and syncer logs via kubectl: ``` # Retrieve syncer logs kubectl logs -n test -l app=vcluster,release=test -c syncer -# Retrieve vcluster logs +# Retrieve vCluster logs kubectl logs -n test -l app=vcluster,release=test -c vcluster ``` -If you are having problems with k3s not starting or database being locked, you can also try to [use a different distribution such as k0s or k8s](./operator/other-distributions.mdx) or try to use another [storage type for k3s](./operator/external-datastore.mdx). +If you are having problems with k3s not starting or database being locked, you can also try to [use a different distribution such as k0s or k8s](../deploying-vclusters/supported-distros.mdx) or try to use another [storage type for k3s](../deploying-vclusters/persistence.mdx). -### Problem: using vcluster with an ingress causes unauthorized errors +### Problem: using vCluster with an ingress causes unauthorized errors -The problem is that SSL termination does happen at the ingress controller level and not at vcluster itself. By default, vcluster uses client cert authentication, which will be sent to the ingress controller and the ingress controller will then forward the request to vcluster, but without the client cert, which causes the error. There are possible solutions to this problem: +The problem is that SSL termination does happen at the ingress controller level and not at vCluster itself. By default, vCluster uses client cert authentication, which will be sent to the ingress controller and the ingress controller will then forward the request to vCluster, but without the client cert, which causes the error. There are possible solutions to this problem: -1. Use SSL pass through for your ingress controller as described [here](./operator/external-access.mdx). Make sure you do not have `spec.tls` defined. -2. Use service account authentication instead of client-cert and client-key described [here](./operator/accessing-vcluster.mdx#connect-via-service-accounts) +1. Use SSL pass through for your ingress controller as described [here](../using-vclusters/access.mdx). Make sure you do not have `spec.tls` defined. +2. 
Use service account authentication instead of client-cert and client-key as described in the [access documentation](../using-vclusters/access.mdx) -### Problem: installing vcluster causes Error: Chart.yaml file is missing +### Problem: installing vCluster causes Error: Chart.yaml file is missing -You have a folder or file called vcluster in the current working directory. This is a known helm problem, where helm thinks this is a chart directory. The solution is to install vcluster in a folder where no other folder or file with the name of vcluster is present. +You have a folder or file called `vcluster` in the current working directory. This is a known helm problem, where helm thinks this is a chart directory. The solution is to install vCluster in a folder where no other folder or file with the name `vcluster` is present. ### CoreDNS Problem: [FATAL] plugin/loop: Loop (127.0.0.1:59658 -> :53) detected for zone "." @@ -40,7 +40,7 @@ syncer: - --sync=-ingresses ``` -And then either upgrading or recreating the vcluster with: +And then either upgrading or recreating the vCluster with: ``` vcluster create test -n test --upgrade -f values.yaml diff --git a/docs/pages/index.mdx b/docs/pages/index.mdx new file mode 100644 index 000000000..3915edd49 --- /dev/null +++ b/docs/pages/index.mdx @@ -0,0 +1,11 @@ +--- +title: vCluster Documentation +slug: / +hide_table_of_contents: true +--- + +import { Redirect } from "@docusaurus/router"; + +export default function Home() { + return ; +} diff --git a/docs/pages/networking/coreDNS.mdx b/docs/pages/networking/coreDNS.mdx new file mode 100644 index 000000000..d50062368 --- /dev/null +++ b/docs/pages/networking/coreDNS.mdx @@ -0,0 +1,18 @@ +--- +title: DNS +sidebar_label: DNS +--- + +## CoreDNS + +Each vCluster has its own DNS service (CoreDNS by default) which allows pods in the vCluster to get the IP addresses of services that are also running in this vCluster. +The vCluster syncer ensures that the intuitive naming logic of Kubernetes DNS names for services applies and users can connect to these DNS names which in fact map to the IP address of the synchronized services that are present in the underlying host cluster. + +However, this also means that you cannot directly access host services inside the virtual cluster via DNS, and host pods can only access virtual cluster services by their synced names. vCluster offers a feature to map services from the virtual cluster to the host cluster and vice versa. + + +### Fallback to host DNS +If enabled, vCluster will fall back to the host DNS for resolving domains. This is useful if you are using Istio or Dapr in the host cluster and sidecar containers cannot connect to the central instance. It's also useful if you want to access host cluster services from within the vCluster. We can enable this feature with +```yaml +fallbackHostDns: true +``` \ No newline at end of file diff --git a/docs/pages/networking/ingress_traffic.mdx b/docs/pages/networking/ingress_traffic.mdx new file mode 100644 index 000000000..018e2d74c --- /dev/null +++ b/docs/pages/networking/ingress_traffic.mdx @@ -0,0 +1,27 @@ +--- +title: Ingress Traffic +sidebar_label: Ingress Traffic +--- + +## Ingress Controller Traffic +The vCluster has the option to enable Ingress resources synchronization. That means that you can create an ingress in a vCluster to make a service in this vCluster available via a hostname/domain.
However, instead of having to run a separate ingress controller in each vCluster, the ingress resource will be synchronized to the underlying cluster (when enabled), which means that the vCluster can use a shared ingress controller that is running in the host cluster. This helps to share resources across different vClusters and is easier for users of vClusters because otherwise, they would need to install an ingress controller and manually configure DNS for each vCluster. + +:::info +Before the v0.12.0 release of vCluster, the Ingress synchronization was enabled by default. +::: + +### Enable Ingress Sync +If you want to use an ingress controller from the underlying cluster by synchronizing the Ingress resources, set the following in your `values.yaml`: +``` +sync: + ingresses: + enabled: true +``` +then create or upgrade the vCluster with: + +``` +vcluster create my-vcluster --upgrade -f values.yaml +``` + +### SSL Certificates +Because the syncer keeps typical SSL provisioning related annotations for ingresses, you may also set the cert-manager ingress annotations on an ingress in your vClusters to use the cert-manager of the underlying host cluster to automatically provision SSL certificates from Let's Encrypt. \ No newline at end of file diff --git a/docs/pages/networking/internal_traffic/host_to_vcluster.mdx b/docs/pages/networking/internal_traffic/host_to_vcluster.mdx new file mode 100644 index 000000000..9348f4ee2 --- /dev/null +++ b/docs/pages/networking/internal_traffic/host_to_vcluster.mdx @@ -0,0 +1,15 @@ +--- +title: Map Host Cluster Service to vCluster Service +sidebar_label: From Host to vCluster +--- + +For example, to map a service `my-host-service` in the namespace `my-host-namespace` to the virtual cluster service `my-virtual-service` in the virtual cluster namespace `my-virtual-namespace`, you can use the following config in your `values.yaml`: + +```yaml +mapServices: + fromHost: + - from: my-host-namespace/my-host-service + to: my-virtual-namespace/my-virtual-service +``` + +With this configuration, vCluster will manage a service called `my-virtual-service` inside the virtual cluster that points to the host service `my-host-service` in namespace `my-host-namespace`. So pods inside the vCluster will be able to access the host service via e.g. `curl http://my-virtual-service.my-virtual-namespace`. \ No newline at end of file diff --git a/docs/pages/networking/internal_traffic/vcluster_to_host.mdx b/docs/pages/networking/internal_traffic/vcluster_to_host.mdx new file mode 100644 index 000000000..c8e2f30d2 --- /dev/null +++ b/docs/pages/networking/internal_traffic/vcluster_to_host.mdx @@ -0,0 +1,16 @@ +--- +title: Map vCluster Service to Host Cluster Service +sidebar_label: From vCluster to Host +--- + +It is also possible to map a virtual cluster service to a host cluster service. This is especially useful if you want to expose an application that runs inside the virtual cluster to other workloads running in the host cluster. This also makes it easier to share services across vClusters.
+For example, to map a virtual service `my-virtual-service` in the namespace `my-virtual-namespace` to the vCluster host namespace service `my-host-service`, you can use the following config in your `values.yaml`: + +```yaml +mapServices: + fromVirtual: + - from: my-virtual-namespace/my-virtual-service + to: my-host-service +``` + +With this configuration, vCluster will manage a service called `my-host-service` inside the namespace where the vCluster workloads are synced to, which points to the virtual service `my-virtual-service` in namespace `my-virtual-namespace` inside the vCluster. So pods in the host cluster will be able to access the virtual service via e.g. `curl http://my-host-service`. \ No newline at end of file diff --git a/docs/pages/networking/network_policies.mdx b/docs/pages/networking/network_policies.mdx new file mode 100644 index 000000000..61f9b2eb6 --- /dev/null +++ b/docs/pages/networking/network_policies.mdx @@ -0,0 +1,25 @@ +--- +title: Network Policies +sidebar_label: Network Policies +--- + +Kubernetes has a [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) resource type that allows creation of the rules that govern how pods communicate with each other. + +By default, vCluster ignores these resources. However, once you enable synchronization of the Network Policies, vCluster will ensure correct policies are created in the host cluster to achieve the desired traffic behaviour. + +:::info +Network Policies in vCluster rely on the support for this feature in the host cluster. Make sure that your host cluster satisfies the [Network Policy prerequisites](https://kubernetes.io/docs/concepts/services-networking/network-policies/#prerequisites). +::: + +### Enable Network Policy Sync +To enable the synchronization of the Network Policy resources add the following to your `values.yaml`: +``` +sync: + networkpolicies: + enabled: true +``` +then create or upgrade the vCluster with: + +``` +vcluster create my-vcluster --upgrade -f values.yaml +``` \ No newline at end of file diff --git a/docs/pages/networking/networking.mdx b/docs/pages/networking/networking.mdx new file mode 100644 index 000000000..48d8db34b --- /dev/null +++ b/docs/pages/networking/networking.mdx @@ -0,0 +1,19 @@ +--- +title: Networking +sidebar_label: Overview +--- + +
+ (figure: vcluster - Networking)
+ +By default, resources such as `Service` and `Ingress` are synced from the virtual cluster to the host cluster in order to enable correct network functionality for the vCluster. + + +## Pod-To-Pod Traffic +Since pods are synchronized by the [syncer component](../architecture/syncer/syncer.mdx) of the vCluster, they actually run inside the host namespace of the underlying cluster. That means that these pods have regular cluster-internal IP addresses and can communicate with each other via IP-based networking. + +## Pod-To-Service Traffic +By default, the vCluster also synchronizes Services (while stripping away unnecessary information from the resource) to allow pods to communicate with services. However, instead of using the DNS names of the services inside the host cluster, the vCluster has its own DNS service which allows the vCluster pods to use much more intuitive DNS mappings just as in a regular cluster. + diff --git a/docs/pages/o11y/logging/central_hpm.mdx b/docs/pages/o11y/logging/central_hpm.mdx new file mode 100644 index 000000000..5cffff8d7 --- /dev/null +++ b/docs/pages/o11y/logging/central_hpm.mdx @@ -0,0 +1,35 @@ +--- +title: Centralized Hostpath Mapper +sidebar_label: Centralized Hostpath Mapper +--- + +This feature is an extention to the existing [hostpath mapper](./hpm.mdx) component of vCluster. +Currently when enabled, hostpath mapper can support the following usecases: +1. [Enabling container based logging used by tools like fluentd, logstash etc.](./elk_stack.mdx) inside vCluster +2. [Enabling pod based logging used by loki](./grafana_loki.mdx) inside vCluster +3. [Velero restic backups] inside vCluster +4. Support for kubevirt workloads. + +In the normal deployment of hostpath mapper which can be enabled by: +```yaml +hostpathMapper: + enabled: true +``` + +A `daemonset` is spawned per node, per vCluster – which can be a resource hog as the number of `Nodes` and `vclusters` increase. + +The hostpath mapper feature was extended to a Central Hostpath Mapper to overcome this potential bottleneck. + +This is a loft-integrated feature, and the related docs can be found [here](https://loft.sh/docs/virtual-clusters/central-hostpath-mapper). +Alternatively, one can follow the instructions below: + +* Select the 'Projects' field on the left menu bar. +* From the project drop-down menu, select the project you'd like to create the virtual cluster in. +* Click the Virtual Clusters option on the project pane. +* Click the 'Create Virtual Cluster' button on the right side of the page. +* [Optional] select the cluster in which to create the virtual cluster. +* Click the 'Select' button to continue. +* Click the 'Advanced Options' configuration tab and expand the 'Host Path Mapper' section. +* Toggle the slider 'Enable'. This will add the `loft.sh/hpm-enabled` annotation to the template metadata which will be automatically synced to the virtual cluster on creation. Secondly, it will add the `rewrite-host-paths=true` as extraArgs to the helm values for the syncer. +* Finish configuring anything else you'd like on your virtual cluster, then click the +button. 
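For reference, the slider described in the last step roughly corresponds to setting the following helm values for the virtual cluster. This is only a sketch: the exact flag spelling is an assumption based on the `rewrite-host-paths=true` argument mentioned above, and the Loft UI normally applies the equivalent configuration for you.
```yaml
# Sketch only - assumes the usual syncer.extraArgs structure of the vCluster chart
# and that the argument is passed as a "--" flag; the UI toggle sets this automatically.
syncer:
  extraArgs:
    - --rewrite-host-paths=true
```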
\ No newline at end of file diff --git a/docs/pages/o11y/logging/elk_stack.mdx b/docs/pages/o11y/logging/elk_stack.mdx new file mode 100644 index 000000000..9ae0c720f --- /dev/null +++ b/docs/pages/o11y/logging/elk_stack.mdx @@ -0,0 +1,44 @@ +--- +title: Logging with ELK and fluentd +sidebar_label: ELK and fluentd +--- + +### Install the ELK stack: +```yaml +helm upgrade --install elk-elasticsearch elastic/elasticsearch -f elastic_values.yaml -n logging --create-namespace +helm upgrade --install elk-logstash elastic/logstash -f logstash_values.yaml -n logging +helm upgrade --install elk-kibana elastic/kibana -f kibana_values.yaml -n logging + +# optionally install filebeat if you plan to use filebeat instead of fluentd +helm upgrade --install elk-filebeat elastic/filebeat -f filebeat_values.yaml -n logging +``` + +### Install fluentd daemonset, this can be found on [github](https://github.com/fluent/fluentd-kubernetes-daemonset/blob/master/fluentd-daemonset-elasticsearch-rbac.yaml): +```yaml +kubectl apply -f fluentd-daemonset-elasticsearch.yaml +``` +Alternatively, you can also deploy via the [helm charts provided by fluentbit](https://docs.fluentbit.io/manual/installation/kubernetes#installing-with-helm-chart). + +### Setup ELK indexes +1. Check for available indices - `port-forward` the `elasticsearch-master` on port `9100` and visit the [http://localhost:9200/\_cat/indices](http://localhost:9200/_cat/indices), + you should see the following `logstash-*` indices available: + ``` + green open .geoip_databases rP6BifVQSuCv1XmctC0M_Q 1 0 40 0 38.4mb 38.4mb + green open .kibana_task_manager_7.17.3_001 p5Idg-xWTpCj4TWh6YpNrQ 1 0 17 543 123.6kb 123.6kb + yellow open logstash-2022.10.10 nyG-OW_qRKCBertmmOwwyw 1 1 895 0 416.6kb 416.6kb ◀─────┐ + green open .apm-custom-link jv3jzCztQUujEYwYv1iTIw 1 0 0 0 226b 226b │ ┌───────────────┐ + green open .apm-agent-configuration NsZHlaeGSmSc7xSa8CGcOA 1 0 0 0 226b 226b │ │ Logstash │ + yellow open logstash-2022.10.07 cW3b1TJlROCwV2BKkzpt2Q 1 1 212 0 52.1kb 52.1kb ◀─────┼──────│ Entries │ + yellow open logstash-2022.10.08 yzU4pqq3QOyZkukcmGKpaw 1 1 172 0 43.6kb 43.6kb ◀─────┤ └───────────────┘ + yellow open logstash-2022.10.09 n9GQnFB4RSWlWwkFG1848g 1 1 866 0 100.4kb 100.4kb ◀─────┘ + green open .kibana_7.17.3_001 BjXjQqXcRoiiGQg_zsrSrg 1 0 21 8 2.3mb 2.3mb + ``` + 1. Next `port-forward` the kibana dashboard on its default port `5601` and navigate to [http://localhost:5601/app/management](http://localhost:5601/app/management) or + choose "Stack Management" from left menu side bar. + Screenshot 2022-10-10 at 3 46 50 PM + 1. Choose "Index Patterns" and click on "Create index Pattern" + Screenshot 2022-10-10 at 3 49 07 PM + 1. Type the **Name** as `logstash*` and `@timestamp` for the **Timestamp field** and click on "Create index pattern" + Screenshot 2022-10-10 at 3 50 13 PM + 1. Now you can navigate to [http://localhost:5601/app/discover](http://localhost:5601/app/discover) or click on "Discover" from the left sidebar menu and should start seeing your logs. 
+ image diff --git a/docs/pages/o11y/logging/grafana_loki.mdx b/docs/pages/o11y/logging/grafana_loki.mdx new file mode 100644 index 000000000..9f5df709c --- /dev/null +++ b/docs/pages/o11y/logging/grafana_loki.mdx @@ -0,0 +1,31 @@ +--- +title: Logging with Grafana and Loki +sidebar_label: Grafana and Loki +--- + +### Install the Prometheus stack: +``` +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --namespace monitor --create-namespace +``` + +### Install Loki: +``` +helm repo add loki https://grafana.github.io/loki/charts +helm upgrade --install loki --namespace=monitoring grafana/loki-stack --create-namespace +``` + +### Setup Data Sources in Grafana +1. Open the Grafana Dashboard: + * Port-forward grafana dashboard `kubectl port-forward -n monitor service/prometheus-grafana 3000:80` + * Get Grafana credentials `kubectl get secrets -n monitor prometheus-grafana -o jsonpath='{.data.admin-password}' | base64 -D` + * Navigate to [http://localhost:3000](http://localhost:3000) +1. Add a data source by navigating to [http://localhost:3000/datasources](http://localhost:3000/datasources) or click "Data Sources" under the ⚙️ icon from left menu. + + image +1. Click on "Add Data Sources" and select "Loki" from the list. + image +1. Enter the loki endpoint in the `URL` field as `http://loki.monitoring:3100` or to the corresponding `.:` value according to your deployment, and click on "Save & test". + image +1. Next click on "Explore" or navigate to [http://localhost:3000/explore](http://localhost:3000/explore) and select "Loki" from the dropdown menu. Select the desired Labels and Click on "Run query". Youre logs should now start appearing. + image \ No newline at end of file diff --git a/docs/pages/o11y/logging/hpm.mdx b/docs/pages/o11y/logging/hpm.mdx new file mode 100644 index 000000000..a32638195 --- /dev/null +++ b/docs/pages/o11y/logging/hpm.mdx @@ -0,0 +1,34 @@ +--- +title: Enabling the HostPath Mapper +sidebar_label: HostPath Mapper +--- + +Vcluster internal logging relies on separate component called the [Hostpath Mapper](https://github.com/loft-sh/vcluster-hostpath-mapper). This will make sure to resolve the correct virtual pod and container names to their physical counterparts. +To deploy this component, its basically a 2 step process +### Update the vCluster +You would want to create the vCluster with the following `values.yaml`: +``` +syncer: + extraArgs: + - --mount-physical-host-paths=true +``` + +* For new vCluster run `vcluster create -f values.yaml` +* For existing vCluster run `vcluster create --upgrade -f values.yaml` + +### Deploy the Hostpath Mapper Daemonset +Now that the vCluster itself is ready, we can deploy the hostpath mapper component. We need the following 2 pieces of information for this: +* The Hostpath Mapper has to be deployed in the same namespace and the target vCluster +* We need to set the `.Values.VclusterReleaseName` value when deploying this helm chart equal to the name of the target vCluster + +To sum up, if your vCluster is named `my-vcluster` and is deployed in namespace `my-namespace` then you should run +``` +helm install vcluster-hpm vcluster-hostpath-mapper \ + --repo https://charts.loft.sh \ + -n my-namespace \ + --set VclusterReleaseName=my-vcluster +``` + +Once deployed successfully a new Daemonset component of vCluster would start running on every node used by the vCluster workloads. 
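To verify the rollout, you can list the DaemonSets and their pods in the vCluster namespace. The commands below assume the example names `my-namespace` and `my-vcluster` used above; the actual DaemonSet name depends on your helm release name.
```
# assumes the example namespace from the helm command above
kubectl get daemonsets -n my-namespace
kubectl get pods -n my-namespace -o wide
```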
+ +We can now install our desired logging stack and start collecting the logs. \ No newline at end of file diff --git a/docs/pages/o11y/metrics/metrics_server.mdx b/docs/pages/o11y/metrics/metrics_server.mdx new file mode 100644 index 000000000..7722baeff --- /dev/null +++ b/docs/pages/o11y/metrics/metrics_server.mdx @@ -0,0 +1,46 @@ +--- +title: Metrics Server (in vCluster) +sidebar_label: Metrics Server in vCluster +--- + +### Installing metrics server (inside vCluster) + +In case the above recommended method of getting metrics in vCluster using the metrics server proxy does not fulfil your requirements and you need a dedicated metrics server installation in the vCluster you can follow this section. +Make sure the vCluster has access to the host clusters nodes. [Enabling real nodes synchronization](../../architecture/nodes.mdx) will create the required RBAC permissions. + +Install the [metrics server](https://github.com/kubernetes-sigs/metrics-server#installation) via the official method into the vCluster. + +Wait until the metrics server has started. You should be now able to use `kubectl top pods` and `kubectl top nodes` within the vCluster: +``` +kubectl top pods --all-namespaces +NAMESPACE NAME CPU(cores) MEMORY(bytes) +kube-system coredns-854c77959c-q5878 3m 17Mi +kube-system metrics-server-5fbdc54f8c-fgrqk 0m 6Mi +``` + +If you see below error after installing metrics-server (check [k3s#5334](https://github.com/k3s-io/k3s/issues/5344) for more information): + +``` +loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503 +``` +Create a file named `metrics_patch.yaml` with the following contents: +``` +spec: + template: + spec: + containers: + - name: metrics-server + command: + - /metrics-server + - --metric-resolution=30s + - --kubelet-insecure-tls=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP +``` +and apply the patch with kubectl: +``` +kubectl patch deployment metrics-server --patch-file metrics_patch.yaml -n kube-system +``` + +### How does it work? + +By default, vCluster will create a service for each node which redirects incoming traffic from within the vCluster to the node kubelet to vCluster itself. This means that if workloads within the vCluster try to scrape node metrics the traffic reaches vCluster first. Vcluster will redirect the incoming request to the host cluster and rewrite the response (pod names, pod namespaces etc) and return it to the requester. diff --git a/docs/pages/o11y/metrics/metrics_server_proxy.mdx b/docs/pages/o11y/metrics/metrics_server_proxy.mdx new file mode 100644 index 000000000..9487ac507 --- /dev/null +++ b/docs/pages/o11y/metrics/metrics_server_proxy.mdx @@ -0,0 +1,25 @@ +--- +title: Metrics Server Proxy +sidebar_label: Metrics Server Proxy +--- + +You can monitor the vCluster either from the host cluster or directly from within the vCluster. + +:::info +In order to get node metrics from within the vCluster, vCluster will need to have RBAC permissions to access them. These permissions are given to vCluster when synchronization of the real nodes is enabled. See [Nodes documentation page](../../architecture/nodes.mdx) for more details. 
+::: + +### Enabling the metrics server proxy (Recommended) +:::info +This feature requires a working installation of metrics server on the host cluster +::: + +Its possible to proxy the metrics server in the underlying host cluster and get the `pod`/`node` metrics individually or both of them according to the use case. This can be enabled with the following values: +``` +proxy: + metricsServer: + nodes: + enabled: true + pods: + enabled: true +``` \ No newline at end of file diff --git a/docs/pages/o11y/metrics/monitoring_vcluster.mdx b/docs/pages/o11y/metrics/monitoring_vcluster.mdx new file mode 100644 index 000000000..d50717901 --- /dev/null +++ b/docs/pages/o11y/metrics/monitoring_vcluster.mdx @@ -0,0 +1,20 @@ +--- +title: Monitoring vCluster +sidebar_label: Monitoring the vCluster +--- + +vCluster is able to rewrite node stats and metrics. This means monitoring a vCluster works similar to monitoring a regular Kubernetes cluster. + +:::info +You need to make sure that vCluster has access to the host clusters nodes. [Enabling real nodes synchronization](../../architecture/nodes.mdx) will create the required RBAC permissions. +::: + +Please follow the [official Kuberentes documentation](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-usage-monitoring/) on how to monitor a Kubernetes cluster. + +### How does it work? + +By default, vCluster will create a service for each node which redirects incoming traffic from within the vCluster to the node kubelet to vCluster itself. This means that if workloads within the vCluster try to scrape node metrics the traffic reaches vCluster first. Vcluster will redirect the incoming request to the host cluster and rewrite the response (pod names, pod namespaces etc) and return it to the requester. + +## Monitoring the vCluster StatefulSet + +vcluster exposes metrics endpoints on `https://0.0.0.0:8443/metrics` (syncer metrics) and `https://0.0.0.0:6444/metrics` (k3s metrics). In order to scrape those metrics, you will need to send an `Authorization` header with a valid virtual cluster service account token, that has permissions to access the `/metrics` endpoint within the vcluster. diff --git a/docs/pages/operator/cluster-api-provider.mdx b/docs/pages/operator/cluster-api-provider.mdx deleted file mode 100644 index 52b1254fc..000000000 --- a/docs/pages/operator/cluster-api-provider.mdx +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Cluster API Provider -sidebar_label: Cluster API Provider ---- - -[Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/) is a Kubernetes sub-project focused on providing declarative APIs and tooling to simplify provisioning, upgrading, and operating multiple Kubernetes clusters. Kubernetes service providers and distribution maintainers implement CAPI providers which allow users to create Kubernetes clusters of their choice with the CAPI tooling. For the vcluster project, we have also implemented a CAPI provider - [cluster-api-provider-vcluster](https://github.com/loft-sh/cluster-api-provider-vcluster). Instructions below will describe how to instantiate a vcluster instance using CAPI tooling. - -## Prerequisites -[clusterctl (v1.1.5+)](https://cluster-api.sigs.k8s.io/clusterctl/overview.html) CLI tool is used to install the vcluster provider and to generate a custom resource that will serve as a declarative definition of the cluster. 
It is possible to create the custom resource without clusterctl, but in this guide, we will be using it because it provides access to the templates for the `VCluster` custom resource. Please follow [the official installation instructions for clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl). - -You will need cluster-admin permissions in a Kubernetes cluster where you wish to install the CAPI stack with the vcluster provider. This is required because vcluster instances that will be created by the vcluster provider may have arbitrary RBAC permission requirements. The vcluster provider installation with a more limited permission scope will be introduced in a future version. - -## Installing the CAPI stack with the vcluster provider -Switch your kubectl context to the cluster where you wish to install the CAPI stack with the vcluster provider and run the following command: -```shell -clusterctl init --infrastructure vcluster -``` - -The `"Your management cluster has been initialized successfully!"` message should eventually appear in the output. - -## Creating a vcluster -We will use the clusterctl CLI tool to generate the Kubernetes resource definitions needed for vcluster creation by the CAPI provider. The output is in YAML format, and it can be saved into a file to be used later, or it can be forwarded in the command line into the kubectl command to immediately apply the resources in the cluster. - -A vcluster instance created via vcluster provider is configured using the following environment variables: -- `KUBERNETES_VERSION` (required, string - major.minor.patch): sets the major and minor Kubernetes version of the vcluster, but the patch part is ignored(!). The `--kubernetes-version` flag can be used instead of the environment variable. -- `HELM_VALUES` (required, string - YAML formatted): A string representation of your "values.yaml" file, which defines helm values to be used when installing, or upgrading, your vcluster instance using the helm chart. An empty string - `""` is an acceptable value. -- `CHART_NAME` (optional, string): vcluster helm chart name, defaults to `vcluster` -- `CHART_REPO` (optional, string - url): vcluster helm chart repo, defaults to `https://charts.loft.sh` -- `CHART_VERSION` (optional, string - major.minor.patch): vcluster helm chart version, default value depends on the version of the provider, typicaly latest version at the time of the release -- `VCLUSTER_HOST` (optional, string): a vcluster hostname that shall be used to set the `.spec.controlPlaneEndpoint.hostname` field of the vcluster custom resource, which will be used for example in the generated kubeconfig. -- `VCLUSTER_PORT` (optional, number - 0:65535): a vcluster port that shall be used to set the `.spec.controlPlaneEndpoint.port` field of the vcluster custom resource - - -We will define some configuration elements (CLUSTER_NAME and CLUSTER_NAMESPACE) as environment variables for easier reuse across the commands. 
These will be used together with some of the environment variables mentioned above to produce a manifest defining your vcluster instance and apply it in the host cluster: -``` -export CLUSTER_NAME=vcluster -export CLUSTER_NAMESPACE=vcluster -export KUBERNETES_VERSION=1.23.0 -export HELM_VALUES="" -kubectl create namespace ${CLUSTER_NAMESPACE} -clusterctl generate cluster ${CLUSTER_NAME} \ - --infrastructure vcluster \ - --target-namespace ${CLUSTER_NAMESPACE} | kubectl apply -f - -``` - -:::tip -You can populate the HELM_VALUES variable from a `values.yaml` file using this command: -```export HELM_VALUES=$(cat values.yaml | python -c 'import yaml,sys; print(yaml.dump(sys.stdin.read()).strip()[1:-1])')``` -::: - -Next, we need to wait until vcluster custom resource reports ready status: -```shell -kubectl wait --for=condition=ready vcluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME -``` -The cluster is ready to be used once the command above exits. - -### Expose vcluster externally -There are multiple methods for exposing your vcluster instance, and they are described on the [exposing vcluster page](./external-access). - -If the documentation instructs you to update your values.yaml file, you will need to update the `HELM_VALUES` environment variable, execute the clusterctl generate cluster again, and apply the updated output to your cluster. -Alternatively, you can edit the `.spec.helmRelease.values` field of the vcluster custom resource using `kubectl edit vcluster -n $CLUSTER_NAMESPACE $CLUSTER_NAME ` command. Be mindful of the formatting of the value: the string should be a valid YAML file, with the expected indentation for the various helm values. The vcluster provider is watching for the changes made to vcluster custom resource, and it will automatically update the vcluster instance when changes are detected. - -### Connecting to your vcluster -The recommended way of connecting to your vcluster is to use vcluster CLI as described in the [accessing vcluster page](./accessing-vcluster). - -However, if your vcluster is created with the vcluster provider, and you expose your cluster externally, you have an alternative: -```clusterctl get kubeconfig ${CLUSTER_NAME} --namespace ${CLUSTER_NAMESPACE} > ./kubeconfig.yaml``` -More details about this command are in the [CAPI docs](https://cluster-api.sigs.k8s.io/clusterctl/commands/get-kubeconfig.html). diff --git a/docs/pages/operator/high-availability.mdx b/docs/pages/operator/high-availability.mdx deleted file mode 100644 index 8d6d0769d..000000000 --- a/docs/pages/operator/high-availability.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: High Availability -sidebar_label: High Availability ---- - -import HighAvailabilityK3s from '../fragments/high-availability-k3s.mdx'; -import HighAvailabilityK8s from '../fragments/high-availability-k8s.mdx'; - -vcluster supports high-availability with k3s as well as the vanilla k8s distribution. k0s is currently not supported for high availability setup of vcluster. - -## k3s - - - -## Vanilla k8s - - diff --git a/docs/pages/operator/monitoring-logging.mdx b/docs/pages/operator/monitoring-logging.mdx deleted file mode 100644 index 0cc6d5852..000000000 --- a/docs/pages/operator/monitoring-logging.mdx +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: Monitoring & Logging -sidebar_label: Monitoring & Logging ---- - -You can monitor the vcluster either from the host cluster or directly from within the vcluster. 
- -:::info -In order to get node metrics from within the vcluster, vcluster will need to have RBAC permissions to access them. These permissions are given to vcluster when synchronization of the real nodes is enabled. See [Nodes documentation page](../architecture/nodes.mdx) for more details. -::: - -### Enabling the metrics server proxy (Recommended) -:::info -This feature requires a working installation of metrics server on the host cluster -::: - -Its possible to proxy the metrics server in the underlying host cluster and get the `pod`/`node` metrics individually or both of them according to the use case. This can be enabled with the following values: -``` -proxy: - metricsServer: - nodes: - enabled: true - pods: - enabled: true -``` - -### Installing metrics server (inside vcluster) - -In case the above recommended method of getting metrics in vcluster using the metrics server proxy does not fulfil your requirements and you need a dedicated metrics server installation in the vcluster you can follow this section. -Make sure the vcluster has access to the host clusters nodes. [Enabling real nodes synchronization](../architecture/nodes.mdx) will create the required RBAC permissions. - -Install the [metrics server](https://github.com/kubernetes-sigs/metrics-server#installation) via the official method into the vcluster. - -Wait until the metrics server has started. You should be now able to use `kubectl top pods` and `kubectl top nodes` within the vcluster: -``` -kubectl top pods --all-namespaces -NAMESPACE NAME CPU(cores) MEMORY(bytes) -kube-system coredns-854c77959c-q5878 3m 17Mi -kube-system metrics-server-5fbdc54f8c-fgrqk 0m 6Mi -``` - -If you see below error after installing metrics-server (check [k3s#5334](https://github.com/k3s-io/k3s/issues/5344) for more information): - -``` -loading OpenAPI spec for "v1beta1.metrics.k8s.io" failed with: failed to retrieve openAPI spec, http error: ResponseCode: 503 -``` -Create a file named 'metrics_patch.yaml' with the following contents: -``` -spec: - template: - spec: - containers: - - name: metrics-server - command: - - /metrics-server - - --metric-resolution=30s - - --kubelet-insecure-tls=true - - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP -``` -and apply the patch with kubectl: -``` -kubectl patch deployment metrics-server --patch-file metrics_patch.yaml -n kube-system -``` - -### How does it work? - -By default, vcluster will create a service for each node which redirects incoming traffic from within the vcluster to the node kubelet to vcluster itself. This means that if workloads within the vcluster try to scrape node metrics the traffic reaches vcluster first. Vcluster will redirect the incoming request to the host cluster and rewrite the response (pod names, pod namespaces etc) and return it to the requester. - -## Monitoring the vcluster - -Vcluster is able to rewrite node stats and metrics. This means monitoring a vcluster works similar to monitoring a regular Kubernetes cluster. - -:::info -You need to make sure that vcluster has access to the host clusters nodes. [Enabling real nodes synchronization](../architecture/nodes.mdx) will create the required RBAC permissions. -::: - -Please follow the [official Kuberentes documentation](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-usage-monitoring/) on how to monitor a Kubernetes cluster. - -### How does it work? 
- -By default, vcluster will create a service for each node which redirects incoming traffic from within the vcluster to the node kubelet to vcluster itself. This means that if workloads within the vcluster try to scrape node metrics the traffic reaches vcluster first. Vcluster will redirect the incoming request to the host cluster and rewrite the response (pod names, pod namespaces etc) and return it to the requester. - -## Monitoring the vcluster StatefulSet - -vcluster exposes metrics endpoints on `https://0.0.0.0:8443/metrics` (syncer metrics) and `https://0.0.0.0:6444/metrics` (k3s metrics). In order to scrape those metrics, you will need to send an `Authorization` header with a valid virtual cluster service account token, that has permissions to access the `/metrics` endpoint within the vcluster. - - -## Logging - -You can enable logging for vcluster pods right from host cluster or from within each vcluster as well. - -## Enabling Hostpath Mapper -Vcluster internal logging relies on enabling a vcluster component called the Hostpath Mapper. -This will make sure to resolve the correct virtual pod and container names to their physical counterparts. - -To enable this component, simply create or upgrade an existing vcluster with the following values -```yaml -hostpathMapper: - enabled: true -``` - -Once deployed successfully a new `Daemonset` component of vcluster would start running on every node. - -We can now install our desired logging stack and start collecting the logs. - -## Logging with ELK and fluentd inside vcluster: -1. Install the ELK stack: - ```yaml - helm upgrade --install elk-elasticsearch elastic/elasticsearch -f elastic_values.yaml -n logging --create-namespace - helm upgrade --install elk-logstash elastic/logstash -f logstash_values.yaml -n logging - helm upgrade --install elk-kibana elastic/kibana -f kibana_values.yaml -n logging - - # optionally install filebeat if you plan to use filebeat instead of fluentd - helm upgrade --install elk-filebeat elastic/filebeat -f filebeat_values.yaml -n logging - ``` - -2. Next install fluentd daemonset, this can be found on [github](https://github.com/fluent/fluentd-kubernetes-daemonset/blob/master/fluentd-daemonset-elasticsearch-rbac.yaml): - ```yaml - kubectl apply -f fluentd-daemonset-elasticsearch.yaml - ``` - Alternatively, you can also deploy via the [helm charts provided by fluentbit](https://docs.fluentbit.io/manual/installation/kubernetes#installing-with-helm-chart). - -3. 
Check for available indices - `port-forward` the `elasticsearch-master` on port `9100` and visit the [http://localhost:9200/\_cat/indices](http://localhost:9200/_cat/indices), - you should see the following `logstash-*` indices available: - ``` - green open .geoip_databases rP6BifVQSuCv1XmctC0M_Q 1 0 40 0 38.4mb 38.4mb - green open .kibana_task_manager_7.17.3_001 p5Idg-xWTpCj4TWh6YpNrQ 1 0 17 543 123.6kb 123.6kb - yellow open logstash-2022.10.10 nyG-OW_qRKCBertmmOwwyw 1 1 895 0 416.6kb 416.6kb ◀─────┐ - green open .apm-custom-link jv3jzCztQUujEYwYv1iTIw 1 0 0 0 226b 226b │ ┌───────────────┐ - green open .apm-agent-configuration NsZHlaeGSmSc7xSa8CGcOA 1 0 0 0 226b 226b │ │ Logstash │ - yellow open logstash-2022.10.07 cW3b1TJlROCwV2BKkzpt2Q 1 1 212 0 52.1kb 52.1kb ◀─────┼──────│ Entries │ - yellow open logstash-2022.10.08 yzU4pqq3QOyZkukcmGKpaw 1 1 172 0 43.6kb 43.6kb ◀─────┤ └───────────────┘ - yellow open logstash-2022.10.09 n9GQnFB4RSWlWwkFG1848g 1 1 866 0 100.4kb 100.4kb ◀─────┘ - green open .kibana_7.17.3_001 BjXjQqXcRoiiGQg_zsrSrg 1 0 21 8 2.3mb 2.3mb - ``` - 4. Next `port-forward` the kibana dashboard on its default port `5601` and navigate to http://localhost:5601/app/management or - choose "Stack Management" from left menu side bar. - Screenshot 2022-10-10 at 3 46 50 PM - 5. Choose "Index Patterns" and click on "Create index Pattern" - Screenshot 2022-10-10 at 3 49 07 PM - 6. Type the **Name** as `logstash*` and `@timestamp` for the **Timestamp field** and click on "Create index pattern" - Screenshot 2022-10-10 at 3 50 13 PM - 7. Now you can navigate to http://localhost:5601/app/discover or click on "Discover" from the left sidebar menu and should start seeing your logs. - image - - -## Logging with Grafana and Loki -1. Install the Prometheus stack: - ``` - helm repo add prometheus-community https://prometheus-community.github.io/helm-charts - helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --namespace monitor --create-namespace - ``` -2. Install Loki: - ``` - helm repo add loki https://grafana.github.io/loki/charts - helm upgrade --install loki --namespace=monitoring grafana/loki-stack --create-namespace - ``` -3. Open the Grafana Dashboard: - * Port-forward grafana dashboard `kubectl port-forward -n monitor service/prometheus-grafana 3000:80` - * Get Grafana credentials `kubectl get secrets -n monitor prometheus-grafana -o jsonpath='{.data.admin-password}' | base64 -D` - * Navigate to http://localhost:3000 -4. Add a data source by navigating to http://localhost:3000/datasources or click "Data Sources" under the ⚙️ icon from left menu - image -5. Click on "Add Data Sources" and select "Loki" from the list. - image -6. Enter the loki endpoint in the `URL` field as `http://loki.monitoring:3100` or to the corresponding `.:` value according to your deployment, and click on "Save & test". - image -7. Next click on "Explore" or navigate to http://localhost:3000/explore and select "Loki" from the dropdown menu. Select the desired Labels and Click on "Run query". Youre logs should now start appearing. - image diff --git a/docs/pages/operator/pausing-vcluster.mdx b/docs/pages/operator/pausing-vcluster.mdx deleted file mode 100644 index 132b270b1..000000000 --- a/docs/pages/operator/pausing-vcluster.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Pausing & Resuming vcluster -sidebar_label: Pausing & Resuming vcluster ---- - -Pausing a vcluster means to temporarily scale down the vcluster and delete all its created workloads on the host cluster. 
This can be useful to save computing resources used by vcluster workloads in the host cluster. - -## Pausing a vcluster - -In order to pause a vcluster, make sure you have the CLI installed and run the following command: - -``` -vcluster pause my-vcluster -n my-vcluster-namespace -``` - -This command will do the following things: -1. Scale down the vcluster statefulset or deployment depending on which vcluster distro was used -2. Delete all the workloads created by vcluster - -The command leaves the objects within the vcluster untouched, which means that even single pods that were deployed within the vcluster without a controlling replica set or statefulset will be restarted. - -:::warning Temporary Filesystem of Pods erased -Since all the pods will be restarted, this also means that their temporary filesystem is erased as well as pod ip is changed. -::: - -## Resuming a vcluster - -To resume a vcluster, make sure you have the CLI installed and run the following command: - -``` -vcluster resume my-vcluster -n my-vcluster-namespace - -# OR: connect to the vcluster to automatically resume it as well -vcluster connect my-vcluster -``` - -As soon as the vcluster is resumed, vcluster will scale up the paused statefulset or deployment and the vcluster syncer will recreate the vcluster pods. diff --git a/docs/pages/operator/restricted-hosts.mdx b/docs/pages/operator/restricted-hosts.mdx deleted file mode 100644 index bde481c5f..000000000 --- a/docs/pages/operator/restricted-hosts.mdx +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Rootless mode & OpenShift -sidebar_label: Rootless mode & OpenShift ---- - -import NonRootSegment from '../fragments/non-root-vcluster.mdx' -import OpenshiftSegment from '../fragments/deploy-to-openshift.mdx' - -Many Kubernetes cluster operators employ policies to restrict the usage of certain features, for example running pods with the root user. -On this page you will see which options allow you to adjust vcluster configuration to successfully deploy it in such restricted host clusters. - -## Running as non-root user -If your host cluster policies disallow running containers with root user, or you simply prefer to run them this way, it is possible to configure it for vcluster components. Steps below show how to set the desired UID for syncer and control plane. The syncer also passes this UID down to the vcluster DNS deployment. - - - -:::info Values of the securityContext fields -You can substitute the runAsUser value as needed, e.g. if the host cluster limits the allowable UID ranges. -And you are free to set other [securityContext fields](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#podsecuritycontext-v1-core) as necessary to fulfill your host cluster policies. -::: - -:::caution -Running as non-root is currently supported only for the k3s distribution. While [other distributions provided by vcluster](./other-distributions.mdx) may make use of the `securityContext` field from the `values.yaml` file, we do not guarantee that they will work as expected. -::: - -:::caution -vcluster doesn't currently provide a migration path from an instance that was running as root to running with a non-root user. 
-::: - -## Running on OpenShift -By default, OpenShift doesn't allow running containers with the root user, but it assigns a random UID from the allowed range automatically, which means that you can skip the steps described in the [Running as non-root user](#running-as-non-root-user) section of this document and your vcluster should run as non-root user by default. - -OpenShift also imposes some restrictions that are not common to other Kubernetes distributions. -When deploying vcluster to OpenShift you will need to follow these additional steps: - - - -:::info Additional permission when running on OpenShift -vcluster requires `create` permission for the `endpoints/restricted` resource in the default group when running on OpenShift. -This permission is required because OpenShift has additional built-in admission controller for the Endpoint resources, which denies creation of the endpoints pointing into the cluster network or service network CIDR ranges, unless this additional permission is given. -Following the steps outline above ensures that the vcluster Role includes this permission, as it is necessary for certain networking features. -::: diff --git a/docs/pages/operator/security.mdx b/docs/pages/operator/security.mdx deleted file mode 100644 index 3b66d77fd..000000000 --- a/docs/pages/operator/security.mdx +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Isolation & Security -sidebar_label: Isolation & Security ---- - -import NonRootSegment from '../fragments/non-root-vcluster.mdx' - -vcluster can drastically increase security in multi-tenancy clusters. vcluster provides you the three security benefits out of the box: -- **Full control-plane isolation** with separate api endpoint and data storage -- **DNS isolation** as vcluster workloads are not able to resolve any services of the host cluster -- Guarantee that all workloads, services and other namespaced objects are **created in a single namespace** in the host cluster. If deployed with default settings, vcluster also ensures that no access to any cluster scoped object is required. - -Besides these benefits, vcluster by default will **not** provide any workload or network isolation. Starting with version v0.7.0, vcluster has a feature called [isolated mode](#isolated-mode), which you can enable to prevent vcluster workloads from breaking out of their virtual environment. - -In general, we recommend to deploy a single vcluster into a namespace and then isolate the namespace, which is far easier than isolating multiple vclusters from each other in a single namespace. - -## Isolated Mode -vcluster offers a feature called isolated mode to automatically isolate workloads in a virtual cluster. Isolated mode can be enabled via the `--isolate` flag in `vcluster create` or through the helm value `isolation.enabled: true`: - -``` -# Creates a new vcluster with isolated workloads -vcluster create my-vcluster --isolate -``` - -This feature imposes a couple of restrictions on vcluster workloads to make sure they do not break out of their virtual environment: -1. vcluster enforces a [Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/) on syncer level, which means that for example pods that try to run as a privileged container or mount a host path will not be synced to the host cluster. Current valid options are either baseline (default in isolated mode) or restricted. This works for every Kubernetes version regardless of Pod Security Standard support, as this is implemented in vcluster directly. 
Rejected pods will stay pending in the vcluster and in newer Kubernetes version they will be denied by the admission controller as well. -2. vcluster deploys a [resource quota](https://kubernetes.io/docs/concepts/policy/resource-quotas/) as well as a [limit range](https://kubernetes.io/docs/concepts/policy/limit-range/) alongside the vcluster itself. This allows restricting resource consumption of vcluster workloads. If enabled, sane defaults for those 2 resources are chosen. -3. vcluster deploys a [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) alongside itself that will restrict access of vcluster workloads as well as the vcluster control plane to other pods in the host cluster. (only works if your host [cluster CNI supports network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/#prerequisites)) - -You can adjust isolation settings through helm values. The default values are (also check [values.yaml](https://github.com/loft-sh/vcluster/blob/v0.7.0-alpha.1/charts/k3s/values.yaml)): -```yaml -isolation: - enabled: false - - podSecurityStandard: baseline - - resourceQuota: - enabled: true - quota: - requests.cpu: 10 - requests.memory: 20Gi - requests.storage: "100Gi" - requests.ephemeral-storage: 60Gi - limits.cpu: 20 - limits.memory: 40Gi - limits.ephemeral-storage: 160Gi - services.nodeports: 0 - services.loadbalancers: 1 - count/endpoints: 40 - count/pods: 20 - count/services: 20 - count/secrets: 100 - count/configmaps: 100 - count/persistentvolumeclaims: 20 - scopeSelector: - matchExpressions: - scopes: - - limitRange: - enabled: true - default: - ephemeral-storage: 8Gi - memory: 512Mi - cpu: "1" - defaultRequest: - ephemeral-storage: 3Gi - memory: 128Mi - cpu: 100m - - networkPolicy: - enabled: true - outgoingConnections: - ipBlock: - cidr: 0.0.0.0/0 - except: - - 100.64.0.0/10 - - 127.0.0.0/8 - - 10.0.0.0/8 - - 172.16.0.0/12 - - 192.168.0.0/16 -``` - -:::warn -In case you are using `--isolate` flag or isolated mode along with the `--expose` flag, make sure you appropriately bump up the `isolation.resourceQuotas.quota.services.nodeports` accordingly as some LoadBalancer implementations rely on `NodePorts` -::: - -## Workload Isolation - -vcluster by default will not isolate any workloads in the host cluster and only ensures that those are deployed in the same namespace. -However, isolating workloads in a single namespace can be done with in-built Kubernetes features or using the [isolated mode](#isolated-mode) shown above. - -### Resource Quota & Limit Range - -To ensure a vcluster will not consume too many resources in the host cluster, you can use a single [ResourceQuota](https://kubernetes.io/docs/concepts/policy/resource-quotas/) in the namespace where the virtual cluster is running. This could look like: - -```yaml -apiVersion: v1 -kind: ResourceQuota -metadata: - name: vcluster-quota -spec: - hard: - cpu: "10" - memory: 20Gi - pods: "10" -``` - -This allows the vcluster and all of the pods deployed inside it to only consume up to 10 vCores, 20GB of memory or to have 10 pods at maximum. If you use a resource quota, you probably also want to use a [LimitRange](https://kubernetes.io/docs/concepts/policy/limit-range/) that makes sure that needed resources are defined for each pod. 
For example: - -```yaml -apiVersion: v1 -kind: LimitRange -metadata: - name: vcluster-limit-range -spec: - limits: - - default: - memory: 512Mi - cpu: "1" - defaultRequest: - memory: 128Mi - cpu: 100m - type: Container -``` - -This limit range would ensure that containers that do not set `resources.requests` and `resources.limits` would get appropriate limits set automatically. - -### Pod Security - -Besides restricting pod resources, it's also necessary to disallow certain potential harmful pod configurations, such as privileged pods or pods that use hostPath. -If you are using Kubernetes v1.23 or higher, you can restrict the namespace where the virtual cluster is running in via the [Pod Security Admission Controller](https://kubernetes.io/docs/concepts/security/pod-security-admission/): - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: my-vcluster-namespace - labels: - pod-security.kubernetes.io/enforce: baseline - pod-security.kubernetes.io/audit: restricted - pod-security.kubernetes.io/warn: restricted -``` - -To see all supported levels and modes, please take a look at the [Kubernetes docs](https://kubernetes.io/docs/concepts/security/pod-security-standards/). - -If you are using below Kubernetes v1.23 clusters, you can use the deprecated [PodSecurityPolicies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) to disallow critical workloads. - -If you want more control over this, you can also use an admission controller, that let's you define your own policies, such as [OPA](https://www.openpolicyagent.org/docs/v0.12.2/kubernetes-admission-control/), [jsPolicy](https://www.jspolicy.com/) or [Kyverno](https://kyverno.io/). - -### Advanced Isolation - -Besides this basic workload isolation, you could also dive into more advanced isolation methods, such as isolating the workloads on separate nodes or through another container runtime. Using different nodes for your vcluster workloads can be accomplished through the [--node-selector flag](../architecture/nodes.mdx) on vcluster syncer. - -You should also be aware that pods created in the vcluster will set their [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), which will affect scheduling decisions. To prevent the pods from being scheduled to the undesirable nodes you can use the [--node-selector flag](../architecture/nodes.mdx) or admission controller as mentioned above. - -## Network Isolation - -Workloads created by vcluster will be able to communicate with other workloads in the host cluster through their cluster ips. This can be sometimes beneficial if you want to purposely access a host cluster service, which is a good method to share services between vclusters. However, you often want to isolate namespaces and do not want the pods running inside vcluster to have access to other workloads in the host cluster. -This requirement can be accomplished by using [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) for the namespace where vcluster is installed in or using the [isolated mode](#isolated-mode) shown above. - -:::info -Network policies do not work in all Kubernetes clusters and need to be supported by the underlying CNI plugin. -::: - -## Other Topics - -### Running as non root - -vcluster is able to be ran as a non root user. Steps below show how to set the desired UID for syncer and control plane. The syncer also passes this UID down to the vcluster DNS deployment. 
- - - -### Workload & Network Isolation within the vcluster - -The above mentioned methods also work for isolating workloads inside the vcluster itself, as you can just deploy resource quotas, limit ranges, admission controllers and network policies in there. To allow network policies to function correctly, you'll need to [enable this in vcluster](../architecture/networking.mdx) itself though. - -### Secret based Service Account tokens - -By default vcluster will create Service Account Tokens for each pod and inject them as an annotation in the respective pods -metadata. This can be a security risk in certain senarios. To mitigate this there's a flag `--service-account-token-secrets` in vcluster -which creates separate secrets for each pods Service Account Token and mounts it accordingly using projected volumes. This option -is not enabled by default but can be enabled on demand. To enable this one can use the `extraArgs` options of the vcluster chart as follows - -``` -syncer: - extraArgs: - - --service-account-token-secrets=true -``` \ No newline at end of file diff --git a/docs/pages/plugins/overview.mdx b/docs/pages/plugins/overview.mdx index aaf3f18f6..e7ae5f5d6 100644 --- a/docs/pages/plugins/overview.mdx +++ b/docs/pages/plugins/overview.mdx @@ -1,78 +1,78 @@ --- title: Overview -sidebar_label: Overview +sidebar_label: Plugins Overview --- -Plugins are a feature to extend the capabilities of vcluster. They allow you to add custom functionality, such as: +Plugins are a feature to extend the capabilities of vCluster. They allow you to add custom functionality, such as: 1. Syncing specific resources from or to the virtual clusters, including cluster scoped resources like cluster roles 2. Syncing custom resources from or to the virtual cluster 3. Deploying resources on virtual cluster startup, such as CRDs, applications, etc. 4. Manage resources and applications inside the host or virtual cluster -5. Enforcing certain restrictions on synced resources or extending the existing syncers of vcluster +5. Enforcing certain restrictions on synced resources or extending the existing syncers of vCluster 6. Any other operator use case that could benefit from having access to the virtual cluster and the host cluster simultaneously. A plugin in its purest form is a [Kubernetes operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) that will have access to both the virtual cluster and the host cluster simultaneously. -This is the main difference between a vcluster plugin and a regular Kubernetes operator that you would just install inside the vcluster itself. -Given this dual access, the plugin is able to translate resources between both clusters, which is the basic building block of [how vcluster works](../what-are-virtual-clusters.mdx). +This is the main difference between a vCluster plugin and a regular Kubernetes operator that you would just install inside the vCluster itself. +Given this dual access, the plugin is able to translate resources between both clusters, which is the basic building block of [how vCluster works](../what-are-virtual-clusters.mdx). :::tip Recommended Reads -In order to better understand how vcluster plugins work, it is recommended to read about Kubernetes [operators](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) as well as [controllers](https://kubernetes.io/docs/concepts/architecture/controller/). 
+In order to better understand how vCluster plugins work, it is recommended to read about Kubernetes [operators](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) as well as [controllers](https://kubernetes.io/docs/concepts/architecture/controller/). ::: ## Architecture -Each plugin will run as a sidecar container inside the vcluster pod. -This is done to allow easier communication between vcluster and the plugins as well as provide certain capabilities such as high-availability out of the box. -The plugin itself will contact the vcluster pod during startup to obtain the access credentials to the virtual and host cluster. -The plugin controllers are started with these credentials, similar to how vcluster itself starts its resource syncers. +Each plugin will run as a sidecar container inside the vCluster pod. +This is done to allow easier communication between vCluster and the plugins as well as provide certain capabilities such as high-availability out of the box. +The plugin itself will contact the vCluster pod during startup to obtain the access credentials to the virtual and host cluster. +The plugin controllers are started with these credentials, similar to how vCluster itself starts its resource syncers. ### Plugin Controllers -Resource syncing is the heart of vcluster which enables the virtual cluster to behave like an actual Kubernetes cluster. -A [Kubernetes controller](https://kubernetes.io/docs/concepts/architecture/controller/) that is responsible for resource syncing in vcluster is called a syncer. +Resource syncing is the heart of vCluster which enables the virtual cluster to behave like an actual Kubernetes cluster. +A [Kubernetes controller](https://kubernetes.io/docs/concepts/architecture/controller/) that is responsible for resource syncing in vCluster is called a syncer. This controller reacts on changes to objects within the virtual cluster and on changes to objects within the host cluster. The syncer tries to map each virtual object to a physical object in the host cluster and then compares those. After it discovers a change, the syncer ensures that the virtual cluster object and the physical cluster object are aligned in the desired state, and if not, the syncer changes either one of those objects to reflect the desired state. -Each plugin can define several of those resource syncers that would work exactly like the built-in syncers of vcluster. +Each plugin can define several of those resource syncers that would work exactly like the built-in syncers of vCluster. However, you'll not need to sync every Kubernetes resource to the host cluster, as some can stay purely virtual. Only resources that influence the workloads need to be synced, for example, pods, services, and endpoints, while others such as deployments, replicasets, namespaces etc. are only relevant to the Kubernetes control plane and hence are not needed in the host cluster. -There are sometimes also cases where you want to manage specific core resources yourself without interfering with what vcluster is syncing, for example special secrets or configmaps that were created from the host cluster or a different resource inside the host cluster. -For this use case you can label resources vcluster should ignore either on the physical or virtual side with a label `vcluster.loft.sh/controlled-by` and a custom value of your choosing. This will tell vcluster to ignore the resource in its syncers. 
+There are sometimes also cases where you want to manage specific core resources yourself without interfering with what vCluster is syncing, for example special secrets or configmaps that were created from the host cluster or a different resource inside the host cluster. +For this use case you can label resources vCluster should ignore either on the physical or virtual side with a label `vcluster.loft.sh/controlled-by` and a custom value of your choosing. This will tell vCluster to ignore the resource in its syncers. ### Plugin Hooks -Plugin hooks are a great feature to adjust current syncing behaviour of vcluster without the need to override an already existing syncer in vcluster completely. -They allow you to change outgoing objects of vcluster similar to an mutating admission controller in Kubernetes. -Requirement for an hook to work correctly is that vcluster itself would sync the resource, so hooks only work for the core resources that are synced by vcluster such as pods, services, secrets etc. +Plugin hooks are a great feature to adjust current syncing behaviour of vCluster without the need to override an already existing syncer in vCluster completely. +They allow you to change outgoing objects of vCluster similar to an mutating admission controller in Kubernetes. +Requirement for an hook to work correctly is that vCluster itself would sync the resource, so hooks only work for the core resources that are synced by vCluster such as pods, services, secrets etc. -If a plugin registers a hook to a specific resource, vcluster will forward all requests that match the plugin's defined hooks to the plugin and the plugin can then adjust or even deny the request completely. +If a plugin registers a hook to a specific resource, vCluster will forward all requests that match the plugin's defined hooks to the plugin and the plugin can then adjust or even deny the request completely. This opens up a wide variety of adjustment possibilities for plugins, where you for example only want to add a custom label or annotation. ### Plugin SDK :::tip Recommended Reads -If you want to start developing your own vcluster plugins, it is recommended that you read about [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) as well as [kube builder](https://book.kubebuilder.io/introduction.html) that uses the controller runtime internally. +If you want to start developing your own vCluster plugins, it is recommended that you read about [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) as well as [kube builder](https://book.kubebuilder.io/introduction.html) that uses the controller runtime internally. ::: -vcluster provides an [SDK](https://github.com/loft-sh/vcluster-sdk) for writing plugin controllers that abstracts a lot of the syncer complexity away from the user, but still gives you access to the underlying structures if you need it. -Internally, the vcluster SDK uses the popular [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) project, that is used by vcluster itself to create the controllers. -The vcluster SDK lets you write custom plugin controllers with just a few lines of code. +vCluster provides an [SDK](https://github.com/loft-sh/vcluster-sdk) for writing plugin controllers that abstracts a lot of the syncer complexity away from the user, but still gives you access to the underlying structures if you need it. 
+Internally, the vCluster SDK uses the popular [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) project, that is used by vCluster itself to create the controllers. +The vCluster SDK lets you write custom plugin controllers with just a few lines of code. -Since the plugin SDK interfaces are mostly compatible with the vcluster syncers, you can also take a look at how those are implemented in [the vcluster itself](https://github.com/loft-sh/vcluster/tree/main/pkg/controllers/resources), which work in most cases the same way as if those would be implemented in a plugin. -It would be even possible to reimplement all vcluster syncers in a separate plugin. +Since the plugin SDK interfaces are mostly compatible with the vCluster syncers, you can also take a look at how those are implemented in [the vCluster itself](https://github.com/loft-sh/vcluster/tree/main/pkg/controllers/resources), which work in most cases the same way as if those would be implemented in a plugin. +It would be even possible to reimplement all vCluster syncers in a separate plugin. -## Loading and Installing Plugins to vcluster +## Loading and Installing Plugins to vCluster -Since the most common distribution method of vcluster is helm, plugins are also configured via helm values. +Since the most common distribution method of vCluster is helm, plugins are also configured via helm values. If you develop a plugin of your own, we recommend creating a `plugin.yaml` (the name has no special functionality, you could also name it `my-plugin.yaml` or `extra-values.yaml`) in the following format: ``` # Plugin Definition below. This is essentially a valid helm values file that will be merged -# with the other vcluster values during vcluster create or helm install. +# with the other vCluster values during vCluster create or helm install. plugin: myPlugin: image: plugin-image @@ -109,5 +109,5 @@ You can take a look at the [vcluster-sdk repo](https://github.com/loft-sh/vclust ::: :::warning Don't install untrusted plugins -A plugin runs with the same permissions as vcluster itself does in the Kubernetes cluster and can also define additional permissions through its `plugin.yaml`, so make sure you only install plugins you trust. +A plugin runs with the same permissions as vCluster itself does in the Kubernetes cluster and can also define additional permissions through its `plugin.yaml`, so make sure you only install plugins you trust. ::: diff --git a/docs/pages/plugins/tutorial.mdx b/docs/pages/plugins/tutorial.mdx index 22b49ba30..717145fa6 100644 --- a/docs/pages/plugins/tutorial.mdx +++ b/docs/pages/plugins/tutorial.mdx @@ -3,7 +3,7 @@ title: "Development tutorial" sidebar_label: "Development tutorial" --- -In this tutorial we will implement a ConfigMap syncer. Vcluster syncs ConfigMaps out of the box, but only those that are used by one of the pods created in vcluster. Here we will have a step-by-step look at a plugin implementation that will synchronize all ConfigMaps using the [vcluster plugin SDK](https://github.com/loft-sh/vcluster-sdk). +In this tutorial we will implement a ConfigMap syncer. Vcluster syncs ConfigMaps out of the box, but only those that are used by one of the pods created in vCluster. Here we will have a step-by-step look at a plugin implementation that will synchronize all ConfigMaps using the [vcluster plugin SDK](https://github.com/loft-sh/vcluster-sdk). ### Prerequisites @@ -11,13 +11,13 @@ In this tutorial we will implement a ConfigMap syncer. 
Vcluster syncs ConfigMaps Before starting to develop, make sure you have installed the following tools on your computer: - [docker](https://docs.docker.com/) - [kubectl](https://kubernetes.io/docs/tasks/tools/) with a valid kube context configured -- [helm](https://helm.sh/docs/intro/install/), which is used to deploy vcluster and the plugin +- [helm](https://helm.sh/docs/intro/install/), which is used to deploy vCluster and the plugin - [vcluster CLI](https://www.vcluster.com/docs/getting-started/setup) v0.9.1 or higher - [Go](https://go.dev/dl/) programming language build tools ## Implementation -Check out the vcluster plugin example via: +Check out the vCluster plugin example via: ``` git clone https://github.com/loft-sh/vcluster-plugin-example.git ``` @@ -40,11 +40,11 @@ func main() { Let's break down what is happening in the `main()` function above. -`ctx := plugin.MustInit("sync-all-configmaps-plugin")` - SDK will contact the vcluster backend server and retrieve it's configuration. The returned struct of type [`RegisterContext`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/context#RegisterContext) contains information about vcluster flags, namespace, vcluster client config, controller manager objects, etc. +`ctx := plugin.MustInit("sync-all-configmaps-plugin")` - SDK will contact the vCluster backend server and retrieve it's configuration. The returned struct of type [`RegisterContext`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/context#RegisterContext) contains information about vCluster flags, namespace, vCluster client config, controller manager objects, etc. `plugin.MustRegister(syncers.NewConfigMapSyncer(ctx))` - we will implement the `NewConfigMapSyncer` function below, but for now, all we need to know is that it should return a struct that implements [`Base`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Base) interface, which is accepted by the [`MustRegister`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/plugin#MustRegister) function. We should call [`MustRegister`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/plugin#MustRegister) function for each syncer that we wish to be managed by the plugins controller manager. -`plugin.MustStart()` - this blocking function will wait until the vcluster pod where this plugin container is running becomes the leader. Next, it will call the `Init()` and `RegisterIndices()` functions on the syncers that implement the [`Initializer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Initializer) and [`IndicesRegisterer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#IndicesRegisterer) respectively. Afterwards, the SDK will start its controller managers and call the `RegisterSyncer` or `RegisterFakeSyncer` function on the syncers that implement [`FakeSyncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#FakeSyncer) and [`Syncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Syncer) interfaces. Additionally, after configuring the default controller for the syncers, the `ModifyController` function is called for the syncers that implement [`ControllerModifier`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#ControllerModifier) interface, which gives a plugin developer a chance to interact with the controller builder object. All these interfaces act like hooks into different points of the SDK to allow you to customize the controller that will call your syncer based on the changes to the watched resources. 
+`plugin.MustStart()` - this blocking function will wait until the vCluster pod where this plugin container is running becomes the leader. Next, it will call the `Init()` and `RegisterIndices()` functions on the syncers that implement the [`Initializer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Initializer) and [`IndicesRegisterer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#IndicesRegisterer) respectively. Afterwards, the SDK will start its controller managers and call the `RegisterSyncer` or `RegisterFakeSyncer` function on the syncers that implement [`FakeSyncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#FakeSyncer) and [`Syncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#Syncer) interfaces. Additionally, after configuring the default controller for the syncers, the `ModifyController` function is called for the syncers that implement [`ControllerModifier`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#ControllerModifier) interface, which gives a plugin developer a chance to interact with the controller builder object. All these interfaces act like hooks into different points of the SDK to allow you to customize the controller that will call your syncer based on the changes to the watched resources. ### Implementing a syncer for a namespaced resource @@ -82,7 +82,7 @@ You can get more familiar with the interfaces mentioned above by reading the SDK ::: -The `SyncDown` function mentioned above is called by the vcluster SDK when a given resource, e.g. a ConfigMap, is created in the vcluster, but it doesn't exist in the host cluster yet. To create a ConfigMap in the host cluster we will call the `SyncDownCreate` function with the output of the `translate` function as third parameter. This demonstrates a typical pattern used in the vcluster syncer implementations. +The `SyncDown` function mentioned above is called by the vCluster SDK when a given resource, e.g. a ConfigMap, is created in the vCluster, but it doesn't exist in the host cluster yet. To create a ConfigMap in the host cluster we will call the `SyncDownCreate` function with the output of the `translate` function as third parameter. This demonstrates a typical pattern used in the vCluster syncer implementations. ``` func (s *configMapSyncer) SyncDown(ctx *syncercontext.syncercontext, vObj client.Object) (ctrl.Result, error) { @@ -93,10 +93,10 @@ func (s *configMapSyncer) translate(vObj client.Object) *corev1.ConfigMap { return s.TranslateMetadata(vObj).(*corev1.ConfigMap) } ``` -The `TranslateMetadata` function used above produces a ConfigMap object that will be created in the host cluster. It is a deep copy of the ConfigMap from vcluster, but with certain metadata modifications - the name and labels are transformed, some vcluster labels and annotations are added, many metadata fields are stripped (uid, resourceVersion, etc.). +The `TranslateMetadata` function used above produces a ConfigMap object that will be created in the host cluster. It is a deep copy of the ConfigMap from vCluster, but with certain metadata modifications - the name and labels are transformed, some vCluster labels and annotations are added, many metadata fields are stripped (uid, resourceVersion, etc.). -Next, we need to implement code that will handle the updates of the ConfigMap. When a ConfigMap in vcluster or host cluster is updated, the vcluster SDK will call the `Sync` function of the syncer. 
Current ConfigMap resource from the host cluster and from vcluster are passed as the second and third parameters respectively. In the implementation below, you can see another pattern used by the vcluster syncers. The `translateUpdate` function will return nil when no change to the ConfigMap in the host cluster is needed, and the `SyncDownUpdate` function will not do an unnecessary update API call in such case. +Next, we need to implement code that will handle the updates of the ConfigMap. When a ConfigMap in vCluster or host cluster is updated, the vCluster SDK will call the `Sync` function of the syncer. Current ConfigMap resource from the host cluster and from vCluster are passed as the second and third parameters respectively. In the implementation below, you can see another pattern used by the vCluster syncers. The `translateUpdate` function will return nil when no change to the ConfigMap in the host cluster is needed, and the `SyncDownUpdate` function will not do an unnecessary update API call in such case. ``` @@ -131,12 +131,12 @@ func (s *configMapSyncer) translateUpdate(pObj, vObj *corev1.ConfigMap) *corev1. As you might have noticed, the changes to the Immutable field of the ConfigMap are not being checked and propagated to the updated ConfigMap. That is done just for the simplification of the code in this tutorial. In the real world use cases, there will likely be many scenarios and edge cases that you will need to handle differently than just with a simple comparison and assignment. For example, you will need to look out for label selectors that are interpreted in the host cluster, e.g. pod selectors in the NetworkPolicy resources are interpreted by the host cluster network plugin. Such selectors must be translated when synced down to the host resources. Several functions for the common use cases are [built into the SDK in the `syncer/translator` package](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer/translator#pkg-functions), including the `TranslateLabelSelector` function. -Also, notice that this example lacks the updates to the ConfigMap resource in vcluster. Here we propagate the changes only down to the ConfigMap in the host cluster, but there are resources or use cases where a syncer would update the synced resource in vcluster. For example, this might be an update of the status subresource or synchronization of any other field that some controller sets on the host side, e.g., finalizers. Implementation of such updates needs to be considered on case-by-case basis. -For some use cases, you may need to sync the resources in the opposite direction, from the host cluster up into the vcluster, or even in both directions. If that is what your plugin needs to do, you will implement the [`UpSyncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#UpSyncer) interface defined by the SDK. +Also, notice that this example lacks the updates to the ConfigMap resource in vCluster. Here we propagate the changes only down to the ConfigMap in the host cluster, but there are resources or use cases where a syncer would update the synced resource in vCluster. For example, this might be an update of the status subresource or synchronization of any other field that some controller sets on the host side, e.g., finalizers. Implementation of such updates needs to be considered on case-by-case basis. +For some use cases, you may need to sync the resources in the opposite direction, from the host cluster up into the vCluster, or even in both directions. 
If that is what your plugin needs to do, you will implement the [`UpSyncer`](https://pkg.go.dev/github.com/loft-sh/vcluster-sdk/syncer#UpSyncer) interface defined by the SDK. ### Adding a hook for changing a resource on the fly -Hooks are a great feature to adjust current syncing behaviour of vcluster without the need to override an already existing syncer in vcluster completely. They allow you to change outgoing objects of vcluster similar to an mutating admission controller in Kubernetes. Requirement for an hook to work correctly is that vcluster itself would sync the resource, so hooks only work for the core resources that are synced by vcluster such as pods, services, secrets etc. +Hooks are a great feature to adjust current syncing behaviour of vCluster without the need to override an already existing syncer in vCluster completely. They allow you to change outgoing objects of vCluster similar to an mutating admission controller in Kubernetes. Requirement for an hook to work correctly is that vCluster itself would sync the resource, so hooks only work for the core resources that are synced by vCluster such as pods, services, secrets etc. To add a hook to your plugin, you simply need to create a new struct that implements the `ClientHook` interface: @@ -166,7 +166,7 @@ func (p *podHook) Resource() client.Object { } ``` -The `Name()` function defines the name of the hook which is used for logging purposes. The `Resource()` function returns the object you want to mutate. Besides those functions you can now define what actions you want to hook into inside vcluster's syncer: +The `Name()` function defines the name of the hook which is used for logging purposes. The `Resource()` function returns the object you want to mutate. Besides those functions you can now define what actions you want to hook into inside vCluster's syncer: ``` type MutateCreateVirtual interface { MutateCreateVirtual(ctx context.Context, obj client.Object) (client.Object, error) @@ -201,7 +201,7 @@ type MutateGetPhysical interface { } ``` -By implementing one or more of the above interfaces you will receive events from vcluster that allows you to mutate an outgoing or incoming object to vcluster. +By implementing one or more of the above interfaces you will receive events from vCluster that allows you to mutate an outgoing or incoming object to vCluster. For example, to add an hook that adds a custom label to a pod, you can add the following code: ``` var _ hook.MutateCreatePhysical = &podHook{} @@ -235,8 +235,8 @@ func (p *podHook) MutateUpdatePhysical(ctx context.Context, obj client.Object) ( } ``` -Incoming objects into vcluster can be modified through the `MutateGetPhysical` or `MutateGetVirtual` which allows you to change how vcluster is retrieving objects from either the virtual or physical cluster. -This can be useful if you don't want vcluster to change something you have mutated back for example. +Incoming objects into vCluster can be modified through the `MutateGetPhysical` or `MutateGetVirtual` which allows you to change how vCluster is retrieving objects from either the virtual or physical cluster. +This can be useful if you don't want vCluster to change something you have mutated back for example. ### Build and push your plugin @@ -245,7 +245,7 @@ Now you can run docker commands to build your container image and push it to the ### Add plugin.yaml -The last step before installing your plugin is creating a yaml file with your plugin metadata. This file follows the format of the Helm values files. 
It will be merged with other values files when a vcluster is installed or upgraded. For the plugin we just implemented and built it would look like this: +The last step before installing your plugin is creating a yaml file with your plugin metadata. This file follows the format of the Helm values files. It will be merged with other values files when a vCluster is installed or upgraded. For the plugin we just implemented and built it would look like this: ``` plugin: @@ -256,11 +256,11 @@ syncer: - "--sync=-configmaps" ``` -The first three lines contain a minimal definition of a vcluster plugin - a container name based on the key (second line) and container image (third line). The last three lines then contain extra values that the plugin will apply to the vcluster chart. These are needed for this particular plugin and are not mandatory otherwise. Our plugin would be syncing some ConfigMaps that would also be synced by the built-in "configmaps" syncer of the vcluster, and to avoid conflicting updates we will disable the built-in syncer by passing an additional command-line argument to the syncer container. +The first three lines contain a minimal definition of a vCluster plugin - a container name based on the key (second line) and container image (third line). The last three lines then contain extra values that the plugin will apply to the vCluster chart. These are needed for this particular plugin and are not mandatory otherwise. Our plugin would be syncing some ConfigMaps that would also be synced by the built-in "configmaps" syncer of the vCluster, and to avoid conflicting updates we will disable the built-in syncer by passing an additional command-line argument to the syncer container. ### Deploy the plugin -You can deploy your plugin to a vcluster using the same commands as [described on the overview page](./overview.mdx#loading-and-installing-plugins-to-vcluster), for example, with the vcluster CLI. +You can deploy your plugin to a vCluster using the same commands as [described on the overview page](./overview.mdx#loading-and-installing-plugins-to-vcluster), for example, with the vCluster CLI. ``` vcluster create my-vcluster -n my-vcluster -f plugin.yaml ``` @@ -294,7 +294,7 @@ I0124 11:20:15.957331 4185 logr.go:249] plugin: Successfully started plugin. You can now change a file locally in your IDE and then restart the command in the terminal to apply the changes to the plugin. -DevSpace will create a development vcluster which will execute your plugin. Any changes made within the vcluster created by DevSpace will execute against your plugin. +DevSpace will create a development vCluster which will execute your plugin. Any changes made within the vCluster created by DevSpace will execute against your plugin. ``` vcluster list diff --git a/docs/pages/quickstart.mdx b/docs/pages/quickstart.mdx deleted file mode 100644 index 4f7b20ed6..000000000 --- a/docs/pages/quickstart.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Quickstart Guide -sidebar_label: Quickstart ---- - -import InstallCLIFragment from './fragments/install/cli.mdx' -import DeployFragment from './fragments/deploy-vcluster.mdx' -import DeleteFragment from './fragments/delete-vcluster.mdx' - -## 1. Download vcluster CLI -Use one of the following commands to download the vcluster CLI binary from GitHub: - - - -## 2. Create a vcluster - - -## 3. Use the vcluster -```bash -# Run any kubectl, helm, etc. 
command in your vcluster -kubectl get namespace -kubectl get pods -n kube-system -kubectl create namespace demo-nginx -kubectl create deployment nginx-deployment -n demo-nginx --image=nginx -kubectl get pods -n demo-nginx -``` - -## 4. Cleanup - diff --git a/docs/pages/security/isolated-mode.mdx b/docs/pages/security/isolated-mode.mdx new file mode 100644 index 000000000..630a63922 --- /dev/null +++ b/docs/pages/security/isolated-mode.mdx @@ -0,0 +1,74 @@ +--- +title: Isolated mode +sidebar_label: Isolated mode +--- + + +vCluster offers a feature called isolated mode to automatically isolate workloads in a virtual cluster. Isolated mode can be enabled via the `--isolate` flag in `vcluster create` or through the helm value `isolation.enabled: true`: + +``` +# Creates a new vCluster with isolated workloads +vcluster create my-vcluster --isolate +``` + +This feature imposes a couple of restrictions on vCluster workloads to make sure they do not break out of their virtual environment: +1. vCluster enforces a [Pod Security Standard](https://kubernetes.io/docs/concepts/security/pod-security-standards/) on syncer level, which means that for example pods that try to run as a privileged container or mount a host path will not be synced to the host cluster. Current valid options are either baseline (default in isolated mode) or restricted. This works for every Kubernetes version regardless of Pod Security Standard support, as this is implemented in vCluster directly. Rejected pods will stay pending in the vCluster and in newer Kubernetes version they will be denied by the admission controller as well. +2. vCluster deploys a [resource quota](https://kubernetes.io/docs/concepts/policy/resource-quotas/) as well as a [limit range](https://kubernetes.io/docs/concepts/policy/limit-range/) alongside the vCluster itself. This allows restricting resource consumption of vCluster workloads. If enabled, sane defaults for those 2 resources are chosen. +3. vCluster deploys a [network policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) alongside itself that will restrict access of vCluster workloads as well as the vCluster control plane to other pods in the host cluster. (only works if your host [cluster CNI supports network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/#prerequisites)) + +You can adjust isolation settings through helm values. 
The default values are (also check [values.yaml](https://github.com/loft-sh/vcluster/blob/v0.7.0-alpha.1/charts/k3s/values.yaml)):
+```yaml
+isolation:
+  enabled: false
+
+  podSecurityStandard: baseline
+
+  resourceQuota:
+    enabled: true
+    quota:
+      requests.cpu: 10
+      requests.memory: 20Gi
+      requests.storage: "100Gi"
+      requests.ephemeral-storage: 60Gi
+      limits.cpu: 20
+      limits.memory: 40Gi
+      limits.ephemeral-storage: 160Gi
+      services.nodeports: 0
+      services.loadbalancers: 1
+      count/endpoints: 40
+      count/pods: 20
+      count/services: 20
+      count/secrets: 100
+      count/configmaps: 100
+      count/persistentvolumeclaims: 20
+    scopeSelector:
+      matchExpressions:
+    scopes:
+
+  limitRange:
+    enabled: true
+    default:
+      ephemeral-storage: 8Gi
+      memory: 512Mi
+      cpu: "1"
+    defaultRequest:
+      ephemeral-storage: 3Gi
+      memory: 128Mi
+      cpu: 100m
+
+  networkPolicy:
+    enabled: true
+    outgoingConnections:
+      ipBlock:
+        cidr: 0.0.0.0/0
+        except:
+          - 100.64.0.0/10
+          - 127.0.0.0/8
+          - 10.0.0.0/8
+          - 172.16.0.0/12
+          - 192.168.0.0/16
+```
+
+:::warning
+In case you are using the `--isolate` flag or isolated mode along with the `--expose` flag, make sure you bump up `isolation.resourceQuota.quota.services.nodeports` accordingly, as some LoadBalancer implementations rely on `NodePorts`.
+:::
diff --git a/docs/pages/security/network-isolation.mdx b/docs/pages/security/network-isolation.mdx
new file mode 100644
index 000000000..3c711dd6d
--- /dev/null
+++ b/docs/pages/security/network-isolation.mdx
@@ -0,0 +1,11 @@
+---
+title: Network Isolation
+sidebar_label: Network Isolation
+---
+
+Workloads created by vCluster will be able to communicate with other workloads in the host cluster through their cluster IPs. This can sometimes be beneficial if you want to purposely access a host cluster service, which is a good method to share services between vClusters. However, you often want to isolate namespaces and do not want the pods running inside vCluster to have access to other workloads in the host cluster.
+This requirement can be accomplished by using [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) for the namespace where vCluster is installed or by using the [isolated mode](./isolated-mode.mdx) shown above.
+
+:::info
+Network policies do not work in all Kubernetes clusters and need to be supported by the underlying CNI plugin.
+:::
diff --git a/docs/pages/security/other-topics.mdx b/docs/pages/security/other-topics.mdx
new file mode 100644
index 000000000..58201300e
--- /dev/null
+++ b/docs/pages/security/other-topics.mdx
@@ -0,0 +1,27 @@
+---
+title: Other Topics
+sidebar_label: Other Topics
+---
+
+### Advanced Isolation
+
+Besides this basic workload isolation, you could also dive into more advanced isolation methods, such as isolating the workloads on separate nodes or through another container runtime. Using different nodes for your vCluster workloads can be accomplished through the [--node-selector flag](../architecture/nodes.mdx) on the vCluster syncer.
+
+You should also be aware that pods created in the vCluster will set their [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), which will affect scheduling decisions. To prevent the pods from being scheduled to undesirable nodes, you can use the [--node-selector flag](../architecture/nodes.mdx) or an admission controller as mentioned above.
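To make this node isolation more concrete, the sketch below passes the `--node-selector` flag to the syncer through the chart's `extraArgs` value, the same mechanism used for other syncer flags in these docs. The label key and value are placeholders and assume you have already labeled the nodes you want to dedicate to vCluster workloads; see the linked nodes page for the exact flag behavior.

```yaml
# Illustrative values.yaml snippet; the label key/value below are made up.
# Keeps vCluster workloads on nodes labeled dedicated=vcluster-workloads.
syncer:
  extraArgs:
    - --node-selector=dedicated=vcluster-workloads
```

A vCluster created or upgraded with such a file (for example `vcluster create my-vcluster -f values.yaml`) should then schedule its synced pods only onto the matching nodes.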
+ +### Workload & Network Isolation within the vCluster + +The above mentioned methods also work for isolating workloads inside the vCluster itself, as you can just deploy resource quotas, limit ranges, admission controllers and network policies in there. To allow network policies to function correctly, you'll need to [enable this in vCluster](../networking/networking.mdx) itself though. + +### Secret based Service Account tokens + +By default vCluster will create Service Account Tokens for each pod and inject them as an annotation in the respective pods +metadata. This can be a security risk in certain senarios. To mitigate this there's a flag `--service-account-token-secrets` in vCluster +which creates separate secrets for each pods Service Account Token and mounts it accordingly using projected volumes. This option +is not enabled by default but can be enabled on demand. To enable this one can use the `extraArgs` options of the vCluster chart as follows + +``` +syncer: + extraArgs: + - --service-account-token-secrets=true +``` \ No newline at end of file diff --git a/docs/pages/security/pod-security.mdx b/docs/pages/security/pod-security.mdx new file mode 100644 index 000000000..2656e4eb3 --- /dev/null +++ b/docs/pages/security/pod-security.mdx @@ -0,0 +1,24 @@ +--- +title: Pod Security +sidebar_label: Pod Security +--- + +Besides restricting pod resources, it's also necessary to disallow certain potential harmful pod configurations, such as privileged pods or pods that use hostPath. +If you are using Kubernetes v1.23 or higher, you can restrict the namespace where the virtual cluster is running in via the [Pod Security Admission Controller](https://kubernetes.io/docs/concepts/security/pod-security-admission/): + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: my-vcluster-namespace + labels: + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/audit: restricted + pod-security.kubernetes.io/warn: restricted +``` + +To see all supported levels and modes, please take a look at the [Kubernetes docs](https://kubernetes.io/docs/concepts/security/pod-security-standards/). + +If you are using below Kubernetes v1.23 clusters, you can use the deprecated [PodSecurityPolicies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) to disallow critical workloads. + +If you want more control over this, you can also use an admission controller, that let's you define your own policies, such as [OPA](https://www.openpolicyagent.org/docs/v0.12.2/kubernetes-admission-control/), [jsPolicy](https://www.jspolicy.com/) or [Kyverno](https://kyverno.io/). \ No newline at end of file diff --git a/docs/pages/security/quotas-limits.mdx b/docs/pages/security/quotas-limits.mdx new file mode 100644 index 000000000..926d5179a --- /dev/null +++ b/docs/pages/security/quotas-limits.mdx @@ -0,0 +1,39 @@ +--- +title: Quotas & Limits +sidebar_label: Quotas & Limits +--- + +To ensure a vCluster will not consume too many resources in the host cluster, you can use a single [ResourceQuota](https://kubernetes.io/docs/concepts/policy/resource-quotas/) in the namespace where the virtual cluster is running. This could look like: + +```yaml +apiVersion: v1 +kind: ResourceQuota +metadata: + name: vcluster-quota +spec: + hard: + cpu: "10" + memory: 20Gi + pods: "10" +``` + +This allows the vCluster and all of the pods deployed inside it to only consume up to 10 vCores, 20GB of memory or to have 10 pods at maximum. 
If you use a resource quota, you probably also want to use a [LimitRange](https://kubernetes.io/docs/concepts/policy/limit-range/) that makes sure that needed resources are defined for each pod. For example: + +```yaml +apiVersion: v1 +kind: LimitRange +metadata: + name: vcluster-limit-range +spec: + limits: + - default: + memory: 512Mi + cpu: "1" + defaultRequest: + memory: 128Mi + cpu: 100m + type: Container +``` + +This limit range would ensure that containers that do not set `resources.requests` and `resources.limits` would get appropriate limits set automatically. + diff --git a/docs/pages/security/rootless-mode.mdx b/docs/pages/security/rootless-mode.mdx new file mode 100644 index 000000000..328d93512 --- /dev/null +++ b/docs/pages/security/rootless-mode.mdx @@ -0,0 +1,27 @@ +--- +title: Rootless mode +sidebar_label: Rootless mode +--- + +import NonRootSegment from '../fragments/non-root-vcluster.mdx' + +Many Kubernetes cluster operators employ policies to restrict the usage of certain features, for example running pods with the root user. +On this page you will see which options allow you to adjust vCluster configuration to successfully deploy it in such restricted host clusters. + +## Running as non-root user +If your host cluster policies disallow running containers with root user, or you simply prefer to run them this way, it is possible to configure it for vCluster components. Steps below show how to set the desired UID for syncer and control plane. The syncer also passes this UID down to the vCluster DNS deployment. + + + +:::info Values of the securityContext fields +You can substitute the runAsUser value as needed, e.g. if the host cluster limits the allowable UID ranges. +And you are free to set other [securityContext fields](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#podsecuritycontext-v1-core) as necessary to fulfill your host cluster policies. +::: + +:::caution +Running as non-root is currently supported only for the k3s distribution. While [other distributions provided by vCluster](../deploying-vclusters/supported-distros.mdx) may make use of the `securityContext` field from the `values.yaml` file, we do not guarantee that they will work as expected. +::: + +:::caution +vCluster doesn't currently provide a migration path from an instance that was running as root to running with a non-root user. +::: \ No newline at end of file diff --git a/docs/pages/architecture/storage.mdx b/docs/pages/storage.mdx similarity index 70% rename from docs/pages/architecture/storage.mdx rename to docs/pages/storage.mdx index 9483f3bfa..7ee8d7849 100644 --- a/docs/pages/architecture/storage.mdx +++ b/docs/pages/storage.mdx @@ -8,16 +8,16 @@ sidebar_label: Storage
vcluster - Persistent Volume Provisioning
-Since the vcluster's syncer synchronizes pods to the underlying host cluster to schedule them, vcluster users can use the storage classes of the underlying host cluster to create persistent volume claims and to mount persistent volumes. By default, the host's storage classes can be used without the need to create it in the vcluster, but this can be configured by [enabling sync of "storageclasses" or "hoststorageclasses"](./synced-resources.mdx). +Since the vCluster's syncer synchronizes pods to the underlying host cluster to schedule them, vCluster users can use the storage classes of the underlying host cluster to create persistent volume claims and to mount persistent volumes. By default, the host's storage classes can be used without the need to create it in the vCluster, but this can be configured by [enabling sync of "storageclasses" or "hoststorageclasses"](./syncer/core_resources.mdx). -vcluster provides helm values to adjust this behavior during vcluster installation or upgrade. Find out more below. +vCluster provides helm values to adjust this behavior during vCluster installation or upgrade. Find out more below. ### Sync Persistent Volumes -By default, creating persistent volumes in the vcluster will have no effect, as vcluster runs without any cluster scoped access in the host cluster. However, if you enable persistentvolumes sync via helm values, the appropriate ClusterRole will be created in the host cluster and the syncer will be started with a flag that enables persistent volume synchronization from vcluster down to the underlying host cluster. +By default, creating persistent volumes in the vCluster will have no effect, as vCluster runs without any cluster scoped access in the host cluster. However, if you enable persistentvolumes sync via helm values, the appropriate ClusterRole will be created in the host cluster and the syncer will be started with a flag that enables persistent volume synchronization from vCluster down to the underlying host cluster. -#### Create a vcluster with persistent volume sync +#### Create a vCluster with persistent volume sync To enable the synchronization of the PersistentVolume and StorageClass resources add the following to your `values.yaml`: ``` @@ -25,11 +25,11 @@ sync: persistentvolumes: enabled: true # If you want to create custom storage classes - # inside the vcluster. + # inside the vCluster. storageclasses: enabled: true ``` -then create or upgrade the vcluster with: +then create or upgrade the vCluster with: ``` vcluster create my-vcluster --upgrade -f values.yaml @@ -37,7 +37,7 @@ vcluster create my-vcluster --upgrade -f values.yaml #### How does it work? -When you enable persistent volume sync, vcluster will create persistent volumes that are created in vcluster itself in the host cluster in the form of `vcluster-PERSISTENT_VOLUME_NAME-x-VCLUSTER_NAMESPACE-x-VCLUSTER_NAME` to avoid any conflicts with already existing persistent volumes or other vclusters that sync persistent volumes. vcluster will then rewrite persistent volume claims with those new names so that it seems that the virtual name was bound. +When you enable persistent volume sync, vCluster will create persistent volumes that are created in vCluster itself in the host cluster in the form of `vcluster-PERSISTENT_VOLUME_NAME-x-VCLUSTER_NAMESPACE-x-VCLUSTER_NAME` to avoid any conflicts with already existing persistent volumes or other vClusters that sync persistent volumes. 
vCluster will then rewrite persistent volume claims with those new names so that it seems that the virtual name was bound. This means that when you create a PVC in the form of: @@ -56,7 +56,7 @@ spec: storage: 5Gi ``` -vcluster will rewrite this PVC into the following in the host cluster to prevent any conflicts with already existing storage classes or persistent volumes: +vCluster will rewrite this PVC into the following in the host cluster to prevent any conflicts with already existing storage classes or persistent volumes: ```yaml apiVersion: v1 kind: PersistentVolumeClaim @@ -72,24 +72,24 @@ spec: storage: 5Gi ``` -This only happens if persistent volume sync is enabled in the vcluster. There might be cases where you want to disable this automatic rewriting of PVCs (for example if you want to mount an already existing PV of the host cluster to a PVC in the vcluster), for that case you can set the annotation called `vcluster.loft.sh/skip-translate` to `true`, which will tell vcluster to not rewrite the PVC `volumeName`, `storageClass`, `selectors` or `dataSource`. +This only happens if persistent volume sync is enabled in the vCluster. There might be cases where you want to disable this automatic rewriting of PVCs (for example if you want to mount an already existing PV of the host cluster to a PVC in the vCluster), for that case you can set the annotation called `vcluster.loft.sh/skip-translate` to `true`, which will tell vCluster to not rewrite the PVC `volumeName`, `storageClass`, `selectors` or `dataSource`. ### Sync Volume Snapshots Kubernetes VolumeSnapshot resource represents a snapshot of a volume on a storage system. You can read more about volume snapshots on [the official Kubernetes documentation page of this feature](https://kubernetes.io/docs/concepts/storage/volume-snapshots/). -By default, VolumeSnapshot syncing is disabled, and creating a VolumeSnapshot custom resource in the vcluster will have no effect. Following chapters describe how to enable this feature in the vcluster. +By default, VolumeSnapshot syncing is disabled, and creating a VolumeSnapshot custom resource in the vCluster will have no effect. Following chapters describe how to enable this feature in the vCluster. #### Host prerequisites Vcluster relies fully on the volume snapshot capabilities of the host cluster, which has to fullfil certain criteria. -Host cluster must have all relevant [snapshot CRDs](https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd) installed, without which the vcluster will fail to start when volume snapshots sync is enabled. +Host cluster must have all relevant [snapshot CRDs](https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd) installed, without which the vCluster will fail to start when volume snapshots sync is enabled. Host cluster should have a common snapshot controller installed, as well as a compatible CSI driver. Without these the volume snapshots will not be created in the storage backend. It is also recommended for the host cluster to have [the volume snapshots validating webhook](https://github.com/kubernetes-csi/external-snapshotter#validating-webhook) installed. 
-#### Create a vcluster with volume snapshots sync +#### Create a vCluster with volume snapshots sync To enable synchronization of all resources relevant for the volume snapshotting, and automatically create the necessary RBAC permissions, add the following to your `values.yaml`: ``` @@ -97,22 +97,22 @@ sync: volumesnapshots: enabled: true ``` -then create or upgrade the vcluster with: +then create or upgrade the vCluster with: ``` vcluster create my-vcluster --upgrade -f values.yaml ``` :::info -It is recommend to install [the volume snapshots validating webhook](https://github.com/kubernetes-csi/external-snapshotter#validating-webhook) in your vcluster instance. +It is recommend to install [the volume snapshots validating webhook](https://github.com/kubernetes-csi/external-snapshotter#validating-webhook) in your vCluster instance. ::: #### How does it work? -When you enable volume snapshot sync, vcluster will start watching VolumeSnapshot, VolumeSnapshotContent, and VolumeSnapshotClass CRs and syncing them between vcluster and host cluster. These resource types are synced in the following ways: +When you enable volume snapshot sync, vCluster will start watching VolumeSnapshot, VolumeSnapshotContent, and VolumeSnapshotClass CRs and syncing them between vCluster and host cluster. These resource types are synced in the following ways: -**VolumeSnapshot** resources created in the vcluster will be synced to the host cluster with the name in form of `vcluster-VOLUME_SNAPSHOT_NAME-x-VOLUME_SNAPSHOT_NAMESPACE-x-VCLUSTER_NAME`. The status and finalizers of this resource will be synced back into vcluster. The `.spec.source` field of the VolumeSnapshot resource in the host cluster will be rewritten to reference the expected PersistentVolumeClaim or VolumeSnapshotContent resource. +**VolumeSnapshot** resources created in the vCluster will be synced to the host cluster with the name in form of `vcluster-VOLUME_SNAPSHOT_NAME-x-VOLUME_SNAPSHOT_NAMESPACE-x-VCLUSTER_NAME`. The status and finalizers of this resource will be synced back into vCluster. The `.spec.source` field of the VolumeSnapshot resource in the host cluster will be rewritten to reference the expected PersistentVolumeClaim or VolumeSnapshotContent resource. -**VolumeSnapshotContent** resources created in the vcluster will be synced to the host cluster with the name in form of `vcluster-VOLUME_SNAPSHOT_NAME-x-VCLUSTER_NAMESPACE-x-VCLUSTER_NAME`. VolumeSnapshotContent resources created in the host cluster and referencing VolumeSnapshot from the vcluster will be synced into vcluster. The status and finalizers of the resource in host cluster will be synced into its vcluster representation. The `.spec.volumeSnapshotRef` field of the VolumeSnapshotContent resource will be rewritten to reference the expected VolumeSnapshot resource. +**VolumeSnapshotContent** resources created in the vCluster will be synced to the host cluster with the name in form of `vcluster-VOLUME_SNAPSHOT_NAME-x-VCLUSTER_NAMESPACE-x-VCLUSTER_NAME`. VolumeSnapshotContent resources created in the host cluster and referencing VolumeSnapshot from the vCluster will be synced into vCluster. The status and finalizers of the resource in host cluster will be synced into its vCluster representation. The `.spec.volumeSnapshotRef` field of the VolumeSnapshotContent resource will be rewritten to reference the expected VolumeSnapshot resource. -**VolumeSnapshotClass** resources will be synced from the host cluster into vcluster only. 
+**VolumeSnapshotClass** resources will be synced from the host cluster into vCluster only.
diff --git a/docs/pages/syncer/config.mdx b/docs/pages/syncer/config.mdx
new file mode 100644
index 000000000..79d1a7cc4
--- /dev/null
+++ b/docs/pages/syncer/config.mdx
@@ -0,0 +1,59 @@
+---
+title: Configuration
+sidebar_label: Configuration
+---
+
+## Enable or disable synced resources
+
+To enable synchronization of a resource, for example persistent volumes, and automatically create the necessary RBAC permissions, add the following to your `values.yaml`:
+```
+sync:
+  persistentvolumes:
+    enabled: true
+```
+then create or upgrade the vCluster with:
+
+```
+vcluster create my-vcluster --upgrade -f values.yaml
+```
+
+To disable a resource that is synced by default, for example if you don't want to sync services, set the following in your `values.yaml`:
+```
+sync:
+  services:
+    enabled: false
+```
+then create or upgrade the vCluster with:
+
+```
+vcluster create my-vcluster --upgrade -f values.yaml
+```
+
+:::warning Correct Cluster Functionality
+Disabling certain resources such as services, endpoints or pods can lead to a non-functional virtual Kubernetes cluster, so be careful with which resources you are deactivating.
+:::
+
+## Sync all Secrets and Configmaps
+With the new generic sync, vCluster currently only knows about a couple of resources that actually use secrets / configmaps and will try to sync only those into the host cluster. To avoid the problem that needed secrets / configmaps are not synced to the host cluster, this option allows syncing of all secrets and configmaps.
+To enable this, simply add the following values to the helm chart / vCluster cli options:
+```yaml
+sync:
+  secrets:
+    all: true
+  configmaps:
+    all: true
+```
+
+## Extra Pod Options
+
+By default [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/) and [readiness gates](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate) will not be synced by vCluster, as they require additional permissions. To enable them, please activate them within your values.yaml:
+
+```
+sync:
+  pods:
+    enabled: true
+    # Sync ephemeralContainers to host cluster
+    ephemeralContainers: true
+    # Sync readiness gates to host cluster
+    status: true
+```
\ No newline at end of file
diff --git a/docs/pages/syncer/core_resources.mdx b/docs/pages/syncer/core_resources.mdx
new file mode 100644
index 000000000..29396fb57
--- /dev/null
+++ b/docs/pages/syncer/core_resources.mdx
@@ -0,0 +1,37 @@
+---
+title: Synced Resources
+sidebar_label: Core Synced Resources
+---
+
+
+This section lists all resources that can currently be synced or mirrored by vCluster in the table below. Those resources can be activated or deactivated via the `values.yaml` as described below, or with the `--sync` flag of the syncer. By default, certain resources are already activated and you can either disable the default resources or tell vCluster to sync other supported resources as well.
+ +| Resource | Description | Default Enabled | +| ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| services | Mirrors services between host and virtual cluster | Yes | +| endpoints | Mirrors endpoints between host and virtual cluster | Yes | +| configmaps | Mirrors used configmaps by pods between host and virtual cluster | Yes | +| secrets | Mirrors used secrets by ingresses or pods between host and virtual cluster | Yes | +| events | Syncs events from host cluster to virtual cluster | Yes | +| pods | Mirrors pods between host and virtual cluster | Yes | +| persistentvolumeclaims | Mirrors persistent volume claims between host and virtual cluster | Yes | +| fake-nodes | Creates fake nodes based on spec.nodeName fields of synced pods. Requires no cluster role | Yes | +| fake-persistentvolumes | Creates fake persistent volumes based on spec.volumeName of persistent volume claims. Requires no cluster role | Yes | +| ingresses | Mirrors ingresses between host and virtual cluster. Automatically tries to detect the supported ingress version (networking.k8s.io/v1 or networking.k8s.io/v1beta1) | No | +| ingressclasses | Syncs IngressClasses from host cluster to virtual cluster. This is automatically enabled when Ingresses sync is enabled. | No _*_ | +| nodes | Syncs real nodes from host cluster to virtual cluster. If enabled, implies that fake-nodes is disabled. For more information see [nodes](../architecture/nodes.mdx). | No | +| persistentvolumes | Mirrors persistent volumes from vCluster to host cluster and dynamically created persistent volumes from host cluster to virtual cluster. If enabled, implies that fake-persistentvolumes is disabled. For more information see [storage](../storage.mdx). | No | +| storageclasses | Syncs created storage classes from virtual cluster to host cluster | No | +| hoststorageclasses | Syncs real storage classes from host cluster to virtual cluster. This is only needed if you require to be able to get/list StorageClasses from vCluster API server. Host storage classes can be used in PersistentVolumes and PersistentVolumeClaims without syncing them to the virtual cluster. This option was formerly named "legacy-storageclasses". | No | +| priorityclasses | Syncs created priority classes from virtual cluster to host cluster | No | +| networkpolicies | Syncs created network policies from virtual cluster to host cluster | No | +| volumesnapshots | Enables volumesnapshot, volumesnapshotcontents and volumesnapshotclasses support. Syncing behaves similar to persistentvolumeclaims, persistentvolumes and storage classes. For more information see [storage](../storage.mdx). | No | +| poddisruptionbudgets | Syncs created poddisruptionbudgets from virtual cluster to host cluster | No | +| serviceaccounts | Syncs created service accounts from virtual cluster to host cluster. This is useful for using [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) with vCluster | No | +| csidrivers | Mirrors CSIDriver objects from host cluster to vCluster. Enabled automatically when [virtual scheduler](../architecture/scheduling.mdx#separate-vcluster-scheduler) is enabled. 
Disabling this syncer while using virtual scheduler may result in incorrect pod scheduling. | No _*_ | +| csinodes | Mirrors CSINode objects from host cluster to vCluster. Enabled automatically when [virtual scheduler](../architecture/scheduling.mdx#separate-vcluster-scheduler) is enabled. Disabling this syncer while using virtual scheduler may result in incorrect pod scheduling. | No _*_ | +| csistoragecapacities | Mirrors CSIStorageCapacity Objects from host cluster to vCluster if the .nodeTopology matches a synced node. Enabled automatically when [virtual scheduler](../architecture/scheduling.mdx#separate-vcluster-scheduler) is enabled. Disabling this syncer while using virtual scheduler may result in incorrect pod scheduling. | No _*_ | + +_\* refer to the description column for claryfying information about default behavior._ + +By default, vCluster runs with a minimal set of RBAC permissions to allow execution in restricted environments. Certain resources require extra permissions, which will be automatically given to the vCluster ServiceAccount if you enable the resource sync with the associated helm value \ No newline at end of file diff --git a/docs/pages/syncer/other_resources/config_syntax.mdx b/docs/pages/syncer/other_resources/config_syntax.mdx new file mode 100644 index 000000000..857738865 --- /dev/null +++ b/docs/pages/syncer/other_resources/config_syntax.mdx @@ -0,0 +1,129 @@ +--- +title: Configuration Syntax +sidebar_label: Configuration Syntax +--- + +## Permissions + +The helm values snippet below shows an example of the generic sync configuration and related RBAC roles. There you can notice some key fields nested under `.sync.generic` value: +- the RBAC namespaced `role` and cluster scoped `clusterRole` required for the plugin - these would be adjusted to fit the needs of your use case and the configuration that you define. Note that when the ["Multi-namespace mode"](./multi_namespace_mode.mdx) is used, the namespaced role will become ClusterRole. +- the `config` field, which will populate the `CONFIG` environment variable of the vCluster syncer container - this must be a string with valid YAML formatting. It uses a custom syntax to define the behavior of the plugin. + +```yaml +sync: + generic: + clusterRole: + extraRules: + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] + role: + extraRules: + # Example for Cert Manager + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "certificates", "certificaterequests"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] + config: |- + version: v1beta1 + export: + # ... +``` + + +## Virtual to Host sync +We use the top-level `export` field in the configuration to declare which virtual resources we want to sync to the host cluster. Each item in the `export` array defines the resource via `apiVersion` and `kind` strings. Each `apiVersion` and `kind` pair can have only one entry in the `export` array. The `patches` field allows you to define how are certain fields of the synced resource modified before its creation(or update) in the host cluster. +The `reversePatches` field allows you to declare how changes to certain fields(implicitly this is done for the `status`) of the synced resource(the one created in the host cluster) are propagated back to the original resource in the virtual cluster. Besides the status, only the fields referenced in the `copyFromObject` reverse patch operations are propagated. 
+Both these fields follow the same syntax, as documented in [the "Patch syntax" chapter of this doc](#patch-syntax). + + +Example: +```yaml +sync: + generic: + config: |- + version: v1beta1 + export: + - apiVersion: cert-manager.io/v1 + kind: Certificate + patches: + - op: rewriteName + path: spec.issuerRef.name + - op: rewriteName + path: spec.secretName + reversePatches: + # Implicit reverse patch for status would be declared like so: + # - op: copyFromObject + # fromPath: status + # path: status +``` + +:::info +Only the namespaced resources are supported at this time. +::: + +**Selector for a generic Virtual to Host sync** +You can limit which resources will be synced from the virtual cluster by configuring the `selector` array. The virtual resource is synced when it matches one or more selectors, or when the `selector` field is empty. Supported selector types are: +`labelSelector` - the `key: value` map of the resource labels. All of the defined label key and values should match on the resource in the virtual cluster to be synced. Example: +```yaml +sync: + generic: + config: |- + version: v1beta1 + export: + - apiVersion: cert-manager.io/v1 + kind: Certificate + selector: + labelSelector: + "label-key": "label-value" +``` + + +## Host to Virtual sync +We use the top-level `import` field in the configuration to declare which host resources we want to sync to the virtual cluster. Each item in the `import` array defines the resource via `apiVersion` and `kind` strings. Each `apiVersion` and `kind` pair can have only one entry in the `import` array. The `patches` field allows you to define how are certain fields of the synced resource modified before its creation(or update) in the virtual cluster. +The `reversePatches` field allows you to declare how changes to certain fields of the synced resource(in this case, the one created in the virtual cluster) are propagated back to the original resource in the host cluster. Only the fields referenced in the `copyFromObject` reverse patch operations are propagated. +Both these fields follow the same syntax, as documented in [the "Patch syntax" chapter of this doc](#patch-syntax). + + +Example: +```yaml +sync: + generic: + config: |- + version: v1beta1 + import: + - kind: Secret + apiVersion: v1 + - kind: IngressClass + apiVersion: networking.k8s.io/v1 +``` + +:::info +The sync from Host to Virtual cluster is supported only in ["Multi-namespace mode"](./multi_namespace_mode.mdx) +::: + +## Patch syntax +The patch defines how will the vCluster behave when syncing each resource to and from the host cluster. Generally, a patch is defined by the field `path` and `op`(operation) that should be performed on said field. +An array of `conditions` may also be set, and in such case, the field value will be modified by a patch only if the field value matches all the conditions. +Some operation types may utilize additional fields, and these will be explained in the next chapter. 
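To give a feel for how the pieces fit together before the individual operations are described, here is a small sketch of a patch list attached to an exported resource. The resource kind is reused from the examples above, while the annotation key and value are made up; only `op`, `path` and `value` follow the syntax explained in this chapter and in the table below.

```yaml
sync:
  generic:
    config: |-
      version: v1beta1
      export:
        - apiVersion: cert-manager.io/v1
          kind: Certificate
          patches:
            # rewrite a name reference so it matches the translated host name
            - op: rewriteName
              path: spec.secretName
            # add a made-up annotation to the object created in the host cluster
            - op: add
              path: metadata.annotations
              value:
                example.com/exported-by: my-vcluster
```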
+ + + +### Patch operations + +| op | Support | Description | +| -------------------------------------- | :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| copyFromObject | all | Copy value of the field referenced in the `fromPath` from the originating object to the `path` field of the destination object. The `fromPath` can be omitted, in such case, it will default to the same field path as referenced in the `path`. | +| add | all | Add contents of the `value` into the `path` field. The `value` can be either scalar or a complex object. | +| replace | all | Replace the contents of the `path` field with the contents of the `value`. The `value` can be either scalar or a complex object. | +| remove | all | Remove the contents of the `path` field | +| rewriteName | V->H | Replaces the contents of the `path` field with transformed content based on the namespace of the synced resource. This is typically done on the fields that refer to a resource name, and on the `.metadata.name` as well(implicit). This is done to avoid naming collisions when syncing resources to the host cluster, but it is not necessary when using the ["Multi-namespace mode"](./multi_namespace_mode.mdx).
As an example, the "logstash" value of a resource in the "logging" namespace of the vCluster named "vc" is rewritten to "logstash-x-logging-x-vc". If the resulting length of the value would be over 63 characters, the last 10 characters are replaced with a hash of the full value. |
+| rewriteName + namePath + namespacePath | V->H | Similar to `rewriteName`, but with the addition of the `namePath` and/or `namespacePath`. This is used when a field of the synced resource references a different resource by namespace and name stored in two separate fields. When using this option, you would set the `path` to reference a field that is a common parent of both `namePath` and `namespacePath`, and these two fields would then contain just the relative path. For example, `path: spec.includes` + `namePath: name` + `namespacePath: namespace` for a resource that contains the name in `spec.includes.name` and the namespace in `spec.includes.namespace`. |
+| rewriteName + regex | V->H | Similar to `rewriteName`, but with the addition of the `regex` option for the patch. This is used when a string contains not just the resource name, but optionally also a namespace and other characters. For example, a string containing "namespace/name" can be correctly rewritten with the addition of the configuration option `regex: "$NAMESPACE/$NAME"`. The vCluster uses Go regular expressions to recognize the name part with the "NAME" capture group (can be written as `$NAME`), and the namespace with the "NAMESPACE" capture group (can be written as `$NAMESPACE`). |
+| rewriteLabelKey | V->H | The keys of the `.metadata.labels` of the synced resources are rewritten by vCluster and plugins. This patch type allows you to rewrite the key references in the same way, so the fields that reference labels will still reference the correct labels in their rewritten form. For example, the label key-value pair "app: curl" is rewritten to "vcluster.loft.sh/label-vcluster-x-a172cedcae: curl", so with this patch operation you can rewrite a field that contains "app" to "vcluster.loft.sh/label-vcluster-x-a172cedcae", and the controllers operating on the synced resources will work with this label just as expected.
This is not necessary when using the ["Multi-namespace mode"](./multi_namespace_mode.mdx). |
+| rewriteLabelSelector | V->H | This operation exists for the same reasons as described for the `rewriteLabelKey` operation. It is intended to be used for key-value map fields that represent a label selector. This patch operation will rewrite all keys in the field referenced by `path` to the expected format for label keys, and it will also add additional key-value pairs (with the virtual namespace and vCluster name) to avoid naming conflicts.
This is not necessary when using the ["Multi-namespace mode"](./multi_namespace_mode.mdx). |
+| rewriteLabelExpressionsSelector | V->H | Similar to `rewriteLabelSelector`, but expects `path` to reference a field with `matchLabels` and `matchExpressions` sub-fields, which will have their label keys rewritten just as described for `rewriteLabelKey`.
This is not necessary when using the ["Multi-namespace mode"]. | + + +:::info +`V->H` patch operation is supported only for patches, or reverse patches, that are executed in the virtual to host direction. +::: \ No newline at end of file diff --git a/docs/pages/syncer/other_resources/generic_sync.mdx b/docs/pages/syncer/other_resources/generic_sync.mdx new file mode 100644 index 000000000..35aa14672 --- /dev/null +++ b/docs/pages/syncer/other_resources/generic_sync.mdx @@ -0,0 +1,17 @@ +--- +title: Generic Sync +sidebar_label: Generic Sync +--- + +Besides the plugins, vCluster provides a way to define additional resources that should be synced in a generic and declarative way with just a few lines of a YAML configuration. This feature is a successor to the [vcluster-generic-crd-sync-plugin](https://github.com/loft-sh/vcluster-generic-crd-sync-plugin) project and is included since v0.14.0 release. The full range of the generic sync features is available only in the vCluster created in the "multi-namespace mode", see the ["Multi-namespace mode" chapter](#multi-namespace-mode) for details. + +You will need to declare which CRD Kinds you would like to sync from the virtual cluster to the host cluster, or vice versa, and the vCluster will automatically copy the CRD definition from the host cluster into vCluster at the start. Then it will take care of watching the resources of the predefined Kinds and execute the synchronization logic based on the configuration provided to it. The vCluster may automatically transform the resource metadata(such as name, namespace, labels, etc.) as is common for resources synced by vCluster. In addition to the implicit metadata transformations, you can configure transformations that will be performed on other fields of the resource, and these will depend on the meaning of those fields. You may also declare which fields will be copied in the opposite direction, from the synced resource to the original one. +Many controllers create Kubernetes resources as a result of custom resources, for example, cert-manager creates Secrets based on Certificate custom resources, and this feature will allow you to sync these resources from the host cluster into the virtual one. The following chapters describe the configuration options in more detail. + +:::tip +You may find configuration examples in the ["generic-sync-examples" folder in the vCluster repo](https://github.com/loft-sh/vcluster/tree/main/generic-sync-examples). +::: + +:::warning Alpha feature +Generic sync feature is currently in an alpha state. This is an advanced feature that requires more permissions in the host cluster, and as a result, it can potentially cause significant disruption in the host cluster. +::: diff --git a/docs/pages/syncer/other_resources/multi_namespace_mode.mdx b/docs/pages/syncer/other_resources/multi_namespace_mode.mdx new file mode 100644 index 000000000..eff52c309 --- /dev/null +++ b/docs/pages/syncer/other_resources/multi_namespace_mode.mdx @@ -0,0 +1,19 @@ +--- +title: Multi-Namespace Mode +sidebar_label: Multi-Namespace Mode +--- + +In this mode vCluster diverges from the [architecture described previously](../../architecture/overview.mdx). By default, all namespaced resources that need to be synced to the host cluster are created in the namespace where vCluster is installed. But in multi-namespace mode vCluster will create a namespace in the host cluster for each namespace in the virtual cluster. 
The namespace name is modified to avoid conflicts between multiple vCluster instances in the same host, but the synced namespaced resources are created with the same name as in the virtual cluster. To enable this mode use the following helm value: + +```yaml +multiNamespaceMode: + enabled: true +``` + +:::warning This mode must be enabled during vCluster creation. +Enabling, or disabling, it on an existing vCluster instance will force it into an inconsistent state. +::: + +:::warning Alpha feature +Multi-namespace mode is currently in an alpha state. This is an advanced feature that requires more permissions in the host cluster, and as a result, it can potentially cause significant disruption in the host cluster. +::: \ No newline at end of file diff --git a/docs/pages/syncer/other_resources/overview.mdx b/docs/pages/syncer/other_resources/overview.mdx new file mode 100644 index 000000000..b0c94098e --- /dev/null +++ b/docs/pages/syncer/other_resources/overview.mdx @@ -0,0 +1,8 @@ +--- +title: Sync other resources +sidebar_label: Sync other resources +--- + +Syncing other resources such as deployments, statefulsets and namespaces is usually not needed as those just control lower level resources and since those lower level resources are synced the cluster can function correctly. + +However, there might be cases though where custom syncing of resources might be needed or beneficial. In order to accomplish this, vCluster provides an [SDK](https://github.com/loft-sh/vcluster-sdk) to develop your own resource syncers as plugins. To find out more, please take a look at the [plugins documentation](../../plugins/overview.mdx). diff --git a/docs/pages/operator/external-access.mdx b/docs/pages/using-vclusters/access.mdx similarity index 80% rename from docs/pages/operator/external-access.mdx rename to docs/pages/using-vclusters/access.mdx index e2f1e58ed..6b3ab74ed 100644 --- a/docs/pages/operator/external-access.mdx +++ b/docs/pages/using-vclusters/access.mdx @@ -1,31 +1,146 @@ --- -title: Exposing vcluster (ingress etc.) -sidebar_label: Exposing vcluster (ingress etc.) +title: Accessing vCluster +sidebar_label: Access --- -By default, vcluster is only reachable via port-forwarding in remote clusters. However, this means that you need access to the host cluster, where the vcluster is running, in order to access it. To directly access vcluster without port-forwarding, you can use one of the following methods: -- [LoadBalancer service](#loadbalancer-service) -- [NodePort service](#nodeport-service) -- [Ingress](#ingress) -- [In-Cluster](#in-cluster) +By default, vCluster is only reachable via port-forwarding in remote clusters. However, this means that you need access to the host cluster, where the vCluster is running, in order to access it. To directly access vCluster without port-forwarding, you can use one of the following methods. :::info Local Kubernetes Clusters -If you are using a local Kubernetes cluster, such as docker-desktop, rancher-desktop, KinD or minikube, vcluster will automatically connect to it without the need of port-forwarding. +If you are using a local Kubernetes cluster, such as docker-desktop, rancher-desktop, KinD or minikube, vCluster will automatically connect to it without the need of port-forwarding. 
::: -## LoadBalancer service +### Via Ingress -The easiest way is to use the flag `--expose` in `vcluster create` to tell vcluster to use a LoadBalancer service: +An [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) with SSL passthrough support will provide the best user experience, but there is a workaround if this feature is not natively supported. + +- [Kubernetes Nginx](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough) +- [Traefik Proxy](https://doc.traefik.io/traefik/routing/routers/#passthrough) +- [Emissary](https://www.getambassador.io/docs/emissary/latest/topics/using/tcpmappings#tls-termination) + +Make sure your ingress controller is installed and healthy on the cluster that will host your virtual clusters. Create the following `ingress.yaml` for a vCluster called `my-vcluster` in the namespace `my-vcluster`: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + # We need the ingress to pass through ssl traffic to the vCluster + # This only works for the nginx-ingress (enable via --enable-ssl-passthrough + # https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough ) + # for other ingress controllers please check their respective documentation. + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + name: vcluster-ingress + namespace: my-vcluster +spec: + ingressClassName: nginx # use your ingress class name + rules: + - host: my-vcluster.example.com + http: + paths: + - backend: + service: + name: my-vcluster + port: + number: 443 + path: / + pathType: ImplementationSpecific +``` + +Create the resource in the namespace via: +``` +kubectl apply -f ingress.yaml +``` + +:::info Enable SSL Passthrough Feature +If you are using the ingress nginx controller, please make sure you have [enabled the SSL passthrough feature](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough) as it is disabled by default. +::: + +:::warning SSL Passthrough required +In order for this ingress to work correctly, you will need to enable SSL passthrough as TLS termination has to happen at the vCluster level and not ingress controller level. If you cannot do that, please take a look below for using an ingress without ssl passthrough. 
+::: + +Now create a `values.yaml` to create the vCluster with: +```yaml +syncer: + extraArgs: + - --tls-san=my-vcluster.example.com +``` + +Create the virtual cluster with: +``` +vcluster create my-vcluster -n my-vcluster --connect=false -f values.yaml +``` + +Retrieve the kube config via: +``` +vcluster connect my-vcluster -n my-vcluster --update-current=false --server=https://my-vcluster.example.com +``` + +Access the vCluster: +``` +export KUBECONFIG=./kubeconfig.yaml + +# Run any kubectl command +kubectl get ns +``` + +#### Ingress without SSL-Passthrough + +If you cannot configure your ingress controller to use ssl-passthrough, you can also create an ingress similar to this: +``` +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/ssl-redirect: "true" + name: vcluster-ingress + namespace: my-vcluster +spec: + ingressClassName: nginx # use your ingress class name + rules: + - host: my-vcluster.example.com + http: + paths: + - backend: + service: + name: my-vcluster + port: + number: 443 + path: / + pathType: ImplementationSpecific + tls: + - hosts: + - my-vcluster.example.com +``` + +With this configuration you will need to use [service account authentication](./kube-context.mdx#connect-via-service-accounts) in order to connect as the ingress controller won't be able to resolve the client-cert and client-key which is used by default as authentication method. To create a kube config that uses a service account, please run the following command: + +``` +vcluster connect my-vcluster -n my-vcluster --server=https://my-vcluster.example.com --service-account admin --cluster-role cluster-admin --insecure +``` + +Then access the vCluster: +``` +# Run any kubectl command +kubectl get ns +``` + +### Via LoadBalancer service + +The easiest way is to use the flag `--expose` in `vcluster create` to tell vCluster to use a LoadBalancer service: ```bash -# Create a new vcluster with a LoadBalancer +# Create a new vCluster with a LoadBalancer vcluster create my-vcluster --expose -# Run any kube command in the vcluster +# Run any kube command in the vCluster kubectl get ns ``` -Thats it, your vcluster is now externally reachable through a LoadBalancer service. +Thats it, your vCluster is now externally reachable through a LoadBalancer service. :::warning Check the costs first Even though using a LoadBalancer is the easiest option, if you use a cloud provider it will be costly to create one Loadbalancer per cluster. Check your cloud vendor about the cost of each LoadBalancer. In general using an Ingress is the most cost effective method. 
@@ -33,7 +148,7 @@ Even though using a LoadBalancer is the easiest option, if you use a cloud provi #### Manual LoadBalancer service creation -Instead of using the built-in flag `--expose`, you can also create the following `load-balancer.yaml` for a vcluster called `my-vcluster` in the namespace `my-vcluster` yourself: +Instead of using the built-in flag `--expose`, you can also create the following `load-balancer.yaml` for a vCluster called `my-vcluster` in the namespace `my-vcluster` yourself: ```yaml apiVersion: v1 kind: Service @@ -57,14 +172,14 @@ Create the resource in the namespace via: kubectl apply -f load-balancer.yaml ``` -Find out the external ip via `kubectl get svc vcluster-loadbalancer -n my-vcluster`: +Find out the external ip via `kubectl get svc vCluster-loadbalancer -n my-vcluster`: ``` kubectl get svc vcluster-loadbalancer -n my-vcluster NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE vcluster-loadbalancer LoadBalancer 10.68.9.239 x.x.x.x 443:32678/TCP 7m15s ``` -Now create a `values.yaml` to create the vcluster with: +Now create a `values.yaml` to create the vCluster with: ```yaml syncer: extraArgs: @@ -82,15 +197,15 @@ Update the current kube config via: vcluster connect my-vcluster -n my-vcluster --server=https://x.x.x.x ``` -Access the vcluster: +Access the vCluster: ``` # Run any kube context command kubectl get ns ``` -## NodePort service +### Via NodePort service -You can also expose the vcluster via a NodePort service. Create the following `nodeport.yaml` for a vcluster called `my-vcluster` in the namespace `my-vcluster`: +You can also expose the vCluster via a NodePort service. Create the following `nodeport.yaml` for a vCluster called `my-vcluster` in the namespace `my-vcluster`: ```yaml apiVersion: v1 @@ -130,7 +245,7 @@ gke-cluster-1-default-pool-8f0bb8bb-vl79 Ready 6d v1.20.6-gke.1 gke-cluster-1-default-pool-8f0bb8bb-wpkp Ready 6d v1.20.6-gke.1000 10.156.0.16 z.z.z.z Container-Optimized OS from Google 5.4.104+ containerd://1.4.3 ``` -Now create a `values.yaml` to create the vcluster with: +Now create a `values.yaml` to create the vCluster with: ```yaml syncer: extraArgs: @@ -147,7 +262,7 @@ Retrieve the kube config via: vcluster connect my-vcluster -n my-vcluster --update-current=false --server=https://x.x.x.x ``` -Access the vcluster: +Access the vCluster: ``` export KUBECONFIG=./kubeconfig.yaml @@ -155,128 +270,9 @@ export KUBECONFIG=./kubeconfig.yaml kubectl get ns ``` -## Ingress - -An [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) with SSL passthrough support will provide the best user experience, but there is a workaround if this feature is not natively supported. - -- [Kubernetes Nginx](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough) -- [Traefik Proxy](https://doc.traefik.io/traefik/routing/routers/#passthrough) -- [Emissary](https://www.getambassador.io/docs/emissary/latest/topics/using/tcpmappings#tls-termination) - -Make sure your ingress controller is installed and healthy on the cluster that will host your virtual clusters. 
Create the following `ingress.yaml` for a vcluster called `my-vcluster` in the namespace `my-vcluster`: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - # We need the ingress to pass through ssl traffic to the vcluster - # This only works for the nginx-ingress (enable via --enable-ssl-passthrough - # https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough ) - # for other ingress controllers please check their respective documentation. - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - nginx.ingress.kubernetes.io/ssl-passthrough: "true" - nginx.ingress.kubernetes.io/ssl-redirect: "true" - name: vcluster-ingress - namespace: my-vcluster -spec: - ingressClassName: nginx # use your ingress class name - rules: - - host: my-vcluster.example.com - http: - paths: - - backend: - service: - name: my-vcluster - port: - number: 443 - path: / - pathType: ImplementationSpecific -``` - -Create the resource in the namespace via: -``` -kubectl apply -f ingress.yaml -``` - -:::info Enable SSL Passthrough Feature -If you are using the ingress nginx controller, please make sure you have [enabled the SSL passthrough feature](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#ssl-passthrough) as it is disabled by default. -::: - -:::warning SSL Passthrough required -In order for this ingress to work correctly, you will need to enable SSL passthrough as TLS termination has to happen at the vcluster level and not ingress controller level. If you cannot do that, please take a look below for using an ingress without ssl passthrough. -::: - -Now create a `values.yaml` to create the vcluster with: -```yaml -syncer: - extraArgs: - - --tls-san=my-vcluster.example.com -``` - -Create the virtual cluster with: -``` -vcluster create my-vcluster -n my-vcluster --connect=false -f values.yaml -``` - -Retrieve the kube config via: -``` -vcluster connect my-vcluster -n my-vcluster --update-current=false --server=https://my-vcluster.example.com -``` - -Access the vcluster: -``` -export KUBECONFIG=./kubeconfig.yaml - -# Run any kubectl command -kubectl get ns -``` - -### Ingress without SSL-Passthrough - -If you cannot configure your ingress controller to use ssl-passthrough, you can also create an ingress similar to this: -``` -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - nginx.ingress.kubernetes.io/ssl-redirect: "true" - name: vcluster-ingress - namespace: my-vcluster -spec: - ingressClassName: nginx # use your ingress class name - rules: - - host: my-vcluster.example.com - http: - paths: - - backend: - service: - name: my-vcluster - port: - number: 443 - path: / - pathType: ImplementationSpecific - tls: - - hosts: - - my-vcluster.example.com -``` - -With this configuration you will need to use [service account authentication](./accessing-vcluster.mdx#connect-via-service-accounts) in order to connect as the ingress controller won't be able to resolve the client-cert and client-key which is used by default as authentication method. 
To create a kube config that uses a service account, please run the following command: - -``` -vcluster connect my-vcluster -n my-vcluster --server=https://my-vcluster.example.com --service-account admin --cluster-role cluster-admin --insecure -``` - -Then access the vcluster: -``` -# Run any kubectl command -kubectl get ns -``` - -## In-Cluster +### From Host Cluster -In order to access the virtual cluster from within the host cluster, you can directly connect to the vcluster service. Make sure you can access that service and then create a kube config in the following form: +In order to access the virtual cluster from within the host cluster, you can directly connect to the vCluster service. Make sure you can access that service and then create a kube config in the following form: ``` vcluster connect my-vcluster -n my-vcluster --server=my-vcluster.my-vcluster --insecure --update-current=false ``` diff --git a/docs/pages/operator/backup.mdx b/docs/pages/using-vclusters/backup-restore.mdx similarity index 80% rename from docs/pages/operator/backup.mdx rename to docs/pages/using-vclusters/backup-restore.mdx index fe6875311..8cc82672f 100644 --- a/docs/pages/operator/backup.mdx +++ b/docs/pages/using-vclusters/backup-restore.mdx @@ -3,8 +3,8 @@ title: Backup & Restore sidebar_label: Backup & Restore --- -Backing up and restoring a virtual cluster usually means to backup the namespace where vcluster is installed in. -If you are using an [external datastore](./external-datastore.mdx) like MySQL or Postgresql that is **not** running inside the same namespace as vcluster, you will need to create a separate backup for the datastore as well. Please refer to the [appropriate docs](https://rancher.com/docs/k3s/latest/en/backup-restore/) for doing that. +Backing up and restoring a virtual cluster usually means to backup the namespace where vCluster is installed in. +If you are using an external datastore like MySQL or Postgresql that is **not** running inside the same namespace as vCluster, you will need to create a separate backup for the datastore as well. Please refer to the [appropriate docs](https://rancher.com/docs/k3s/latest/en/backup-restore/) for doing that. ## Using velero @@ -12,7 +12,7 @@ We recommend [velero](https://velero.io/) to backup virtual clusters, as it supp Make sure your cluster supports [volume snapshots](https://kubernetes.io/docs/concepts/storage/volume-snapshots/) to allow velero to backup persistent volumes and persistent volume claims that save the virtual cluster state. Alternatively, you can use [velero's restic integration](https://velero.io/docs/main/restic/) to backup the virtual cluster state. -### Backing up a vcluster +### Backing up a vCluster Make sure to install the [velero cli](https://velero.io/docs/main/basic-install/), [velero server components](https://velero.io/docs/v1.8/supported-providers/) and run the following command: ``` @@ -50,9 +50,9 @@ Resources: ... ``` -### Restoring a vcluster +### Restoring a vCluster -After you have created a backup through either the velero cli or a schedule, you can restore a vcluster from the created backup via the velero cli: +After you have created a backup through either the velero cli or a schedule, you can restore a vCluster from the created backup via the velero cli: ``` velero restore create --from-backup ``` @@ -62,21 +62,21 @@ Verify the restore process via: velero restore logs ``` -This should recreate the vcluster workloads, configurations as well as vcluster state in the virtual cluster namespace. 
+This should recreate the vCluster workloads, configurations as well as vCluster state in the virtual cluster namespace. -:::warning Moving vclusters -Currently its quite difficult to move a vcluster from one namespace to another as there are objects that include a namespace reference such as the cluster role bindings or persistent volumes. velero supports namespace mapping that should work in most cases, but caution is still required as this might not work for every vcluster setup. +:::warning Moving vClusters +Currently its quite difficult to move a vCluster from one namespace to another as there are objects that include a namespace reference such as the cluster role bindings or persistent volumes. velero supports namespace mapping that should work in most cases, but caution is still required as this might not work for every vCluster setup. ::: -## Using velero inside vcluster -To use velero for making backups you need to enable the `hostpath-mapper` component of vcluster. You can do this by adding this to your values.yaml file when creating or upgrading the vcluster +## Using velero inside vCluster +To use velero for making backups you need to enable the `hostpath-mapper` component of vCluster. You can do this by adding this to your values.yaml file when creating or upgrading the vCluster ```yaml hostpathMapper: enabled: true ``` -This will start a the vcluster component for mapping the correct hostpaths as a `Daemonset`. +This will start a the vCluster component for mapping the correct hostpaths as a `Daemonset`. -Once done you need to install velero cli as explained above and then connect to your vcluster, and install velero: +Once done you need to install velero cli as explained above and then connect to your vCluster, and install velero: ``` velero install --provider --bucket --secret-file --plugins velero/velero-plugin-for-: --use-restic ``` diff --git a/docs/pages/operator/accessing-vcluster.mdx b/docs/pages/using-vclusters/kube-context.mdx similarity index 68% rename from docs/pages/operator/accessing-vcluster.mdx rename to docs/pages/using-vclusters/kube-context.mdx index df1fa6140..9ec95e428 100644 --- a/docs/pages/operator/accessing-vcluster.mdx +++ b/docs/pages/using-vclusters/kube-context.mdx @@ -1,16 +1,16 @@ --- -title: Accessing vcluster -sidebar_label: Accessing vcluster +title: Accessing vCluster +sidebar_label: Kube-Context --- -There are multiple ways how you can access a vcluster with an external application like `kubectl`. +There are multiple ways how you can access a vCluster with an external application like `kubectl`. -## Using the vcluster CLI +## Using the vCluster CLI -Please make sure to [install the vcluster CLI](../getting-started/setup.mdx). Connect to the vcluster via: +Please make sure to [install the vCluster CLI](../getting-started/setup.mdx). 
Connect to the vCluster via: ``` -# Connect and switch the current context to the vcluster +# Connect and switch the current context to the vCluster vcluster connect my-vcluster -n my-vcluster # Switch back context @@ -19,14 +19,14 @@ vcluster disconnect # Create a separate kube config to use instead of changing the current context vcluster connect my-vcluster --update-current=false -# Execute a command directly with vcluster context without changing the current context +# Execute a command directly with vCluster context without changing the current context vcluster connect my-vcluster -- kubectl get namespaces vcluster connect my-vcluster -- bash ``` -Depending on if the vcluster was created within a local Kubernetes cluster or with the `--expose` flag, the CLI will either start port-forwarding or create a context that can be used directly. +Depending on if the vCluster was created within a local Kubernetes cluster or with the `--expose` flag, the CLI will either start port-forwarding or create a context that can be used directly. -If you have manually [exposed the vcluster](./external-access.mdx), you can specify the domain where the vcluster is reachable via the `--server` flag: +If you have manually [exposed the vCluster](./access.mdx), you can specify the domain where the vCluster is reachable via the `--server` flag: ``` # Will create a kube context that uses https://my-domain.org as endpoint @@ -35,11 +35,11 @@ vcluster connect my-vcluster -n my-vcluster --server my-domain.org ## Connect via Service Accounts -By default, vcluster will update the current kube config to access the vcluster that contains the default admin client certificate and client key to authenticate to the vcluster. This means that all kube configs generated will have cluster admin access within the vcluster. +By default, vCluster will update the current kube config to access the vCluster that contains the default admin client certificate and client key to authenticate to the vCluster. This means that all kube configs generated will have cluster admin access within the vCluster. -Often this might not be desired. Instead of giving a user admin access to the virtual cluster, you can also use [service account authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens) to the virtual cluster. Let's say we want to create a kube config that only has view access in the virtual cluster. Then you would create a new service account inside the vcluster and assign it the cluster role `view` via a cluster role binding. Then we would generate a service account token and use that instead of the client-cert and client-key inside the kube config. +Often this might not be desired. Instead of giving a user admin access to the virtual cluster, you can also use [service account authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens) to the virtual cluster. Let's say we want to create a kube config that only has view access in the virtual cluster. Then you would create a new service account inside the vCluster and assign it the cluster role `view` via a cluster role binding. Then we would generate a service account token and use that instead of the client-cert and client-key inside the kube config. 
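+
+As a rough sketch of that manual approach (hypothetical names, run with a kube context that already points at the vCluster; `kubectl create token` requires Kubernetes v1.24 or newer, on older versions you would read the token from the service account's secret instead):
+
+```
+# Create the service account inside the vCluster and bind it to the view cluster role
+kubectl create serviceaccount my-user -n kube-system
+kubectl create clusterrolebinding my-user-view --clusterrole=view --serviceaccount=kube-system:my-user
+
+# Generate a token to put into the kube config instead of the client certificate and key
+kubectl create token my-user -n kube-system
+```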
-With vcluster version `v0.6.0` and higher you can automatically do this via the CLI: +With vCluster version `v0.6.0` and higher you can automatically do this via the CLI: ``` vcluster connect my-vcluster -n my-vcluster --service-account kube-system/my-user --cluster-role view @@ -87,13 +87,13 @@ kubectl create namespace test Error from server (Forbidden): namespaces is forbidden: User "system:serviceaccount:kube-system:my-user" cannot create resource "namespaces" in API group "" at the cluster scope ``` -You can replace the token field in the kube config with any other service account token from inside the vcluster to act as this service account against the vcluster. For more information about service accounts and tokens, please refer to the [official Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens). +You can replace the token field in the kube config with any other service account token from inside the vCluster to act as this service account against the vCluster. For more information about service accounts and tokens, please refer to the [official Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens). -## Retrieving the kube config from the vcluster secret +## Retrieving the kube config from the vCluster secret -There might be cases where connecting to a vcluster with the CLI is not feasible or the CLI cannot be installed. For such cases, you can retrieve the vcluster kube config from a secret that is created automatically in the vcluster namespace. +There might be cases where connecting to a vCluster with the CLI is not feasible or the CLI cannot be installed. For such cases, you can retrieve the vCluster kube config from a secret that is created automatically in the vCluster namespace. -The secret is prefixed with `vc-` and ends with the vcluster name, so a vcluster called `my-vcluster` in namespace `test` would create a secret called `vc-my-vcluster` in the namespace `test`. You can retrieve the kube config after the vcluster has started via: +The secret is prefixed with `vc-` and ends with the vCluster name, so a vCluster called `my-vcluster` in namespace `test` would create a secret called `vc-my-vcluster` in the namespace `test`. You can retrieve the kube config after the vCluster has started via: ``` kubectl get secret vc-my-vcluster -n test --template={{.data.config}} | base64 -D @@ -123,23 +123,23 @@ users: client-key-data: LS0tLS... ``` -By default, the server `https://localhost:8443` is used that would work if you port forward the vcluster with: +By default, the server `https://localhost:8443` is used that would work if you port forward the vCluster with: ``` kubectl port-forward my-vcluster-0 -n test 8443 ``` :::tip -With the syncer flag `--out-kube-config-secret-namespace` you can specify a different namespace where the kube config secret should be created in. Keep in mind that you have to manually apply RBAC permissions for the vcluster to allow creation and retrieving of secrets in that namespace. +With the syncer flag `--out-kube-config-secret-namespace` you can specify a different namespace where the kube config secret should be created in. Keep in mind that you have to manually apply RBAC permissions for the vCluster to allow creation and retrieving of secrets in that namespace. 
::: -### Externally accessible vclusters +### Externally accessible vClusters -If you have [exposed the vcluster](./external-access.mdx), you can also tell the vcluster to create the kube config secret with another server endpoint through the `--out-kube-config-server` flag. +If you have [exposed the vCluster](./access.mdx), you can also tell the vCluster to create the kube config secret with another server endpoint through the `--out-kube-config-server` flag. -For example, if you want to expose a vcluster at `https://my-domain.org`, you can create a `values.yaml` like this: +For example, if you want to expose a vCluster at `https://my-domain.org`, you can create a `values.yaml` like this: ```yaml -# Make sure vcluster will sign the server certs for my-domain.org +# Make sure vCluster will sign the server certs for my-domain.org # and use it in the generated kube config secret. syncer: extraArgs: @@ -147,13 +147,13 @@ syncer: - --out-kube-config-server=https://my-domain.org ``` -Then you can create or upgrade the vcluster with: +Then you can create or upgrade the vCluster with: ``` vcluster create my-vcluster -n my-vcluster --upgrade --connect=false -f values.yaml ``` -Wait until the vcluster has started and you can retrieve the kube config via: +Wait until the vCluster has started and you can retrieve the kube config via: ``` kubectl get secret vc-my-vcluster -n my-vcluster --template={{.data.config}} | base64 -D diff --git a/docs/pages/using-vclusters/pausing-vcluster.mdx b/docs/pages/using-vclusters/pausing-vcluster.mdx new file mode 100644 index 000000000..635d4a950 --- /dev/null +++ b/docs/pages/using-vclusters/pausing-vcluster.mdx @@ -0,0 +1,37 @@ +--- +title: Pausing & Resuming vCluster +sidebar_label: Pausing & Resuming vCluster +--- + +Pausing a vCluster means to temporarily scale down the vCluster and delete all its created workloads on the host cluster. This can be useful to save computing resources used by vCluster workloads in the host cluster. + +## Pausing a vCluster + +In order to pause a vCluster, make sure you have the CLI installed and run the following command: + +``` +vcluster pause my-vcluster -n my-vcluster-namespace +``` + +This command will do the following things: +1. Scale down the vCluster statefulset or deployment depending on which vCluster distro was used +2. Delete all the workloads created by vCluster + +The command leaves the objects within the vCluster untouched, which means that even single pods that were deployed within the vCluster without a controlling replica set or statefulset will be restarted. + +:::warning Temporary Filesystem of Pods erased +Since all the pods will be restarted, this also means that their temporary filesystem is erased as well as pod ip is changed. +::: + +## Resuming a vCluster + +To resume a vCluster, make sure you have the CLI installed and run the following command: + +``` +vcluster resume my-vcluster -n my-vcluster-namespace + +# OR: connect to the vCluster to automatically resume it as well +vcluster connect my-vcluster +``` + +As soon as the vCluster is resumed, vCluster will scale up the paused statefulset or deployment and the vCluster syncer will recreate the vCluster pods. diff --git a/docs/pages/what-are-virtual-clusters.mdx b/docs/pages/what-are-virtual-clusters.mdx index 7804c9958..4b651aeed 100644 --- a/docs/pages/what-are-virtual-clusters.mdx +++ b/docs/pages/what-are-virtual-clusters.mdx @@ -7,12 +7,12 @@ Virtual clusters are fully working Kubernetes clusters that run on top of other
-[figure: vcluster architecture diagram, caption "vcluster - Architecture"]
+[figure: vCluster architecture diagram, caption "vCluster - Architecture"]
-The virtual cluster itself only consists of the core Kubernetes components: API server, controller manager, storage backend (such as etcd, sqlite, mysql etc.) and optionally a scheduler. To reduce virtual cluster overhead, vcluster builds by default on [k3s](https://k3s.io/), which is a fully working, certified, lightweight Kubernetes distribution that compiles the Kubernetes components into a single binary and disables by default all not needed Kubernetes features, such as the pod scheduler or certain controllers. +The virtual cluster itself only consists of the core Kubernetes components: API server, controller manager, storage backend (such as etcd, sqlite, mysql etc.) and optionally a scheduler. To reduce virtual cluster overhead, vCluster builds by default on [k3s](https://k3s.io/), which is a fully working, certified, lightweight Kubernetes distribution that compiles the Kubernetes components into a single binary and disables by default all not needed Kubernetes features, such as the pod scheduler or certain controllers. -Besides k3s, other Kubernetes distributions such as [k0s and vanilla k8s are supported](./operator/other-distributions.mdx). In addition to the control plane, there is also a Kubernetes hypervisor that emulates networking and worker nodes inside the virtual cluster. This component syncs a handful of core resources that are essential for cluster functionality between the virtual and host cluster: +Besides k3s, other Kubernetes distributions such as [k0s and vanilla k8s are supported](./deploying-vclusters/supported-distros.mdx). In addition to the control plane, there is also a Kubernetes hypervisor that emulates networking and worker nodes inside the virtual cluster. This component syncs a handful of core resources that are essential for cluster functionality between the virtual and host cluster: * **Pods**: All pods that are started in the virtual cluster are rewritten and then started in the namespace of the virtual cluster in the host cluster. Service account tokens, environment variables, DNS and other configurations are exchanged to point to the virtual cluster instead of the host cluster. Within the pod, it so seems that the pod is started within the virtual cluster instead of the host cluster. * **Services**: All services and endpoints are rewritten and created in the namespace of the virtual cluster in the host cluster. The virtual and host cluster share the same service cluster IPs. This also means that a service in the host cluster can be reached from within the virtual cluster without any performance penalties. @@ -20,7 +20,7 @@ Besides k3s, other Kubernetes distributions such as [k0s and vanilla k8s are sup * **Configmaps & Secrets**: ConfigMaps or secrets in the virtual cluster that are mounted to pods will be synced to the host cluster, all other configmaps or secrets will purely stay in the virtual cluster. * **Other Resources**: Deployments, statefulsets, CRDs, service accounts etc. are **NOT** synced to the host cluster and purely exist in the virtual cluster. -See [synced resources](./architecture/synced-resources.mdx) for more information about what resources are synced exactly. +See [synced resources](./syncer/core_resources.mdx) for more information about what resources are synced exactly. In addition to the synchronization of virtual and host cluster resources, the hypervisor proxies certain Kubernetes API requests to the host cluster, such as pod port forwarding or container command execution. 
It essentially acts as a reverse proxy for the virtual cluster. diff --git a/docs/sidebars.js b/docs/sidebars.js index ed496668e..093525509 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -8,88 +8,266 @@ module.exports = { adminSidebar: [ { - type: 'doc', - id: 'what-are-virtual-clusters', + type: "html", + value: ` + + `, + }, + { + type: "doc", + id: "what-are-virtual-clusters", }, { - type: 'category', - label: 'Getting Started', + type: "category", + label: "Getting Started", collapsed: false, items: [ + "getting-started/setup", + "getting-started/deployment", + "getting-started/connect", + "getting-started/cleanup", + ], + }, + { + type: "category", + label: "Architecture", + collapsed: true, + items: [ + "architecture/overview", { - type: 'doc', - id: 'quickstart', + type: "category", + label: "Control Plane", + collapsed: true, + items: [ + "architecture/control_plane/control_plane", + "architecture/control_plane/k8s_distros", + { + type: "html", + value: `Isolated control planes
p
`, + defaultStyle: true, + }, + ], }, { - type: 'category', - label: 'Full Guide', - collapsed: false, + type: "category", + label: "Syncer", + collapsed: true, items: [ - 'getting-started/setup', - 'getting-started/deployment', - 'getting-started/connect', - 'getting-started/cleanup', + "architecture/syncer/syncer", + "architecture/syncer/single_vs_multins", ], }, + "architecture/scheduling", + "architecture/nodes", ], }, { - type: 'category', - label: 'Architecture', - collapsed: false, + type: "category", + label: "Networking", + collapsed: true, items: [ - 'architecture/basics', - 'architecture/scheduling', - 'architecture/networking', - 'architecture/storage', - 'architecture/nodes', - 'architecture/synced-resources', + "networking/networking", + "networking/coreDNS", + { + type: "html", + value: `Integrated CoreDNS
p
`, + defaultStyle: true, + }, + { + type: "category", + label: "Mapping Traffic", + collapsed: true, + items: [ + "networking/internal_traffic/host_to_vcluster", + "networking/internal_traffic/vcluster_to_host", + ], + }, + "networking/ingress_traffic", + "networking/network_policies", ], }, { - type: 'category', - label: 'Operator Guide', - collapsed: false, + type: "category", + label: "Sync", + collapsed: true, items: [ - 'operator/external-access', - 'operator/external-datastore', - 'operator/accessing-vcluster', - 'operator/init-manifests', - 'operator/monitoring-logging', - 'operator/high-availability', - 'operator/other-distributions', - 'operator/restricted-hosts', - 'operator/pausing-vcluster', - 'operator/backup', - 'operator/security', - 'operator/cluster-api-provider', + "syncer/core_resources", + { + type: "category", + label: "Syncer", + collapsed: true, + items: ["syncer/config"], + }, + { + type: "category", + label: "Other resources", + collapsed: true, + items: [ + "syncer/other_resources/overview", + "syncer/other_resources/generic_sync", + "syncer/other_resources/config_syntax", + "syncer/other_resources/multi_namespace_mode", + ], + }, + { + type: "html", + value: `Generic Resource Patches
p
`, + defaultStyle: true, + }, + { + type: "category", + label: "Plugins", + collapsed: true, + items: ["plugins/overview", "plugins/tutorial"], + }, ], }, { type: "category", - label: "Plugins", - collapsed: false, + label: "Using vclusters", + collapsed: true, items: [ - 'plugins/overview', - 'plugins/tutorial', - ] + { + type: "category", + label: "Accessing vcluster", + collapsed: true, + items: ["using-vclusters/kube-context", "using-vclusters/access"], + }, + "using-vclusters/pausing-vcluster", + "using-vclusters/backup-restore", + ], }, { - type: 'doc', - id: 'troubleshooting', + type: "category", + label: "Deploying vclusters", + collapsed: true, + items: [ + { + type: "category", + label: "Kubernetes Distros", + collapsed: true, + items: ["deploying-vclusters/supported-distros"], + }, + { + type: "category", + label: "Persistent vclusters", + collapsed: true, + items: ["deploying-vclusters/persistence"], + }, + "deploying-vclusters/high-availability", + { + type: "category", + label: "On Init", + collapsed: true, + items: [ + "deploying-vclusters/init-manifests", + "deploying-vclusters/init-charts", + ], + }, + { + type: "category", + label: "Integrations", + collapsed: true, + items: ["deploying-vclusters/integrations-openshift"], + }, + ], + }, + { + type: "doc", + id: "storage", + }, + { + type: "category", + label: "Observability", + collapsed: true, + items: [ + { + type: "category", + label: "Collecting Metrics", + collapsed: true, + items: [ + "o11y/metrics/metrics_server_proxy", + "o11y/metrics/metrics_server", + "o11y/metrics/monitoring_vcluster", + ], + }, + { + type: "category", + label: "Logging", + collapsed: true, + items: [ + "o11y/logging/hpm", + "o11y/logging/central_hpm", + "o11y/logging/elk_stack", + "o11y/logging/grafana_loki", + ], + }, + ], + }, + { + type: "category", + label: "Security", + collapsed: true, + items: [ + "security/rootless-mode", + "security/isolated-mode", + "security/quotas-limits", + "security/pod-security", + "security/network-isolation", + "security/other-topics", + ], }, { - type: 'doc', - id: 'telemetry', + type: "category", + label: "Advanced topics", + collapsed: true, + items: [ + { + type: "category", + label: "Plugins", + collapsed: true, + items: [ + "advanced-topics/plugins-overview", + "advanced-topics/plugins-development", + ], + }, + "advanced-topics/telemetry", + ], }, { - type: 'doc', - id: 'config-reference', + type: "category", + label: "Help and Tutorials", + collapsed: true, + items: ["help&tutorials/troubleshooting", "help&tutorials/helm-provisioning", "help&tutorials/bootstrapping"], + }, + { + type: "doc", + id: "config-reference", + }, + { + type: "category", + label: "CLI", + collapsed: true, + link: { type: 'doc', id: 'cli' }, + items: [ + { + type: 'autogenerated', + dirName: 'cli', + }, + ], }, { - type: 'link', - label: 'Originally created by Loft', - href: 'https://loft.sh/', + type: "link", + label: "Originally created by Loft", + href: "https://loft.sh/", }, ], }; diff --git a/docs/src/components/ProLabel/ProLabel.js b/docs/src/components/ProLabel/ProLabel.js new file mode 100644 index 000000000..131b473c8 --- /dev/null +++ b/docs/src/components/ProLabel/ProLabel.js @@ -0,0 +1,10 @@ +import React from "react"; +import "./pro-label.css"; + +const CustomLabel = ({ children, color, href }) => ( + + {" "} + {children} + +); +export default CustomLabel; diff --git a/docs/src/components/ProLabel/pro-label.css b/docs/src/components/ProLabel/pro-label.css new file mode 100644 index 000000000..0ce233d0c --- /dev/null 
+++ b/docs/src/components/ProLabel/pro-label.css @@ -0,0 +1,7 @@ +.proFeatureLabel { + background-color: orange; + padding: 3px; + border-radius: 2px; + padding: 0.2rem; + font-weight: bold; +} diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css index 3fce3c158..c417eb4d8 100644 --- a/docs/src/css/custom.css +++ b/docs/src/css/custom.css @@ -344,6 +344,7 @@ table th:first-child:empty { .container > .row > .col > div { display: flex; flex-direction: column; + height: 100%; } /* Button: Edit this page */ @@ -611,3 +612,100 @@ body .markdown { background-position: center center; background-size: contain; } + +/* --- */ +/* The slider */ +.pro-flex-container { + margin-top: 1em; + + display: flex; + background-color: #F2F2F2; + + border-style: solid; + border-color: #E1E1E1; + border-width: 1px; + + border-radius: 4px; +} + +[data-theme="dark"] .pro-flex-container { + background-color: #272729; + border-color: #444950; +} + +.pro-flex-container > a { + margin: 6px; + padding: 4px; + width: 50%; + text-align: center; + font-weight: 500; + color: black; + + font-weight: 500; +} + +.pro-flex-container > a.selected { + background-color: #EE7D3B; + color: white; + border-radius: 4px; + + font-weight: bolder; +} + +[data-theme="dark"] .pro-flex-container > a { + color: white; +} + +[data-theme="dark"] .pro-flex-container > a.selected { + color: black; +} + +@font-face { + font-family: "loft"; + src: url('../../static/media/loft-1-3-Regular.otf') format('opentype'); +} + +.pro-feature { + --ifm-font-family-base: "loft"; + font-family: "loft"; + + font-size: large; + + padding-left: 4px; + padding-top: 0.4em; + + line-height: 1em; + + vertical-align: middle; +} + +.pro-feature .menu__link:after { + content: "p"; +} + +.pro-feature-link { + visibility: hidden; + opacity:0; + + transition: visibility 0.2s linear,opacity 0.2s linear; +} + +.pro-feature-sidebar-item .menu__link:after { + content: "p"; + + --ifm-font-family-base: "loft"; + font-family: "loft"; + font-size: large; + + padding-left: 4px; + padding-top: 0.4em; + + line-height: 1em; + + vertical-align: middle; +} + +.menu__link:hover > .pro-feature-link { + visibility: visible; + opacity:1; +} diff --git a/docs/src/pages/index.js b/docs/src/pages/index.js deleted file mode 100644 index 44cc4e454..000000000 --- a/docs/src/pages/index.js +++ /dev/null @@ -1,16 +0,0 @@ -import React from 'react'; -import Layout from '@theme/Layout'; -import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; - -function Home() { - const context = useDocusaurusContext(); - const {siteConfig = {}} = context; - return ( - - - ); -} - -export default Home; diff --git a/docs/static/media/diagrams/vcluster-multinamespace-architecture.svg b/docs/static/media/diagrams/vcluster-multinamespace-architecture.svg new file mode 100644 index 000000000..21f2ce87d --- /dev/null +++ b/docs/static/media/diagrams/vcluster-multinamespace-architecture.svg @@ -0,0 +1,610 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/static/media/loft-1-2-Regular.otf b/docs/static/media/loft-1-2-Regular.otf new file mode 100644 index 000000000..e4eda7dda Binary files /dev/null and b/docs/static/media/loft-1-2-Regular.otf differ diff --git a/docs/static/media/loft-1-3-Regular.otf b/docs/static/media/loft-1-3-Regular.otf new file mode 100644 index 000000000..1389e1438 Binary files /dev/null and b/docs/static/media/loft-1-3-Regular.otf differ diff --git a/docs/static/media/vCluster_horizontal-orange.svg b/docs/static/media/vCluster_horizontal-orange.svg new file mode 100644 index 000000000..b60eb993a --- /dev/null +++ b/docs/static/media/vCluster_horizontal-orange.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/static/media/vcluster_Horizontal_MonoBranding.svg b/docs/static/media/vcluster_Horizontal_MonoBranding.svg deleted file mode 100644 index 735e3ab95..000000000 --- a/docs/static/media/vcluster_Horizontal_MonoBranding.svg +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/go.mod b/go.mod index 2b90396da..bc203e449 100644 --- a/go.mod +++ b/go.mod @@ -65,6 +65,7 @@ require ( github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cloudflare/circl v1.3.3 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/docker/cli v23.0.0-rc.1+incompatible // indirect github.com/docker/docker v23.0.0-rc.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect diff --git a/go.sum b/go.sum index c7caa522e..89bd2a3d0 100644 --- a/go.sum +++ b/go.sum @@ -137,6 +137,7 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= diff --git a/hack/gen-docs.go b/hack/docs/main.go old mode 100644 new mode 100755 similarity index 60% rename from hack/gen-docs.go rename to hack/docs/main.go index 2fac3e046..6bea99ffd --- a/hack/gen-docs.go +++ b/hack/docs/main.go @@ -1,34 +1,44 @@ -//go:build ignore -// +build ignore - +// nolint package main import ( "fmt" - "log" "os" "path" "path/filepath" "regexp" "strings" + "github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd" - "github.com/loft-sh/vcluster/pkg/util/loghelper" "github.com/spf13/cobra/doc" ) -const cliDocsDir = "./docs/pages/commands" +const cliDocsDir = "./docs/pages/cli" const headerTemplate = `--- -title: "%s" +title: "%s --help" sidebar_label: %s --- ` 
-var fixSynopsisRegexp = regexp.MustCompile("(?si)(## vcluster.*?\n)(.*?)#(## Synopsis\n*\\s*)(.*?)(\\s*\n\n\\s*)((```)(.*?))?#(## Options)(.*?)((### Options inherited from parent commands)(.*?)#(## See Also)(\\s*\\* \\[vcluster\\][^\n]*)?(.*))|(#(## See Also)(\\s*\\* \\[vcluster\\][^\n]*)?(.*))\n###### Auto generated by spf13/cobra on .*$") +const proHeaderTemplate = `--- +title: "%[1]s --help" +sidebar_label: %[2]s +sidebar_class_name: "pro-feature-sidebar-item" +--- + +:::info Note: +` + "`%[1]s`" + ` is only available in the enterprise-ready [vCluster.Pro](https://vcluster.pro) offering. +::: + +` + +var fixSynopsisRegexp = regexp.MustCompile("(?si)(## vcluster.*?\n)(.*?)#(## Synopsis\n*\\s*)(.*?)(\\s*\n\n\\s*)((```)(.*?))?#(## Options)(.*?)((### Options inherited from parent commands)(.*?)#(## See Also)(\\s*\\* \\[vcluster][^\n]*)?(.*))|(#(## See Also)(\\s*\\* \\[vcluster][^\n]*)?(.*))\n###### Auto generated by spf13/cobra on .*$") // Run executes the command logic func main() { + logger := log.GetInstance() filePrepender := func(filename string) string { name := filepath.Base(filename) base := strings.TrimSuffix(name, path.Ext(name)) @@ -40,7 +50,7 @@ func main() { if l > 1 { matches, err := filepath.Glob(cliDocsDir + "/vcluster_" + command[1]) if err != nil { - log.Fatal(err) + logger.Fatal(err) } if len(matches) > 2 { @@ -48,7 +58,11 @@ func main() { } } - return fmt.Sprintf(headerTemplate, "Command - "+title, sidebarLabel) + if strings.HasPrefix(name, "vcluster_pro") { + return fmt.Sprintf(proHeaderTemplate, title, sidebarLabel) + } + + return fmt.Sprintf(headerTemplate, title, sidebarLabel) } linkHandler := func(name string) string { @@ -56,12 +70,14 @@ func main() { return strings.ToLower(base) + ".md" } - log := loghelper.GetInstance() - rootCmd := cmd.BuildRoot(log) + rootCmd, err := cmd.BuildRoot(logger) + if err != nil { + logger.Fatal(err) + } - err := doc.GenMarkdownTreeCustom(rootCmd, cliDocsDir, filePrepender, linkHandler) + err = doc.GenMarkdownTreeCustom(rootCmd, cliDocsDir, filePrepender, linkHandler) if err != nil { - log.Fatal(err) + logger.Fatal(err) } err = filepath.Walk(cliDocsDir, func(path string, info os.FileInfo, err error) error { @@ -82,9 +98,13 @@ func main() { return err } + if info.Name() == "vcluster.md" { + os.Rename(path, filepath.Join(cliDocsDir, "..", "cli.md")) + } + return nil }) if err != nil { - log.Fatal(err) + logger.Fatal(err) } } diff --git a/netlify.toml b/netlify.toml index 040cc7845..c297a7df5 100644 --- a/netlify.toml +++ b/netlify.toml @@ -1,23 +1,27 @@ [build] - base = "docs/" - publish = "public/" - command = """ +base = "docs/" +publish = "public/" +command = """ set -e - + yarn build mkdir public/ mv build/ public/docs/ """ [build.processing] - skip_processing = false +skip_processing = false + [build.processing.html] - pretty_urls = true +pretty_urls = true + [build.processing.css] - bundle = false - minify = false +bundle = false +minify = false + [build.processing.js] - bundle = false - minify = false +bundle = false +minify = false + [build.processing.images] - compress = true +compress = true diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 000000000..1cade6cef --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files 
(the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..b48005673 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,14 @@ +package md2man + +import ( + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + + return blackfriday.Run(doc, + []blackfriday.Option{blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions())}...) +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..be2b34360 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,336 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + extensions blackfriday.Extensions + listCounters []int + firstHeader bool + firstDD bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB\\fC" + codespanCloseTag = "\\fR" + codeTag = "\n.PP\n.RS\n\n.nf\n" + codeCloseTag = "\n.fi\n.RE\n" + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + var extensions blackfriday.Extensions + + extensions |= blackfriday.NoIntraEmphasis + extensions |= blackfriday.Tables + extensions |= blackfriday.FencedCode + extensions |= blackfriday.SpaceHeadings + extensions |= blackfriday.Footnotes + extensions |= blackfriday.Titleblock + extensions |= blackfriday.DefinitionLists + return &roffRenderer{ + extensions: extensions, + } +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (r 
*roffRenderer) GetExtensions() blackfriday.Extensions { + return r.extensions +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + + var walkAction = blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + escapeSpecialChars(w, node.Literal) + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + if !entering { + out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + } + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + // roff .PP markers break lists + if r.listDepth > 0 { + return blackfriday.GoToNext + } + if entering { + out(w, paraTag) + } else { + out(w, crTag) + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags + default: + fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) + } + return walkAction +} + +func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + switch node.Level { + case 1: + if !r.firstHeader { + out(w, titleHeader) + r.firstHeader = true + break + } + out(w, topLevelHeader) + case 2: + out(w, secondLevelHdr) + default: + out(w, otherHeader) + } + } +} + +func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { + openTag := listTag + closeTag := listCloseTag + if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // tags for definition lists handled within Item node + openTag = "" + closeTag = "" + } + if entering { + r.listDepth++ + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = append(r.listCounters, 1) + } + out(w, openTag) + } else { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = r.listCounters[:len(r.listCounters)-1] + } + out(w, closeTag) + r.listDepth-- + } 
+} + +func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) + r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeTerm != 0 { + // DT (definition term): line just before DD (see below). + out(w, dtTag) + r.firstDD = true + } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // DD (definition description): line that starts with ": ". + // + // We have to distinguish between the first DD and the + // subsequent ones, as there should be no vertical + // whitespace between the DT and the first DD. + if r.firstDD { + r.firstDD = false + } else { + out(w, dd2Tag) + } + } else { + out(w, ".IP \\(bu 2\n") + } + } else { + out(w, "\n") + } +} + +func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + out(w, tableStart) + // call walker to count cells (and rows?) so format section can be produced + columns := countColumns(node) + out(w, strings.Repeat("l ", columns)+"\n") + out(w, strings.Repeat("l ", columns)+".\n") + } else { + out(w, tableEnd) + } +} + +func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + var start string + if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { + start = "\t" + } + if node.IsHeader { + start += codespanTag + } else if nodeLiteralSize(node) > 30 { + start += tableCellStart + } + out(w, start) + } else { + var end string + if node.IsHeader { + end = codespanCloseTag + } else if nodeLiteralSize(node) > 30 { + end = tableCellEnd + } + if node.Next == nil && end != tableCellEnd { + // Last cell: need to carriage return if we are at the end of the + // header row and content isn't wrapped in a "tablecell" + end += crTag + } + out(w, end) + } +} + +func nodeLiteralSize(node *blackfriday.Node) int { + total := 0 + for n := node.FirstChild; n != nil; n = n.FirstChild { + total += len(n.Literal) + } + return total +} + +// because roff format requires knowing the column count before outputting any table +// data we need to walk a table tree and count the columns +func countColumns(node *blackfriday.Node) int { + var columns int + + node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + switch node.Type { + case blackfriday.TableRow: + if !entering { + return blackfriday.Terminate + } + case blackfriday.TableCell: + if entering { + columns++ + } + default: + } + return blackfriday.GoToNext + }) + return columns +} + +func out(w io.Writer, output string) { + io.WriteString(w, output) // nolint: errcheck +} + +func escapeSpecialChars(w io.Writer, text []byte) { + for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out(w, "\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && text[i] != '\\' { + i++ + } + if i > org { + w.Write(text[org:i]) // nolint: errcheck + } + + // escape a character + if i >= len(text) { + break + } + + w.Write([]byte{'\\', text[i]}) // nolint: errcheck + } +} diff --git a/vendor/github.com/spf13/cobra/doc/README.md b/vendor/github.com/spf13/cobra/doc/README.md new file mode 100644 index 000000000..8e07baae3 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/README.md @@ -0,0 +1,17 @@ +# Documentation generation + +- [Man page docs](./man_docs.md) +- [Markdown 
docs](./md_docs.md) +- [Rest docs](./rest_docs.md) +- [Yaml docs](./yaml_docs.md) + +## Options +### `DisableAutoGenTag` + +You may set `cmd.DisableAutoGenTag = true` +to _entirely_ remove the auto generated string "Auto generated by spf13/cobra..." +from any documentation source. + +### `InitDefaultCompletionCmd` + +You may call `cmd.InitDefaultCompletionCmd()` to document the default autocompletion command. diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go new file mode 100644 index 000000000..b8c15ce88 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -0,0 +1,246 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/cpuguy83/go-md2man/v2/md2man" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// GenManTree will generate a man page for this command and all descendants +// in the directory given. The header may be nil. This function may not work +// correctly if your command names have `-` in them. If you have `cmd` with two +// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` +// it is undefined which help output will be in the file `cmd-sub-third.1`. +func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error { + return GenManTreeFromOpts(cmd, GenManTreeOptions{ + Header: header, + Path: dir, + CommandSeparator: "-", + }) +} + +// GenManTreeFromOpts generates a man page for the command and all descendants. +// The pages are written to the opts.Path directory. +func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { + header := opts.Header + if header == nil { + header = &GenManHeader{} + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenManTreeFromOpts(c, opts); err != nil { + return err + } + } + section := "1" + if header.Section != "" { + section = header.Section + } + + separator := "_" + if opts.CommandSeparator != "" { + separator = opts.CommandSeparator + } + basename := strings.ReplaceAll(cmd.CommandPath(), " ", separator) + filename := filepath.Join(opts.Path, basename+"."+section) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + headerCopy := *header + return GenMan(cmd, &headerCopy, f) +} + +// GenManTreeOptions is the options for generating the man pages. +// Used only in GenManTreeFromOpts. +type GenManTreeOptions struct { + Header *GenManHeader + Path string + CommandSeparator string +} + +// GenManHeader is a lot like the .TH header at the start of man pages. These +// include the title, section, date, source, and manual. We will use the +// current time if Date is unset and will use "Auto generated by spf13/cobra" +// if the Source is unset. 
+type GenManHeader struct { + Title string + Section string + Date *time.Time + date string + Source string + Manual string +} + +// GenMan will generate a man page for the given command and write it to +// w. The header argument may be nil, however obviously w may not. +func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error { + if header == nil { + header = &GenManHeader{} + } + if err := fillHeader(header, cmd.CommandPath(), cmd.DisableAutoGenTag); err != nil { + return err + } + + b := genMan(cmd, header) + _, err := w.Write(md2man.Render(b)) + return err +} + +func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error { + if header.Title == "" { + header.Title = strings.ToUpper(strings.ReplaceAll(name, " ", "\\-")) + } + if header.Section == "" { + header.Section = "1" + } + if header.Date == nil { + now := time.Now() + if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" { + unixEpoch, err := strconv.ParseInt(epoch, 10, 64) + if err != nil { + return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err) + } + now = time.Unix(unixEpoch, 0) + } + header.Date = &now + } + header.date = (*header.Date).Format("Jan 2006") + if header.Source == "" && !disableAutoGen { + header.Source = "Auto generated by spf13/cobra" + } + return nil +} + +func manPreamble(buf io.StringWriter, header *GenManHeader, cmd *cobra.Command, dashedName string) { + description := cmd.Long + if len(description) == 0 { + description = cmd.Short + } + + cobra.WriteStringAndCheck(buf, fmt.Sprintf(`%% "%s" "%s" "%s" "%s" "%s" +# NAME +`, header.Title, header.Section, header.date, header.Source, header.Manual)) + cobra.WriteStringAndCheck(buf, fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short)) + cobra.WriteStringAndCheck(buf, "# SYNOPSIS\n") + cobra.WriteStringAndCheck(buf, fmt.Sprintf("**%s**\n\n", cmd.UseLine())) + cobra.WriteStringAndCheck(buf, "# DESCRIPTION\n") + cobra.WriteStringAndCheck(buf, description+"\n\n") +} + +func manPrintFlags(buf io.StringWriter, flags *pflag.FlagSet) { + flags.VisitAll(func(flag *pflag.Flag) { + if len(flag.Deprecated) > 0 || flag.Hidden { + return + } + format := "" + if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { + format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name) + } else { + format = fmt.Sprintf("**--%s**", flag.Name) + } + if len(flag.NoOptDefVal) > 0 { + format += "[" + } + if flag.Value.Type() == "string" { + // put quotes on the value + format += "=%q" + } else { + format += "=%s" + } + if len(flag.NoOptDefVal) > 0 { + format += "]" + } + format += "\n\t%s\n\n" + cobra.WriteStringAndCheck(buf, fmt.Sprintf(format, flag.DefValue, flag.Usage)) + }) +} + +func manPrintOptions(buf io.StringWriter, command *cobra.Command) { + flags := command.NonInheritedFlags() + if flags.HasAvailableFlags() { + cobra.WriteStringAndCheck(buf, "# OPTIONS\n") + manPrintFlags(buf, flags) + cobra.WriteStringAndCheck(buf, "\n") + } + flags = command.InheritedFlags() + if flags.HasAvailableFlags() { + cobra.WriteStringAndCheck(buf, "# OPTIONS INHERITED FROM PARENT COMMANDS\n") + manPrintFlags(buf, flags) + cobra.WriteStringAndCheck(buf, "\n") + } +} + +func genMan(cmd *cobra.Command, header *GenManHeader) []byte { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + // something like `rootcmd-subcmd1-subcmd2` + dashCommandName := strings.ReplaceAll(cmd.CommandPath(), " ", "-") + + buf := new(bytes.Buffer) + + manPreamble(buf, header, cmd, dashCommandName) + manPrintOptions(buf, cmd) + if len(cmd.Example) > 0 { + 
buf.WriteString("# EXAMPLE\n") + buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) + } + if hasSeeAlso(cmd) { + buf.WriteString("# SEE ALSO\n") + seealsos := make([]string, 0) + if cmd.HasParent() { + parentPath := cmd.Parent().CommandPath() + dashParentPath := strings.ReplaceAll(parentPath, " ", "-") + seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section) + seealsos = append(seealsos, seealso) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + children := cmd.Commands() + sort.Sort(byName(children)) + for _, c := range children { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section) + seealsos = append(seealsos, seealso) + } + buf.WriteString(strings.Join(seealsos, ", ") + "\n") + } + if !cmd.DisableAutoGenTag { + buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) + } + return buf.Bytes() +} diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.md b/vendor/github.com/spf13/cobra/doc/man_docs.md new file mode 100644 index 000000000..3709160f3 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/man_docs.md @@ -0,0 +1,31 @@ +# Generating Man Pages For Your Own cobra.Command + +Generating man pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + header := &doc.GenManHeader{ + Title: "MINE", + Section: "3", + } + err := doc.GenManTree(cmd, header, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a man page `/tmp/test.3` diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go new file mode 100644 index 000000000..c4a27c009 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -0,0 +1,156 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/spf13/cobra" +) + +func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error { + flags := cmd.NonInheritedFlags() + flags.SetOutput(buf) + if flags.HasAvailableFlags() { + buf.WriteString("### Options\n\n```\n") + flags.PrintDefaults() + buf.WriteString("```\n\n") + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(buf) + if parentFlags.HasAvailableFlags() { + buf.WriteString("### Options inherited from parent commands\n\n```\n") + parentFlags.PrintDefaults() + buf.WriteString("```\n\n") + } + return nil +} + +// GenMarkdown creates markdown output. 
+func GenMarkdown(cmd *cobra.Command, w io.Writer) error { + return GenMarkdownCustom(cmd, w, func(s string) string { return s }) +} + +// GenMarkdownCustom creates custom markdown output. +func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + buf := new(bytes.Buffer) + name := cmd.CommandPath() + + buf.WriteString("## " + name + "\n\n") + buf.WriteString(cmd.Short + "\n\n") + if len(cmd.Long) > 0 { + buf.WriteString("### Synopsis\n\n") + buf.WriteString(cmd.Long + "\n\n") + } + + if cmd.Runnable() { + buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) + } + + if len(cmd.Example) > 0 { + buf.WriteString("### Examples\n\n") + buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) + } + + if err := printOptions(buf, cmd, name); err != nil { + return err + } + if hasSeeAlso(cmd) { + buf.WriteString("### SEE ALSO\n\n") + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + link := pname + ".md" + link = strings.ReplaceAll(link, " ", "_") + buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + cname := name + " " + child.Name() + link := cname + ".md" + link = strings.ReplaceAll(link, " ", "_") + buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) + } + buf.WriteString("\n") + } + if !cmd.DisableAutoGenTag { + buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n") + } + _, err := buf.WriteTo(w) + return err +} + +// GenMarkdownTree will generate a markdown page for this command and all +// descendants in the directory given. The header may be nil. +// This function may not work correctly if your command names have `-` in them. +// If you have `cmd` with two subcmds, `sub` and `sub-third`, +// and `sub` has a subcommand called `third`, it is undefined which +// help output will be in the file `cmd-sub-third.1`. +func GenMarkdownTree(cmd *cobra.Command, dir string) error { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) +} + +// GenMarkdownTreeCustom is the the same as GenMarkdownTree, but +// with custom filePrepender and linkHandler. 
+func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".md" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md new file mode 100644 index 000000000..1659175cf --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/md_docs.md @@ -0,0 +1,115 @@ +# Generating Markdown Docs For Your Own cobra.Command + +Generating Markdown pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + err := doc.GenMarkdownTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a Markdown document `/tmp/test.md` + +## Generate markdown docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "log" + "io/ioutil" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenMarkdownTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate markdown docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree` + +```go + out := new(bytes.Buffer) + err := doc.GenMarkdown(cmd, out) + if err != nil { + log.Fatal(err) + } +``` + +This will write the markdown doc for ONLY "cmd" into the out, buffer. + +## Customize the output + +Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { + //... +} +``` + +```go +func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. 
A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` + +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: + +```go +linkHandler := func(name string) string { + base := strings.TrimSuffix(name, path.Ext(name)) + return "/commands/" + strings.ToLower(base) + "/" +} +``` diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go new file mode 100644 index 000000000..2cca6fd77 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go @@ -0,0 +1,186 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/spf13/cobra" +) + +func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error { + flags := cmd.NonInheritedFlags() + flags.SetOutput(buf) + if flags.HasAvailableFlags() { + buf.WriteString("Options\n") + buf.WriteString("~~~~~~~\n\n::\n\n") + flags.PrintDefaults() + buf.WriteString("\n") + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(buf) + if parentFlags.HasAvailableFlags() { + buf.WriteString("Options inherited from parent commands\n") + buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n") + parentFlags.PrintDefaults() + buf.WriteString("\n") + } + return nil +} + +// defaultLinkHandler for default ReST hyperlink markup +func defaultLinkHandler(name, ref string) string { + return fmt.Sprintf("`%s <%s.rst>`_", name, ref) +} + +// GenReST creates reStructured Text output. +func GenReST(cmd *cobra.Command, w io.Writer) error { + return GenReSTCustom(cmd, w, defaultLinkHandler) +} + +// GenReSTCustom creates custom reStructured Text output. +func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + buf := new(bytes.Buffer) + name := cmd.CommandPath() + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + ref := strings.ReplaceAll(name, " ", "_") + + buf.WriteString(".. 
_" + ref + ":\n\n") + buf.WriteString(name + "\n") + buf.WriteString(strings.Repeat("-", len(name)) + "\n\n") + buf.WriteString(short + "\n\n") + buf.WriteString("Synopsis\n") + buf.WriteString("~~~~~~~~\n\n") + buf.WriteString("\n" + long + "\n\n") + + if cmd.Runnable() { + buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) + } + + if len(cmd.Example) > 0 { + buf.WriteString("Examples\n") + buf.WriteString("~~~~~~~~\n\n") + buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) + } + + if err := printOptionsReST(buf, cmd, name); err != nil { + return err + } + if hasSeeAlso(cmd) { + buf.WriteString("SEE ALSO\n") + buf.WriteString("~~~~~~~~\n\n") + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + ref = strings.ReplaceAll(pname, " ", "_") + buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + cname := name + " " + child.Name() + ref = strings.ReplaceAll(cname, " ", "_") + buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) + } + buf.WriteString("\n") + } + if !cmd.DisableAutoGenTag { + buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n") + } + _, err := buf.WriteTo(w) + return err +} + +// GenReSTTree will generate a ReST page for this command and all +// descendants in the directory given. +// This function may not work correctly if your command names have `-` in them. +// If you have `cmd` with two subcmds, `sub` and `sub-third`, +// and `sub` has a subcommand called `third`, it is undefined which +// help output will be in the file `cmd-sub-third.1`. +func GenReSTTree(cmd *cobra.Command, dir string) error { + emptyStr := func(s string) string { return "" } + return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler) +} + +// GenReSTTreeCustom is the the same as GenReSTTree, but +// with custom filePrepender and linkHandler. +func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".rst" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenReSTCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} + +// indentString adapted from: https://github.com/kr/text/blob/main/indent.go +func indentString(s, p string) string { + var res []byte + b := []byte(s) + prefix := []byte(p) + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) 
+ } + res = append(res, c) + bol = c == '\n' + } + return string(res) +} diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.md b/vendor/github.com/spf13/cobra/doc/rest_docs.md new file mode 100644 index 000000000..3041c573a --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/rest_docs.md @@ -0,0 +1,114 @@ +# Generating ReStructured Text Docs For Your Own cobra.Command + +Generating ReST pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + err := doc.GenReSTTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a ReST document `/tmp/test.rst` + +## Generate ReST docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "log" + "io/ioutil" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenReSTTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate ReST docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenReST` instead of `GenReSTTree` + +```go + out := new(bytes.Buffer) + err := doc.GenReST(cmd, out) + if err != nil { + log.Fatal(err) + } +``` + +This will write the ReST doc for ONLY "cmd" into the out, buffer. + +## Customize the output + +Both `GenReST` and `GenReSTTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { + //... +} +``` + +```go +func GenReSTCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string, string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered ReST file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. 
This is useful while converting rst to html or while generating documentation with tools like Sphinx where `:ref:` is used: + +```go +// Sphinx cross-referencing format +linkHandler := func(name, ref string) string { + return fmt.Sprintf(":ref:`%s <%s>`", name, ref) +} +``` diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go new file mode 100644 index 000000000..0aaa07a16 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/util.go @@ -0,0 +1,52 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "strings" + + "github.com/spf13/cobra" +) + +// Test to see if we have a reason to print See Also information in docs +// Basically this is a test for a parent command or a subcommand which is +// both not deprecated and not the autogenerated help command. +func hasSeeAlso(cmd *cobra.Command) bool { + if cmd.HasParent() { + return true + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + return true + } + return false +} + +// Temporary workaround for yaml lib generating incorrect yaml with long strings +// that do not contain \n. +func forceMultiLine(s string) string { + if len(s) > 60 && !strings.Contains(s, "\n") { + s = s + "\n" + } + return s +} + +type byName []*cobra.Command + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go new file mode 100644 index 000000000..2b26d6ec0 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go @@ -0,0 +1,175 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package doc + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "gopkg.in/yaml.v3" +) + +type cmdOption struct { + Name string + Shorthand string `yaml:",omitempty"` + DefaultValue string `yaml:"default_value,omitempty"` + Usage string `yaml:",omitempty"` +} + +type cmdDoc struct { + Name string + Synopsis string `yaml:",omitempty"` + Description string `yaml:",omitempty"` + Usage string `yaml:",omitempty"` + Options []cmdOption `yaml:",omitempty"` + InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` + Example string `yaml:",omitempty"` + SeeAlso []string `yaml:"see_also,omitempty"` +} + +// GenYamlTree creates yaml structured ref files for this command and all descendants +// in the directory given. This function may not work +// correctly if your command names have `-` in them. If you have `cmd` with two +// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` +// it is undefined which help output will be in the file `cmd-sub-third.1`. +func GenYamlTree(cmd *cobra.Command, dir string) error { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + return GenYamlTreeCustom(cmd, dir, emptyStr, identity) +} + +// GenYamlTreeCustom creates yaml structured ref files. +func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { + continue + } + if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".yaml" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + return err + } + if err := GenYamlCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} + +// GenYaml creates yaml output. +func GenYaml(cmd *cobra.Command, w io.Writer) error { + return GenYamlCustom(cmd, w, func(s string) string { return s }) +} + +// GenYamlCustom creates custom yaml output. 
+func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + cmd.InitDefaultHelpCmd() + cmd.InitDefaultHelpFlag() + + yamlDoc := cmdDoc{} + yamlDoc.Name = cmd.CommandPath() + + yamlDoc.Synopsis = forceMultiLine(cmd.Short) + yamlDoc.Description = forceMultiLine(cmd.Long) + + if cmd.Runnable() { + yamlDoc.Usage = cmd.UseLine() + } + + if len(cmd.Example) > 0 { + yamlDoc.Example = cmd.Example + } + + flags := cmd.NonInheritedFlags() + if flags.HasFlags() { + yamlDoc.Options = genFlagResult(flags) + } + flags = cmd.InheritedFlags() + if flags.HasFlags() { + yamlDoc.InheritedOptions = genFlagResult(flags) + } + + if hasSeeAlso(cmd) { + result := []string{} + if cmd.HasParent() { + parent := cmd.Parent() + result = append(result, parent.CommandPath()+" - "+parent.Short) + } + children := cmd.Commands() + sort.Sort(byName(children)) + for _, child := range children { + if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { + continue + } + result = append(result, child.CommandPath()+" - "+child.Short) + } + yamlDoc.SeeAlso = result + } + + final, err := yaml.Marshal(&yamlDoc) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + if _, err := w.Write(final); err != nil { + return err + } + return nil +} + +func genFlagResult(flags *pflag.FlagSet) []cmdOption { + var result []cmdOption + + flags.VisitAll(func(flag *pflag.Flag) { + // Todo, when we mark a shorthand is deprecated, but specify an empty message. + // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. + // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. + if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { + opt := cmdOption{ + flag.Name, + flag.Shorthand, + flag.DefValue, + forceMultiLine(flag.Usage), + } + result = append(result, opt) + } else { + opt := cmdOption{ + Name: flag.Name, + DefaultValue: forceMultiLine(flag.DefValue), + Usage: forceMultiLine(flag.Usage), + } + result = append(result, opt) + } + }) + + return result +} diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md new file mode 100644 index 000000000..172e61d12 --- /dev/null +++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.md @@ -0,0 +1,112 @@ +# Generating Yaml Docs For Your Own cobra.Command + +Generating yaml files from a cobra command is incredibly easy. 
An example is as follows: + +```go +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + err := doc.GenYamlTree(cmd, "/tmp") + if err != nil { + log.Fatal(err) + } +} +``` + +That will get you a Yaml document `/tmp/test.yaml` + +## Generate yaml docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "io/ioutil" + "log" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + err := doc.GenYamlTree(kubectl, "./") + if err != nil { + log.Fatal(err) + } +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate yaml docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenYaml` instead of `GenYamlTree` + +```go + out := new(bytes.Buffer) + doc.GenYaml(cmd, out) +``` + +This will write the yaml doc for ONLY "cmd" into the out, buffer. + +## Customize the output + +Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { + //... +} +``` + +```go +func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` + +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: + +```go +linkHandler := func(name string) string { + base := strings.TrimSuffix(name, path.Ext(name)) + return "/commands/" + strings.ToLower(base) + "/" +} +``` diff --git a/vendor/modules.txt b/vendor/modules.txt index f61bee77f..ea3a9fe0b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -84,6 +84,9 @@ github.com/coreos/go-semver/semver ## explicit; go 1.12 github.com/coreos/go-systemd/v22/daemon github.com/coreos/go-systemd/v22/journal +# github.com/cpuguy83/go-md2man/v2 v2.0.2 +## explicit; go 1.11 +github.com/cpuguy83/go-md2man/v2/md2man # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew @@ -541,6 +544,7 @@ github.com/skratchdot/open-golang/open # github.com/spf13/cobra v1.7.0 ## explicit; go 1.15 github.com/spf13/cobra +github.com/spf13/cobra/doc # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag
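The go.mod, go.sum, and vendor/modules.txt additions above pull in go-md2man solely because cobra's man-page generator renders Markdown to roff through it. A minimal sketch of what the vendored `md2man.Render` does; the input text is made up for illustration, only the `Render([]byte) []byte` signature comes from the vendored package:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Render converts a Markdown document into roff; cobra's doc.GenMan feeds
	// it the generated help text to produce man pages.
	md := []byte("# EXAMPLE 1\n\n## NAME\nexample \\- a short description\n\n## OPTIONS\n**--flag**\n\tdoes something\n")
	roff := md2man.Render(md)
	fmt.Println(string(roff))
}
```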