diff --git a/.custom-gcl.yml b/.custom-gcl.yml index 294f3ff35..6af85c46a 100644 --- a/.custom-gcl.yml +++ b/.custom-gcl.yml @@ -1,5 +1,5 @@ # This has to be >= v1.57.0 for module plugin system support. -version: v1.59.1 +version: v1.61.0 plugins: - module: "go.uber.org/nilaway" import: "go.uber.org/nilaway/cmd/gclplugin" diff --git a/.github/workflows/compatibility.yaml b/.github/workflows/compatibility.yaml index 0c43157d6..e3450d120 100644 --- a/.github/workflows/compatibility.yaml +++ b/.github/workflows/compatibility.yaml @@ -18,7 +18,7 @@ jobs: steps: - name: Check out code into the Go module directory uses: actions/checkout@v4 - - name: Set up Go 1.21 + - name: Set up Go uses: actions/setup-go@v5 with: go-version-file: "go.mod" diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 426290a66..af92193c7 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -41,7 +41,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version-file: go.mod - name: Setup Just uses: extractions/setup-just@v2 - name: Setup Syft @@ -75,7 +75,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version-file: go.mod - name: Setup Just uses: extractions/setup-just@v2 - name: Setup Syft @@ -486,4 +486,3 @@ jobs: echo "======================================================================================================================" kubectl describe pods -n ${{ env.VCLUSTER_NAMESPACE }} exit 1 - diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 1ba2b42f0..5c10b6cc3 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -47,7 +47,7 @@ jobs: fi - name: Install golangci-lint - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.1 + run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0 - name: Build custom golangci-lint run: golangci-lint custom diff --git 
a/.github/workflows/release.yaml b/.github/workflows/release.yaml index aef62bfd4..531e7141b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -24,7 +24,7 @@ jobs: uses: actions/setup-go@v5 with: cache: false - go-version: "1.22" + go-version-file: go.mod - name: Setup Just uses: extractions/setup-just@v2 - name: Setup Cosgin @@ -59,7 +59,7 @@ jobs: - uses: "goreleaser/goreleaser-action@v6" with: args: release --clean --timeout 60m - version: '~> v2' + version: "~> v2" env: GITHUB_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} TELEMETRY_PRIVATE_KEY: ${{ secrets.VCLUSTER_TELEMETRY_PRIVATE_KEY }} diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index 814d6920f..a835f4959 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -36,12 +36,12 @@ jobs: name: Execute all go tests runs-on: ubuntu-22.04 steps: - - name: Set up Go 1.21 + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + - name: Set up Go uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version-file: go.mod cache: false - - name: Check out code into the Go module directory - uses: actions/checkout@v4 - name: Execute unit tests run: ./hack/test.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6c7753cfd..362e20564 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,17 +32,17 @@ There are a number of areas where contributions can be accepted: We recommend developing vCluster directly on a local Kubernetes cluster as it provides faster feedback. There are two ways that we recommend developing. -* DevSpace -* Locally +- DevSpace +- Locally ## Pre-requisites for Development ### Tools -* Docker needs to be installed (e.g. docker-desktop, orbstack, rancher desktop etc.) -* [kubectl](https://kubernetes.io/docs/tasks/tools/) -* [Helm v3.10.0+](https://helm.sh/docs/intro/install/) -* Local Kubernetes v1.26+ cluster (i.e. 
Docker Desktop, [minikube](https://minikube.sigs.k8s.io/docs/start/), KinD or similar) +- Docker needs to be installed (e.g. docker-desktop, orbstack, rancher desktop etc.) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [Helm v3.10.0+](https://helm.sh/docs/intro/install/) +- Local Kubernetes v1.26+ cluster (i.e. Docker Desktop, [minikube](https://minikube.sigs.k8s.io/docs/start/), KinD or similar) ### Fork and Clone the vcluster repo @@ -69,13 +69,13 @@ Follow the guide on how to install [DevSpace](https://github.com/loft-sh/devspac Ensure your `kubectl` is connected to the local Kubernetes cluster. ``` -$ kubectl get namespaces +kubectl get namespaces ``` In your Github `vcluster` directory, run: ``` -$ devspace dev +devspace dev ``` Which uses the `devspace.yaml` file in the `vcluster` directory to deploy a vCluster and launch DevSpace: @@ -155,7 +155,6 @@ vcluster-0:vcluster-dev$ go run -mod vendor cmd/vcluster/main.go start Now, you can start to work with the virtual cluster based on the source code. This vCluster is running on your local Kubernetes cluster. - If you change a file locally, DevSpace will automatically sync the file into the Devspace container. After any changes, re-run the same command in the DevSpace terminal to apply the changes. #### Start vcluster in DevSpace in debug mode via `dlv` @@ -165,7 +164,7 @@ You can either debug with Delve within DevSpace or locally. Devspace is more con Run vCluster in the debug mode with Delve in the `vcluster` directory. Note: Other sessions of DevSpace will need to be terminated before starting another ``` -$ devspace dev -n vcluster +devspace dev -n vcluster ``` Once DevSpace launches and you are in the `vcluster` pod, run the following delve command. 
@@ -190,7 +189,7 @@ Download the [vCluster CLI](https://www.vcluster.com/docs/get-started/) and use By connecting to the vCluster using the CLI, you set your local KubeConfig to the virtual cluster ``` -$ vcluster connect vcluster +vcluster connect vcluster ``` ## Build and Test the vcluster CLI tool @@ -198,7 +197,7 @@ $ vcluster connect vcluster Build the CLI tool ``` -$ go generate ./... && go build -o vcluster cmd/vclusterctl/main.go # build vcluster cli +go generate ./... && go build -o vcluster cmd/vclusterctl/main.go # build vcluster cli ``` Test the built CLI tool @@ -208,12 +207,13 @@ Test the built CLI tool ``` ## Developing without DevSpace + ### Pre-requisites -* [Golang v1.22](https://go.dev/doc/install) -* [Goreleaser](https://goreleaser.com/install/) -* [Just](https://github.com/casey/just) -* [Kind](https://kind.sigs.k8s.io/) +- [Golang v1.22](https://go.dev/doc/install) +- [Goreleaser](https://goreleaser.com/install/) +- [Just](https://github.com/casey/just) +- [Kind](https://kind.sigs.k8s.io/) ### Uninstall vCluster CLI @@ -266,6 +266,7 @@ You can now use your cluster with: kubectl cluster-info --context kind-kind ``` + ### Build vCluster Container Image ``` @@ -279,7 +280,7 @@ Note: Feel free to push this image into your own registry. If using kind as your local Kubernetes cluster, you need to import the image into kind. 
``` -$ kind load docker-image my-vcluster:0.0.1 +kind load docker-image my-vcluster:0.0.1 ``` ### Create vCluster with self-compiled vCluster CLI @@ -305,7 +306,7 @@ controlPlane: Launch your vCluster using your `vcluster.yaml` ``` -$ ./dist//vcluster create my-vcluster -n my-vcluster -f ./vcluster.yaml --local-chart-dir chart +./dist//vcluster create my-vcluster -n my-vcluster -f ./vcluster.yaml --local-chart-dir chart ``` ### Access your vCluster and Set your local KubeConfig @@ -313,7 +314,7 @@ $ ./dist//vcluster create my-vcluster -n my-vcluster -f ./vcluster.yaml -- By connecting to the vCluster using the CLI, you set your local KubeConfig to the virtual cluster ``` -$ ./dist//vcluster connect my-vcluster +./dist//vcluster connect my-vcluster ``` # Running vCluster Tests @@ -325,7 +326,7 @@ All of the tests are located in the vcluster directory. Run the entire unit test suite. ``` -$ ./hack/test.sh +./hack/test.sh ``` ## Running the e2e Test Suite @@ -333,8 +334,8 @@ $ ./hack/test.sh Run the e2e tests, that are located in the e2e folder. ``` -$ just delete-kind -$ just e2e +just delete-kind +just e2e ``` @@ -344,7 +345,6 @@ If [Ginkgo](https://github.com/onsi/ginkgo#global-installation) is already insta For running conformance tests, please take a look at [conformance tests](https://github.com/loft-sh/tree/vcluster/main/conformance/v1.21) - # License This project is licensed under the Apache 2.0 License. @@ -352,4 +352,3 @@ This project is licensed under the Apache 2.0 License. # Copyright notice It is important to state that you retain copyright for your contributions, but agree to license them for usage by the project and author(s) under the Apache 2.0 license. Git retains history of authorship, but we use a catch-all statement rather than individual names. 
- diff --git a/Dockerfile b/Dockerfile index f00fc41f5..c9425db9a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ ARG KINE_VERSION="v0.13.1" FROM rancher/kine:${KINE_VERSION} as kine # Build program -FROM golang:1.22 as builder +FROM golang:1.23 as builder WORKDIR /vcluster-dev ARG TARGETOS diff --git a/README.md b/README.md index 7f27ac7b0..391933ed2 100644 --- a/README.md +++ b/README.md @@ -8,11 +8,9 @@ [![Join us on Slack!](docs/static/media/slack.svg)](https://slack.loft.sh/) [![Open in DevPod!](https://devpod.sh/assets/open-in-devpod.svg)](https://devpod.sh/open#https://github.com/loft-sh/vcluster) - - Virtual clusters are fully functional Kubernetes clusters nested inside a physical host cluster providing better isolation and flexibility to support multi-tenancy. Multiple teams can operate independently within the same physical infrastructure while minimizing conflicts, maximizing autonomy, and reducing costs. -Virtual clusters run inside host cluster namespaces but function as separate Kubernetes clusters, with their own API server, control plane, syncer, and set of resources. While virtual clusters share the physical resources of the host cluster (such as CPU, memory, and storage), they manage their resources independently, allowing for efficient utilization and scaling. +Virtual clusters run inside host cluster namespaces but function as separate Kubernetes clusters, with their own API server, control plane, syncer, and set of resources. While virtual clusters share the physical resources of the host cluster (such as CPU, memory, and storage), they manage their resources independently, allowing for efficient utilization and scaling. Virtual clusters interact with the host cluster for resource scheduling and networking but maintain a level of abstraction to ensure operations within a virtual cluster don't directly affect the host cluster's global state. 
@@ -25,41 +23,50 @@ Virtual clusters interact with the host cluster for resource scheduling and netw
## Benefits + Virtual clusters provide immense benefits for large-scale Kubernetes deployments and multi-tenancy. ### Robust security and isolation + - **Granular Permissions:** vCluster users operate with minimized permissions in the host cluster, significantly reducing the risk of privileged access misuse. Within their vCluster, users have admin-level control, enabling them to manage CRDs, RBAC, and other security policies independently. - **Isolated Control Plane:** Each vCluster comes with its own dedicated API server and control plane, creating a strong isolation boundary. - **Customizable Security Policies:** Tenants can implement additional vCluster-specific governance, including OPA policies, network policies, resource quotas, limit ranges, and admission control, in addition to the existing policies and security measures in the underlying physical host cluster. - **Enhanced Data Protection:** With options for separate backing stores, including embedded SQLite, etcd, or external databases, virtual clusters allow for isolated data management, reducing the risk of data leakage between tenants. ### Access for tenants + - **Full Admin Access per Tenant:** Tenants can freely deploy CRDs, create namespaces, taint, and label nodes, and manage cluster-scoped resources typically restricted in standard Kubernetes namespaces. - **Isolated yet Integrated Networking:** While ensuring automatic isolation (for example, pods in different virtual clusters cannot communicate by default), vCluster allows for configurable network policies and service sharing, supporting both separation and sharing as needed. - **Node Management:** Assign static nodes to specific virtual clusters or share node pools among multiple virtual clusters, providing flexibility in resource allocation. 
### Cost-effectiveness and reduced overhead + - **Lightweight Infrastructure:** Virtual clusters are significantly more lightweight than physical clusters, able to spin up in seconds, which contrasts sharply with the lengthy provisioning times often seen in environments like EKS (~45 minutes). - **Resource Efficiency:** By sharing the underlying host cluster's resources, virtual clusters minimize the need for additional physical infrastructure, reducing costs and environmental impact. -- **Simplified Management:** The vCluster control plane, running inside a single pod, along with optional integrated CoreDNS, minimizes the operational overhead, making virtual clusters especially suitable for large-scale deployments and multi-tenancy scenarios. +- **Simplified Management:** The vCluster control plane, running inside a single pod, along with optional integrated CoreDNS, minimizes the operational overhead, making virtual clusters especially suitable for large-scale deployments and multi-tenancy scenarios. ### Enhanced flexibility and compatibility + - **Diverse Kubernetes Environments:** vCluster supports different Kubernetes versions and distributions (including K8s, K3s, and K0s), allowing version skews. This makes it possible to tailor each virtual cluster to specific requirements without impacting others. - **Adaptable Backing Stores:** Choose from a range of data stores, from lightweight (SQLite) to enterprise-grade options (embedded etcd, external data stores like Global RDS), catering to various scalability and durability needs. - **Runs Anywhere:** Virtual clusters can run on EKS, GKE, AKS, OpenShift, RKE, K3s, cloud, edge, and on-prem. As long as it's a K8s cluster, you can run a virtual cluster on top of it. ### Improved scalability + - **Reduced API Server Load:** Virtual clusters, each with their own dedicated API server, significantly reduce the operational load on the host cluster's Kubernetes API server by isolating and handling requests internally. 
- **Conflict-Free CRD Management:** Independent management of CRDs within each virtual cluster eliminates the potential for CRD conflicts and version discrepancies, ensuring smoother operations and easier scaling as the user base expands. ## Common use cases + ### Pre-production -- **Empower developers with self-service Kubernetes:** Simplify Kubernetes access for developers through self-service virtual clusters, reducing human error and enhancing developer autonomy without compromising security and compliance requirements. + +- **Empower developers with self-service Kubernetes:** Simplify Kubernetes access for developers through self-service virtual clusters, reducing human error and enhancing developer autonomy without compromising security and compliance requirements. - **Accelerate CI/CD with ephemeral Kubernetes clusters:** Instantly create clean, new virtual Kubernetes clusters for each pull request, enabling fast, isolated testing and PR previews without wait times and the struggles of a shared test environment. ### Production + - **Elevate your ISV offering with a dedicated cluster per customer:** Host each customer in a virtual cluster with strict tenant isolation and seamless scalability, while consolidating essential tools into a unified platform stack serving multiple tenants. - **Build a managed Kubernetes service with best-in-class COGS and high margins:** Enable direct customer access to dedicated virtual Kubernetes clusters, streamlining node and resource allocation for industry-leading efficiency and unparalleled scalability. @@ -72,11 +79,10 @@ Refer to our [quick start guide](https://www.vcluster.com/docs/vcluster/) to dep Thank you for your interest in contributing! Please refer to [CONTRIBUTING.md](https://github.com/loft-sh/vcluster/blob/main/CONTRIBUTING.md) for guidance. - ## License Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/go.mod b/go.mod index 10d75aedd..042d762eb 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/loft-sh/vcluster -go 1.22.4 +go 1.23.2 require ( github.com/blang/semver v3.5.1+incompatible diff --git a/pkg/config/validation.go b/pkg/config/validation.go index 8cc3e7d36..5732a2442 100644 --- a/pkg/config/validation.go +++ b/pkg/config/validation.go @@ -20,9 +20,7 @@ var allowedPodSecurityStandards = map[string]bool{ "restricted": true, } -var ( - verbs = []string{"get", "list", "create", "update", "patch", "watch", "delete", "deletecollection"} -) +var verbs = []string{"get", "list", "create", "update", "patch", "watch", "delete", "deletecollection"} func ValidateConfigAndSetDefaults(config *VirtualClusterConfig) error { // check the value of pod security standard @@ -61,12 +59,24 @@ func ValidateConfigAndSetDefaults(config *VirtualClusterConfig) error { // check if nodes controller needs to be enabled if config.ControlPlane.Advanced.VirtualScheduler.Enabled && !config.Sync.FromHost.Nodes.Enabled { - return fmt.Errorf("sync.fromHost.nodes.enabled is false, but required if using virtual scheduler") + return errors.New("sync.fromHost.nodes.enabled is false, but required if using virtual scheduler") } // check if storage classes and host storage classes are enabled at the same time if config.Sync.FromHost.StorageClasses.Enabled == "true" && config.Sync.ToHost.StorageClasses.Enabled { - return fmt.Errorf("you cannot enable both sync.fromHost.storageClasses.enabled and sync.toHost.storageClasses.enabled at the same time. 
Choose only one of them") + } + + if config.Sync.FromHost.PriorityClasses.Enabled && config.Sync.ToHost.PriorityClasses.Enabled { + return errors.New("cannot sync priorityclasses to and from host at the same time") + } + + // volumesnapshots and volumesnapshotcontents are dependent on each other + if config.Sync.ToHost.VolumeSnapshotContents.Enabled && !config.Sync.ToHost.VolumeSnapshots.Enabled { + return errors.New("when syncing volume snapshots contents to the host, one must set sync.toHost.volumeSnapshots.enabled to true") + } + if config.Sync.ToHost.VolumeSnapshots.Enabled && !config.Sync.ToHost.VolumeSnapshotContents.Enabled { + return errors.New("when syncing volume snapshots to the host, one must set sync.toHost.volumeSnapshotContents.enabled to true") } // validate central admission control @@ -122,13 +132,13 @@ func ValidateConfigAndSetDefaults(config *VirtualClusterConfig) error { func validateDistro(config *VirtualClusterConfig) error { enabledDistros := 0 - if config.Config.ControlPlane.Distro.K3S.Enabled { + if config.ControlPlane.Distro.K3S.Enabled { enabledDistros++ } - if config.Config.ControlPlane.Distro.K0S.Enabled { + if config.ControlPlane.Distro.K0S.Enabled { enabledDistros++ } - if config.Config.ControlPlane.Distro.K8S.Enabled { + if config.ControlPlane.Distro.K8S.Enabled { enabledDistros++ } diff --git a/pkg/controllers/resources/volumesnapshotclasses/syncer_test.go b/pkg/controllers/resources/volumesnapshotclasses/syncer_test.go index b860e632c..875d26b6b 100644 --- a/pkg/controllers/resources/volumesnapshotclasses/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshotclasses/syncer_test.go @@ -32,10 +32,7 @@ func TestSync(t *testing.T) { vMoreParamsVSC := vBaseVSC.DeepCopy() vMoreParamsVSC.Parameters["additional"] = "param" - syncertesting.RunTestsWithContext(t, 
func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { - vConfig.Sync.ToHost.VolumeSnapshots.Enabled = true - return syncertesting.NewFakeRegisterContext(vConfig, pClient, vClient) - }, []*syncertesting.SyncTest{ + tests := []*syncertesting.SyncTest{ { Name: "Create backward", InitialVirtualState: []runtime.Object{}, @@ -96,5 +93,13 @@ func TestSync(t *testing.T) { assert.NilError(t, err) }, }, - }) + } + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + test.Run(t, syncertesting.NewContextFunc(func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + vConfig.Sync.ToHost.VolumeSnapshots.Enabled = true + return syncertesting.NewFakeRegisterContext(vConfig, pClient, vClient) + })) + }) + } } diff --git a/pkg/controllers/resources/volumesnapshotcontents/syncer_test.go b/pkg/controllers/resources/volumesnapshotcontents/syncer_test.go index f8d6ec305..c3d2245e2 100644 --- a/pkg/controllers/resources/volumesnapshotcontents/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshotcontents/syncer_test.go @@ -172,6 +172,7 @@ func TestSync(t *testing.T) { createContext := func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { vConfig.Sync.ToHost.VolumeSnapshots.Enabled = true + vConfig.Sync.ToHost.VolumeSnapshotContents.Enabled = true return syncertesting.NewFakeRegisterContext(vConfig, pClient, vClient) } diff --git a/pkg/controllers/resources/volumesnapshots/syncer_test.go b/pkg/controllers/resources/volumesnapshots/syncer_test.go index 484e5396b..1d29fae83 100644 --- a/pkg/controllers/resources/volumesnapshots/syncer_test.go +++ b/pkg/controllers/resources/volumesnapshots/syncer_test.go @@ -105,10 +105,13 @@ func TestSync(t *testing.T) { vWithStatus := 
vPVSourceSnapshot.DeepCopy() vWithStatus.Status = pWithStatus.Status - syncertesting.RunTestsWithContext(t, func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { + createContext := syncertesting.NewContextFunc(func(vConfig *config.VirtualClusterConfig, pClient *testingutil.FakeIndexClient, vClient *testingutil.FakeIndexClient) *synccontext.RegisterContext { vConfig.Sync.ToHost.VolumeSnapshots.Enabled = true + vConfig.Sync.ToHost.VolumeSnapshotContents.Enabled = true return syncertesting.NewFakeRegisterContext(vConfig, pClient, vClient) - }, []*syncertesting.SyncTest{ + }) + + tests := []*syncertesting.SyncTest{ { Name: "Create with PersistentVolume source", InitialVirtualState: []runtime.Object{vPVSourceSnapshot.DeepCopy()}, @@ -229,12 +232,19 @@ func TestSync(t *testing.T) { volumesnapshotv1.SchemeGroupVersion.WithKind("VolumeSnapshot"): {vDeletingSnapshot}, }, ExpectedPhysicalState: map[schema.GroupVersionKind][]runtime.Object{ - volumesnapshotv1.SchemeGroupVersion.WithKind("VolumeSnapshot"): {}}, + volumesnapshotv1.SchemeGroupVersion.WithKind("VolumeSnapshot"): {}, + }, Sync: func(ctx *synccontext.RegisterContext) { syncCtx, syncer := syncertesting.FakeStartSyncer(t, ctx, New) _, err := syncer.(*volumeSnapshotSyncer).Sync(syncCtx, synccontext.NewSyncEvent(pPVSourceSnapshot.DeepCopy(), vDeletingSnapshot)) assert.NilError(t, err) }, }, - }) + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + test.Run(t, createContext) + }) + } } diff --git a/pkg/mappings/generic/mapper.go b/pkg/mappings/generic/mapper.go index 6e4bf1abb..4ad1d2314 100644 --- a/pkg/mappings/generic/mapper.go +++ b/pkg/mappings/generic/mapper.go @@ -207,7 +207,7 @@ func tryToMatchHostNameShort(ctx *synccontext.SyncContext, req types.NamespacedN } vNamespace := nameMapping.VirtualName.Namespace - vName := strings.Replace(req.Name, nameMapping.HostName.Name, 
nameMapping.VirtualName.Name, -1) + vName := strings.ReplaceAll(req.Name, nameMapping.HostName.Name, nameMapping.VirtualName.Name) klog.FromContext(ctx).V(1).Info("Translated back name/namespace via single-namespace mode method", "req", req.String(), "ret", types.NamespacedName{ Namespace: vNamespace, Name: vName, diff --git a/pkg/mappings/generic/mirror.go b/pkg/mappings/generic/mirror.go index 581d0a358..1f1f88b28 100644 --- a/pkg/mappings/generic/mirror.go +++ b/pkg/mappings/generic/mirror.go @@ -35,7 +35,7 @@ func (n *mirrorMapper) Migrate(_ *synccontext.RegisterContext, _ synccontext.Map return nil } -func (n *mirrorMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, _ client.Object) (retName types.NamespacedName) { +func (n *mirrorMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.NamespacedName, _ client.Object) types.NamespacedName { pNamespace := req.Namespace if pNamespace != "" { pNamespace = translate.Default.HostNamespace(ctx, pNamespace) @@ -47,7 +47,7 @@ func (n *mirrorMapper) VirtualToHost(ctx *synccontext.SyncContext, req types.Nam } } -func (n *mirrorMapper) HostToVirtual(_ *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) (retName types.NamespacedName) { +func (n *mirrorMapper) HostToVirtual(_ *synccontext.SyncContext, req types.NamespacedName, pObj client.Object) types.NamespacedName { if pObj != nil { pAnnotations := pObj.GetAnnotations() if pAnnotations != nil && pAnnotations[translate.NameAnnotation] != "" { diff --git a/pkg/mappings/generic/recorder.go b/pkg/mappings/generic/recorder.go index 2912f98bc..7afa77900 100644 --- a/pkg/mappings/generic/recorder.go +++ b/pkg/mappings/generic/recorder.go @@ -25,7 +25,7 @@ type recorder struct { } func (n *recorder) Migrate(ctx *synccontext.RegisterContext, mapper synccontext.Mapper) error { - gvk := n.Mapper.GroupVersionKind() + gvk := n.GroupVersionKind() listGvk := schema.GroupVersionKind{ Group: gvk.Group, Version: gvk.Version, diff --git 
a/pkg/mappings/resources/volumesnapshotclasses.go b/pkg/mappings/resources/volumesnapshotclasses.go index cc43833e3..7bc481a23 100644 --- a/pkg/mappings/resources/volumesnapshotclasses.go +++ b/pkg/mappings/resources/volumesnapshotclasses.go @@ -13,7 +13,7 @@ import ( var volumeSnapshotClassesCRD string func CreateVolumeSnapshotClassesMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - if !ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled { + if !ctx.Config.Sync.FromHost.VolumeSnapshotClasses.Enabled { return generic.NewMirrorMapper(&volumesnapshotv1.VolumeSnapshotClass{}) } diff --git a/pkg/mappings/resources/volumesnapshotcontents.go b/pkg/mappings/resources/volumesnapshotcontents.go index b5b50c056..418f5a598 100644 --- a/pkg/mappings/resources/volumesnapshotcontents.go +++ b/pkg/mappings/resources/volumesnapshotcontents.go @@ -17,7 +17,7 @@ import ( var volumeSnapshotContentsCRD string func CreateVolumeSnapshotContentsMapper(ctx *synccontext.RegisterContext) (synccontext.Mapper, error) { - if !ctx.Config.Sync.ToHost.VolumeSnapshots.Enabled { + if !ctx.Config.Sync.ToHost.VolumeSnapshotContents.Enabled { return generic.NewMirrorMapper(&volumesnapshotv1.VolumeSnapshotContent{}) } diff --git a/pkg/syncer/testing/testing.go b/pkg/syncer/testing/testing.go index 605ff1d8a..0446a56d2 100644 --- a/pkg/syncer/testing/testing.go +++ b/pkg/syncer/testing/testing.go @@ -32,13 +32,13 @@ type SyncTest struct { ExpectedVirtualState map[schema.GroupVersionKind][]runtime.Object Sync func(ctx *synccontext.RegisterContext) Compare Compare - Name string - InitialPhysicalState []runtime.Object - InitialVirtualState []runtime.Object AdjustConfig func(vConfig *config.VirtualClusterConfig) pClient *testingutil.FakeIndexClient vClient *testingutil.FakeIndexClient vConfig *config.VirtualClusterConfig + Name string + InitialPhysicalState []runtime.Object + InitialVirtualState []runtime.Object } func RunTests(t *testing.T, tests []*SyncTest) {