diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 83e6d2647..681c985b3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,6 +18,11 @@ env: AWS_REGION: us-west-2 AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} + AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }} + AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} + NAMESPACE: hmc-system jobs: e2etest: diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..39d9547ea --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +* @Kshatrix @eromanova @a13x5 +/.github/ @Kshatrix @squizzi @eromanova @a13x5 +/test/ @Kshatrix @squizzi @eromanova @a13x5 diff --git a/Makefile b/Makefile index e2cb73c0e..a7498d0d6 100644 --- a/Makefile +++ b/Makefile @@ -111,7 +111,7 @@ test: generate-all fmt vet envtest tidy external-crd ## Run tests. # compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests using a Kind k8s instance as the management cluster. test-e2e: cli-install - KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=2h + KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=3h .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint @@ -269,7 +269,11 @@ helm-push: helm-package chart_version=$$(echo $$base | grep -o "v\{0,1\}[0-9]\+\.[0-9]\+\.[0-9].*"); \ chart_name="$${base%-"$$chart_version"}"; \ echo "Verifying if chart $$chart_name, version $$chart_version already exists in $(REGISTRY_REPO)"; \ - chart_exists=$$($(HELM) pull $$repo_flag $(REGISTRY_REPO) $$chart_name --version $$chart_version --destination /tmp 2>&1 | grep "not found" || true); \ + if $(REGISTRY_IS_OCI); then \ + chart_exists=$$($(HELM) pull $$repo_flag $(REGISTRY_REPO)/$$chart_name --version $$chart_version --destination /tmp 2>&1 | grep "not found" || true); \ + else \ + chart_exists=$$($(HELM) pull $$repo_flag $(REGISTRY_REPO) $$chart_name --version $$chart_version --destination /tmp 2>&1 | grep "not found" || true); \ + fi; \ if [ -z "$$chart_exists" ]; then \ echo "Chart $$chart_name version $$chart_version already exists in the repository."; \ else \ @@ -303,7 +307,7 @@ dev-release: .PHONY: dev-aws-creds dev-aws-creds: envsubst - @NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/aws-credentials.yaml | $(KUBECTL) apply -f - + @NAMESPACE=$(NAMESPACE) $(ENVSUBST) -i config/dev/aws-credentials.yaml | $(KUBECTL) apply -f - .PHONY: dev-azure-creds dev-azure-creds: envsubst @@ -374,7 +378,7 @@ ENVSUBST ?= $(LOCALBIN)/envsubst-$(ENVSUBST_VERSION) AWSCLI ?= $(LOCALBIN)/aws ## Tool Versions -CONTROLLER_TOOLS_VERSION ?= v0.14.0 +CONTROLLER_TOOLS_VERSION ?= v0.16.3 ENVTEST_VERSION ?= release-0.17 GOLANGCI_LINT_VERSION ?= v1.61.0 HELM_VERSION ?= v3.15.1 diff --git a/PROJECT b/PROJECT index 678c620b4..280c37db1 100644 --- a/PROJECT +++ b/PROJECT @@ -84,4 +84,13 @@ resources: kind: ServiceTemplateChain path: github.com/Mirantis/hmc/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: hmc.mirantis.com + group: hmc.mirantis.com + kind: Credential + path: github.com/Mirantis/hmc/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/README.md b/README.md index e6d3bb3b0..329d2f01d 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,14 @@ -# Mirantis 
Hybrid Cloud Platform +# Mirantis Hybrid Multi Cluster + +## Overview + +Mirantis Hybrid Multi Cluster is part of Mirantis Project 2A, which is focused on delivering an +open source approach to providing an enterprise-grade multi-cluster Kubernetes management solution +based entirely on standard open source tooling. + +## Documentation + +Detailed documentation is available in [Project 2A Docs](https://mirantis.github.io/project-2a-docs/) ## Installation @@ -34,10 +44,7 @@ Optionally, the following CLIs may be helpful: ### Providers configuration -Follow the instruction to configure providers. Currently supported providers: -* [AWS](docs/aws/main.md#prepare-the-aws-infra-provider) -* [Azure](docs/azure/main.md) -* [vSphere](docs/vsphere/main.md) +Full details on the provider configuration can be found in the Project 2A Docs; see [Documentation](#documentation) ### Installation @@ -47,8 +54,6 @@ export KUBECONFIG= helm install hmc oci://ghcr.io/mirantis/hmc/charts/hmc --version -n hmc-system --create-namespace ``` -See [HMC configuration options](templates/hmc/values.yaml). - #### Extended Management configuration By default, the Hybrid Container Cloud is being deployed with the following configuration: @@ -106,9 +111,7 @@ export KUBECONFIG= kubectl get template -n hmc-system -o go-template='{{ range .items }}{{ if eq .status.type "deployment" }}{{ .metadata.name }}{{ printf "\n" }}{{ end }}{{ end }}' ``` -For details about the `Template system` in HMC, see [Templates system](docs/templates/main.md#templates-system). - -If you want to deploy hostded control plate template, make sure to check additional notes on [Hosted control plane](docs/aws/hosted-control-plane.md). +If you want to deploy a hosted control plane template, make sure to check the additional notes on Hosted control plane in the 2A Docs; see [Documentation](#documentation). 2. Create the file with the `ManagedCluster` configuration: diff --git a/api/v1alpha1/credential_types.go b/api/v1alpha1/credential_types.go new file mode 100644 index 000000000..126709275 --- /dev/null +++ b/api/v1alpha1/credential_types.go @@ -0,0 +1,69 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CredentialState string + +const ( + CredentialReady CredentialState = "Ready" + CredentialNotFound CredentialState = "Cluster Identity not found" + CredentialWrongType CredentialState = "Mismatched type" +) + +// CredentialSpec defines the desired state of Credential +type CredentialSpec struct { + // Reference to the Credential Identity + IdentityRef *corev1.ObjectReference `json:"identityRef"` + // Description of the Credential object + Description string `json:"description,omitempty"` // WARN: noop +} + +// CredentialStatus defines the observed state of Credential +type CredentialStatus struct { + State CredentialState `json:"state,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=cred +// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state` +// +kubebuilder:printcolumn:name="Description",type=string,JSONPath=`.spec.description` + +// Credential is the Schema for the credentials API +type Credential struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CredentialSpec `json:"spec,omitempty"` + Status CredentialStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// CredentialList contains a list of Credential +type CredentialList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Credential `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Credential{}, &CredentialList{}) +} diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 9dbc2c30c..37b939d51 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -25,7 +25,9 @@ const ( BlockingFinalizer = "hmc.mirantis.com/cleanup" ManagedClusterFinalizer = "hmc.mirantis.com/managed-cluster" - FluxHelmChartNameKey = "helm.toolkit.fluxcd.io/name" + FluxHelmChartNameKey = "helm.toolkit.fluxcd.io/name" + FluxHelmChartNamespaceKey = "helm.toolkit.fluxcd.io/namespace" + HMCManagedLabelKey = "hmc.mirantis.com/managed" HMCManagedLabelValue = "true" @@ -36,6 +38,8 @@ const ( // ManagedClusterKind is the string representation of a ManagedCluster. ManagedClusterKind = "ManagedCluster" + // CredentialReadyCondition indicates if referenced Credential exists and has Ready state + CredentialReadyCondition = "CredentialReady" // TemplateReadyCondition indicates the referenced Template exists and valid. TemplateReadyCondition = "TemplateReady" // HelmChartReadyCondition indicates the corresponding HelmChart is valid and ready. @@ -60,6 +64,26 @@ const ( ProgressingReason string = "Progressing" ) +// ManagedClusterServiceSpec represents a Service within ManagedCluster +type ManagedClusterServiceSpec struct { + // Template is a reference to a Template object located in the same namespace. + // +kubebuilder:validation:MinLength=1 + Template string `json:"template"` + // Disable can be set to disable handling of this service. + // +optional + Disable bool `json:"disable"` + // Name is the chart release. + // +kubebuilder:validation:MinLength=1 + Name string `json:"name"` + // Namespace is the namespace the release will be installed in. + // It will default to Name if not provided. + // +optional + Namespace string `json:"namespace"` + // Values is the helm values to be passed to the template. 
+ // +optional + Values *apiextensionsv1.JSON `json:"values,omitempty"` +} + // ManagedClusterSpec defines the desired state of ManagedCluster type ManagedClusterSpec struct { // Config allows to provide parameters for template customization. @@ -72,7 +96,12 @@ type ManagedClusterSpec struct { // Template is a reference to a Template object located in the same namespace. Template string `json:"template"` // DryRun specifies whether the template should be applied after validation or only validated. - DryRun bool `json:"dryRun,omitempty"` + DryRun bool `json:"dryRun,omitempty"` + Credential string `json:"credential,omitempty"` + // Services is a list of services created via ServiceTemplates + // that could be installed on the target cluster. + // +optional + Services []ManagedClusterServiceSpec `json:"services,omitempty"` } // ManagedClusterStatus defines the observed state of ManagedCluster diff --git a/api/v1alpha1/management_types.go b/api/v1alpha1/management_types.go index 029f9b0ab..f2b73174a 100644 --- a/api/v1alpha1/management_types.go +++ b/api/v1alpha1/management_types.go @@ -25,8 +25,10 @@ const ( CoreCAPIName = "capi" - ManagementName = "hmc" - ManagementFinalizer = "hmc.mirantis.com/management" + ManagementKind = "Management" + ManagementName = "hmc" + ManagementFinalizer = "hmc.mirantis.com/management" + TemplateManagementName = "hmc" ) // ManagementSpec defines the desired state of Management diff --git a/api/v1alpha1/templatemanagement_types.go b/api/v1alpha1/templatemanagement_types.go index 109b9b19a..d64b59ae2 100644 --- a/api/v1alpha1/templatemanagement_types.go +++ b/api/v1alpha1/templatemanagement_types.go @@ -18,6 +18,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const TemplateManagementKind = "TemplateManagement" + // TemplateManagementSpec defines the desired state of TemplateManagement type TemplateManagementSpec struct { // AccessRules is the list of access rules. Each AccessRule enforces diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index bc342372d..90cd85c00 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -20,7 +20,8 @@ package v1alpha1 import ( "github.com/fluxcd/helm-controller/api/v2" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -220,7 +221,7 @@ func (in *Component) DeepCopyInto(out *Component) { *out = *in if in.Config != nil { in, out := &in.Config, &out.Config - *out = new(v1.JSON) + *out = new(apiextensionsv1.JSON) (*in).DeepCopyInto(*out) } } @@ -282,6 +283,100 @@ func (in *CoreProviderTemplate) DeepCopy() *CoreProviderTemplate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Credential) DeepCopyInto(out *Credential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Credential. +func (in *Credential) DeepCopy() *Credential { + if in == nil { + return nil + } + out := new(Credential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Credential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialList) DeepCopyInto(out *CredentialList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Credential, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialList. +func (in *CredentialList) DeepCopy() *CredentialList { + if in == nil { + return nil + } + out := new(CredentialList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CredentialList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialSpec) DeepCopyInto(out *CredentialSpec) { + *out = *in + if in.IdentityRef != nil { + in, out := &in.IdentityRef, &out.IdentityRef + *out = new(v1.ObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialSpec. +func (in *CredentialSpec) DeepCopy() *CredentialSpec { + if in == nil { + return nil + } + out := new(CredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CredentialStatus) DeepCopyInto(out *CredentialStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialStatus. +func (in *CredentialStatus) DeepCopy() *CredentialStatus { + if in == nil { + return nil + } + out := new(CredentialStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HelmSpec) DeepCopyInto(out *HelmSpec) { *out = *in @@ -361,14 +456,41 @@ func (in *ManagedClusterList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterServiceSpec) DeepCopyInto(out *ManagedClusterServiceSpec) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = new(apiextensionsv1.JSON) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterServiceSpec. +func (in *ManagedClusterServiceSpec) DeepCopy() *ManagedClusterServiceSpec { + if in == nil { + return nil + } + out := new(ManagedClusterServiceSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ManagedClusterSpec) DeepCopyInto(out *ManagedClusterSpec) { *out = *in if in.Config != nil { in, out := &in.Config, &out.Config - *out = new(v1.JSON) + *out = new(apiextensionsv1.JSON) (*in).DeepCopyInto(*out) } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]ManagedClusterServiceSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSpec. @@ -1116,7 +1238,7 @@ func (in *TemplateStatusCommon) DeepCopyInto(out *TemplateStatusCommon) { out.TemplateValidationStatus = in.TemplateValidationStatus if in.Config != nil { in, out := &in.Config, &out.Config - *out = new(v1.JSON) + *out = new(apiextensionsv1.JSON) (*in).DeepCopyInto(*out) } if in.ChartRef != nil { diff --git a/cmd/main.go b/cmd/main.go index 2b03b321b..9d6d03962 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -40,6 +40,7 @@ import ( "github.com/Mirantis/hmc/internal/telemetry" "github.com/Mirantis/hmc/internal/utils" hmcwebhook "github.com/Mirantis/hmc/internal/webhook" + sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" // +kubebuilder:scaffold:imports ) @@ -54,6 +55,7 @@ func init() { utilruntime.Must(hmcmirantiscomv1alpha1.AddToScheme(scheme)) utilruntime.Must(sourcev1.AddToScheme(scheme)) utilruntime.Must(hcv2.AddToScheme(scheme)) + utilruntime.Must(sveltosv1beta1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -67,6 +69,7 @@ func main() { insecureRegistry bool registryCredentialsSecret string createManagement bool + createTemplateManagement bool createTemplates bool hmcTemplatesChartName string enableTelemetry bool @@ -87,6 +90,8 @@ func main() { "Secret containing authentication credentials for the registry.") flag.BoolVar(&insecureRegistry, "insecure-registry", false, "Allow connecting to an HTTP registry.") flag.BoolVar(&createManagement, "create-management", true, "Create Management object with default configuration.") + flag.BoolVar(&createTemplateManagement, "create-template-management", true, + "Create TemplateManagement object with default configuration.") flag.BoolVar(&createTemplates, "create-templates", true, "Create HMC Templates.") flag.StringVar(&hmcTemplatesChartName, "hmc-templates-chart-name", "hmc-templates", "The name of the helm chart with HMC Templates.") @@ -206,7 +211,6 @@ func main() { } if err = (&controller.ManagedClusterReconciler{ Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), Config: mgr.GetConfig(), DynamicClient: dc, }).SetupWithManager(mgr); err != nil { @@ -224,7 +228,6 @@ func main() { } if err = (&controller.TemplateManagementReconciler{ Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), Config: mgr.GetConfig(), SystemNamespace: currentNamespace, }).SetupWithManager(mgr); err != nil { @@ -232,12 +235,13 @@ func main() { os.Exit(1) } if err = mgr.Add(&controller.Poller{ - Client: mgr.GetClient(), - Config: mgr.GetConfig(), - CreateManagement: createManagement, - CreateTemplates: createTemplates, - HMCTemplatesChartName: hmcTemplatesChartName, - SystemNamespace: currentNamespace, + Client: mgr.GetClient(), + Config: mgr.GetConfig(), + CreateManagement: createManagement, + CreateTemplateManagement: createTemplateManagement, + CreateTemplates: createTemplates, + HMCTemplatesChartName: hmcTemplatesChartName, + SystemNamespace: currentNamespace, }); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ReleaseController") os.Exit(1) @@ -253,6 +257,13 @@ func main() { 
} } + if err = (&controller.CredentialReconciler{ + Client: mgr.GetClient(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Credential") + os.Exit(1) + } + // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/dev/aws-managedcluster.yaml b/config/dev/aws-managedcluster.yaml index 5e76f4b10..8aaa6d6b5 100644 --- a/config/dev/aws-managedcluster.yaml +++ b/config/dev/aws-managedcluster.yaml @@ -18,3 +18,10 @@ spec: workersNumber: 1 installBeachHeadServices: false template: aws-standalone-cp + services: + - template: kyverno + name: kyverno + namespace: kyverno + - template: ingress-nginx + name: ingress-nginx + namespace: ingress-nginx diff --git a/config/dev/hmc_values.yaml b/config/dev/hmc_values.yaml index e81bfc3dc..6ae56befa 100644 --- a/config/dev/hmc_values.yaml +++ b/config/dev/hmc_values.yaml @@ -1,5 +1,6 @@ image: repository: hmc/controller + tag: latest controller: defaultRegistryURL: oci://hmc-local-registry:5000/charts insecureRegistry: true diff --git a/docs/assets/favicon.ico b/docs/assets/favicon.ico deleted file mode 100644 index 3d4fcc6d8..000000000 Binary files a/docs/assets/favicon.ico and /dev/null differ diff --git a/docs/assets/mirantis-logo-inverted-horizontal-one-color.svg b/docs/assets/mirantis-logo-inverted-horizontal-one-color.svg deleted file mode 100644 index c1f56f818..000000000 --- a/docs/assets/mirantis-logo-inverted-horizontal-one-color.svg +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/aws/cloudformation.md b/docs/aws/cloudformation.md deleted file mode 100644 index f14db966d..000000000 --- a/docs/aws/cloudformation.md +++ /dev/null @@ -1,21 +0,0 @@ -# AWS IAM setup - -Before launching a cluster on AWS, it's crucial to set up your AWS infrastructure provider: - -> Note. Skip steps below if you've already configured IAM policy for your account - -1. In order to use clusterawsadm you must have an administrative user in an AWS account. Once you have that - administrator user you need to set your environment variables: - -``` -export AWS_REGION= -export AWS_ACCESS_KEY_ID= -export AWS_SECRET_ACCESS_KEY= -export AWS_SESSION_TOKEN= # Optional. If you are using Multi-Factor Auth. -``` - -2. After these are set run this command to create IAM cloud formation stack: - -``` -clusterawsadm bootstrap iam create-cloudformation-stack -``` diff --git a/docs/aws/cluster-parameters.md b/docs/aws/cluster-parameters.md deleted file mode 100644 index 35bde3e8e..000000000 --- a/docs/aws/cluster-parameters.md +++ /dev/null @@ -1,80 +0,0 @@ -# AWS cluster parameters - -## Software prerequisites - -1. `clusterawsadm` CLI installed locally. -2. `AWS_B64ENCODED_CREDENTIALS` environment variable to be exported. -See [AWS credentials](credentials.md#aws-credentials-configuration) (p. 1-3) - -## AWS AMI - -By default AMI id will be looked up automatically (latest Amazon Linux 2 image -will be used). - -You can override lookup parameters to search your desired image automatically or -use AMI ID directly. -If both AMI ID and lookup paramters are defined AMI ID will have higher precedence. - -### Image lookup - -To configure automatic AMI lookup 3 parameters are used: - -`.imageLookup.format` - used directly as value for the `name` filter -(see the [describe-images filters](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html#describe-images)). 
-Supports substitutions for `{{.BaseOS}}` and `{{.K8sVersion}}` with the base OS -and kubernetes version, respectively. - -`.imageLookup.org` - AWS org ID which will be used as value for the `owner-id` -filter. - -`.imageLookup.baseOS` - will be used as value for `{{.BaseOS}}` substitution in -the `.imageLookup.format` string. - -### AMI ID - -AMI ID can be directly used in the `.amiID` parameter. - -#### CAPA prebuilt AMIs - -Use `clusterawsadm` to get available AMIs to deploy managed cluster: - -```bash -clusterawsadm ami list -``` - -For details, see [Pre-built Kubernetes AMIs](https://cluster-api-aws.sigs.k8s.io/topics/images/built-amis.html). - -## SSH access to cluster nodes - -To access the nodes using the SSH protocol, several things should be configured: - -- An SSH key added in the region where you want to deploy the cluster -- Bastion host is enabled - -### SSH keys - -Only one SSH key is supported and it should be added in AWS prior to creating -the `ManagedCluster` object. The name of the key should then be placed under `.spec.config.sshKeyName`. - -The same SSH key will be used for all machines and a bastion host. - -To enable bastion you should add `.spec.config.bastion.enabled` option in the -`ManagedCluster` object to `true`. - -Full list of the bastion configuration options could be fould in [CAPA docs](https://cluster-api-aws.sigs.k8s.io/crd/#infrastructure.cluster.x-k8s.io/v1beta1.Bastion). - -The resulting `ManagedCluster` can look like this: - -```yaml -apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster -metadata: - name: cluster-1 -spec: - template: aws-standalone-cp - config: - sshKeyName: foobar - bastion: - enabled: true -... -``` diff --git a/docs/aws/credentials.md b/docs/aws/credentials.md deleted file mode 100644 index 6f96dda16..000000000 --- a/docs/aws/credentials.md +++ /dev/null @@ -1,35 +0,0 @@ -# AWS Credentials configuration - -1. Ensure AWS user has enough permissions to deploy cluster. Ensure that these policies were attached to the AWS user: - -* `control-plane.cluster-api-provider-aws.sigs.k8s.io` -* `controllers.cluster-api-provider-aws.sigs.k8s.io` -* `nodes.cluster-api-provider-aws.sigs.k8s.io` - -2. Retrieve access key and export it as environment variable: - -``` -export AWS_REGION= -export AWS_ACCESS_KEY_ID= -export AWS_SECRET_ACCESS_KEY= -export AWS_SESSION_TOKEN= # Optional. If you are using Multi-Factor Auth. -``` - -3. Create the base64 encoded credentials using `clusterawsadm`: - -``` -export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile) -``` - -4. Create the secret with AWS variables: - -> By default, HMC fetches the AWS variables configuration from the `aws-variables` secret in the `hmc-system` -> namespace. If you want to change the name of the secret you should overwrite the configuration of the cluster -> API provider AWS in the HMC Management object. \ -> For details, see: [Extended Management Configuration](../../README.md#extended-management-configuration) - -> You can also provide additional configuration variables, but the `AWS_B64ENCODED_CREDENTIALS` parameter is required. 
- -``` -kubectl create secret generic aws-variables -n hmc-system --from-literal AWS_B64ENCODED_CREDENTIALS="$AWS_B64ENCODED_CREDENTIALS" -``` diff --git a/docs/aws/hosted-control-plane.md b/docs/aws/hosted-control-plane.md deleted file mode 100644 index 19ea7919c..000000000 --- a/docs/aws/hosted-control-plane.md +++ /dev/null @@ -1,137 +0,0 @@ -# Hosted control plane (k0smotron) deployment - -## Prerequisites - -- Management Kubernetes cluster (v1.28+) deployed on AWS with HMC installed on it -- Default storage class configured on the management cluster -- VPC id for the worker nodes -- Subnet ID which will be used along with AZ information -- AMI id which will be used to deploy worker nodes - -Keep in mind that all control plane components for all managed clusters will -reside in the management cluster. - -## Networking - -The networking resources in AWS which are needed for a managed cluster can be -reused with a management cluster. - -If you deployed your AWS Kubernetes cluster using Cluster API Provider AWS (CAPA) -you can obtain all the necessary data with the commands below or use the -template found below in the -[HMC ManagedCluster manifest -generation](#hmc-managed-cluster-manifest-generation) section. - -If using the `aws-standalone-cp` template to deploy a hosted cluster it is -recommended to use a `t3.large` or larger instance type as the `hmc-controller` -and other provider controllers will need a large amount of resources to run. - -**VPC ID** - -```bash - kubectl get awscluster -o go-template='{{.spec.network.vpc.id}}' -``` - -**Subnet ID** - -```bash - kubectl get awscluster -o go-template='{{(index .spec.network.subnets 0).resourceID}}' -``` - -**Availability zone** - -```bash - kubectl get awscluster -o go-template='{{(index .spec.network.subnets 0).availabilityZone}}' -``` - -**Security group** -```bash - kubectl get awscluster -o go-template='{{.status.networkStatus.securityGroups.node.id}}' -``` - -**AMI id** - -```bash - kubectl get awsmachinetemplate -worker-mt -o go-template='{{.spec.template.spec.ami.id}}' -``` - -If you want to use different VPCs/regions for your management or managed clusters -you should setup additional connectivity rules like [VPC peering](https://docs.aws.amazon.com/whitepapers/latest/building-scalable-secure-multi-vpc-network-infrastructure/vpc-peering.html). - - -## HMC ManagedCluster manifest - -With all the collected data your `ManagedCluster` manifest will look similar to this: - -```yaml - apiVersion: hmc.mirantis.com/v1alpha1 - kind: ManagedCluster - metadata: - name: aws-hosted-cp - spec: - template: aws-hosted-cp - config: - vpcID: vpc-0a000000000000000 - region: us-west-1 - publicIP: true - subnets: - - id: subnet-0aaaaaaaaaaaaaaaa - availabilityZone: us-west-1b - amiID: ami-0bfffffffffffffff - instanceType: t3.medium - securityGroupIDs: - - sg-0e000000000000000 -``` - -> [!NOTE] -> In this example we're using the `us-west-1` region, but you should use the region of your VPC. 
- -## HMC ManagedCluster manifest generation - -Grab the following `ManagedCluster` manifest template and save it to a file named `managedcluster.yaml.tpl`: - -```yaml -apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster -metadata: - name: aws-hosted -spec: - template: aws-hosted-cp - config: - vpcID: "{{.spec.network.vpc.id}}" - region: "{{.spec.region}}" - subnets: - - id: "{{(index .spec.network.subnets 0).resourceID}}" - availabilityZone: "{{(index .spec.network.subnets 0).availabilityZone}}" - amiID: ami-0bf2d31c356e4cb25 - instanceType: t3.medium - securityGroupIDs: - - "{{.status.networkStatus.securityGroups.node.id}}" -``` - -Then run the following command to create the `managedcluster.yaml`: - -``` -kubectl get awscluster cluster -o go-template="$(cat managedcluster.yaml.tpl)" > managedcluster.yaml -``` -## Deployment Tips -* Ensure HMC templates and the controller image are somewhere public and - fetchable. -* For installing the HMC charts and templates from a custom repository, load - the `kubeconfig` from the cluster and run the commands: - -``` -KUBECONFIG=kubeconfig IMG="ghcr.io/mirantis/hmc/controller-ci:v0.0.1-179-ga5bdf29" REGISTRY_REPO="oci://ghcr.io/mirantis/hmc/charts-ci" make dev-apply -KUBECONFIG=kubeconfig make dev-templates -``` -* The infrastructure will need to manually be marked `Ready` to get the - `MachineDeployment` to scale up. You can patch the `AWSCluster` kind using - the command: - -``` -KUBECONFIG=kubeconfig kubectl patch AWSCluster --type=merge --subresource status --patch 'status: {ready: true}' -n hmc-system -``` - -For additional information on why this is required [click here](https://docs.k0smotron.io/stable/capi-aws/#:~:text=As%20we%20are%20using%20self%2Dmanaged%20infrastructure%20we%20need%20to%20manually%20mark%20the%20infrastructure%20ready.%20This%20can%20be%20accomplished%20using%20the%20following%20command). - - diff --git a/docs/aws/main.md b/docs/aws/main.md deleted file mode 100644 index 275919cbe..000000000 --- a/docs/aws/main.md +++ /dev/null @@ -1,18 +0,0 @@ -# Prepare the AWS infra provider - -## Software prerequisites - -1. `kubectl` CLI installed locally. -2. `clusterawsadm` CLI installed locally. - -## Configure AWS IAM - -Follow the AWS IAM [setup quide](cloudformation.md#aws-iam-setup). - -## Configure AWS credentials - -Follow the AWS Credentials [configuration quide](credentials.md#aws-credentials-configuration). - -## AWS cluster parameters - -Follow the [AWS Cluster Parameters guide](cluster-parameters.md#aws-cluster-parameters) diff --git a/docs/aws/nuke.md b/docs/aws/nuke.md deleted file mode 100644 index 55a46c33d..000000000 --- a/docs/aws/nuke.md +++ /dev/null @@ -1,7 +0,0 @@ -# Nuking AWS resources -If you'd like to forcefully cleanup all AWS resources created by HMC you can use -the following command: - -``` -CLUSTER_NAME= make dev-aws-nuke -``` diff --git a/docs/azure/cluster-parameters.md b/docs/azure/cluster-parameters.md deleted file mode 100644 index 410b1fa1d..000000000 --- a/docs/azure/cluster-parameters.md +++ /dev/null @@ -1,82 +0,0 @@ -# Azure cluster parameters - -## Prerequisites - -- Azure CLI installed -- `az login` command executed - -## Cluster Identity - -To provide credentials for CAPI Azure provider (CAPZ) the `AzureClusterIdentity` -resource must be created. This should be done before provisioning any clusters. 
- - -To create the `AzureClusterIdentity` you should first get the desired -`SubscriptionID` by executing `az account list -o table` which will return list -of subscriptions available to user. - -Then you need to create service principal which will be used by CAPZ to interact -with Azure API. To do so you need to execute the following command: - -```bash -az ad sp create-for-rbac --role contributor --scopes="/subscriptions/" -``` - -The command will return json with the credentials for the service principal which -will look like this: - -```json -{ - "appId": "29a3a125-7848-4ce6-9be9-a4b3eecca0ff", - "displayName": "azure-cli", - "password": "u_RANDOMHASH", - "tenant": "2f10bc28-959b-481f-b094-eb043a87570a", -} -``` - -*Note: make sure to save this credentials and treat them like passwords.* - -With the data from the json you can now create the `AzureClusterIdentity` object -and it's secret. - -The objects created with the data above can look something like this: - -**Secret**: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: az-cluster-identity-secret - namespace: hmc-system -stringData: - clientSecret: u_RANDOMHASH -type: Opaque -``` - -**AzureClusterIdentity**: - -```yaml -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: AzureClusterIdentity -metadata: - labels: - clusterctl.cluster.x-k8s.io/move-hierarchy: "true" - name: az-cluster-identity - namespace: hmc-system -spec: - allowedNamespaces: {} - clientID: 29a3a125-7848-4ce6-9be9-a4b3eecca0ff - clientSecret: - name: az-cluster-identity-secret - namespace: hmc-system - tenantID: 2f10bc28-959b-481f-b094-eb043a87570a - type: ServicePrincipal -``` - -These objects then should be referenced in the `ManagedCluster` object in the -`.spec.config.clusterIdentity` field. - -Subscription ID which was used to create service principal should be the -same that will be used in the `.spec.config.subscriptionID` field of the -`ManagedCluster` object. diff --git a/docs/azure/hosted-control-plane.md b/docs/azure/hosted-control-plane.md deleted file mode 100644 index ce39a96ce..000000000 --- a/docs/azure/hosted-control-plane.md +++ /dev/null @@ -1,162 +0,0 @@ -# Hosted control plane (k0smotron) deployment - -## Prerequisites - -- Management Kubernetes cluster (v1.28+) deployed on Azure with HMC installed - on it -- Default storage class configured on the management cluster - -Keep in mind that all control plane components for all managed clusters will -reside in the management cluster. - -## Pre-existing resources - -Certain resources will not be created automatically in a hosted control plane -scenario thus they should be created in advance and provided in the `ManagedCluster` -object. You can reuse these resources with management cluster as described -below. 
- -If you deployed your Azure Kubernetes cluster using Cluster API Provider Azure -(CAPZ) you can obtain all the necessary data with the commands below: - -**Location** - -```bash -kubectl get azurecluster -o go-template='{{.spec.location}}' -``` - -**Subscription ID** - -```bash -kubectl get azurecluster -o go-template='{{.spec.subscriptionID}}' -``` - -**Resource group** - -```bash -kubectl get azurecluster -o go-template='{{.spec.resourceGroup}}' -``` - -**vnet name** - -```bash -kubectl get azurecluster -o go-template='{{.spec.networkSpec.vnet.name}}' -``` - -**Subnet name** - -```bash -kubectl get azurecluster -o go-template='{{(index .spec.networkSpec.subnets 1).name}}' -``` - -**Route table name** - -```bash -kubectl get azurecluster -o go-template='{{(index .spec.networkSpec.subnets 1).routeTable.name}}' -``` - -**Security group name** - -```bash -kubectl get azurecluster -o go-template='{{(index .spec.networkSpec.subnets 1).securityGroup.name}}' -``` - - - -## HMC ManagedCluster manifest - -With all the collected data your `ManagedCluster` manifest will look similar to this: - -```yaml -apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster -metadata: - name: azure-hosted-cp -spec: - template: azure-hosted-cp - config: - location: "westus" - subscriptionID: ceb131c7-a917-439f-8e19-cd59fe247e03 - vmSize: Standard_A4_v2 - clusterIdentity: - name: az-cluster-identity - namespace: hmc-system - resourceGroup: mgmt-cluster - network: - vnetName: mgmt-cluster-vnet - nodeSubnetName: mgmt-cluster-node-subnet - routeTableName: mgmt-cluster-node-routetable - securityGroupName: mgmt-cluster-node-nsg - tenantID: 7db9e0f2-c88a-4116-a373-9c8b6cc9d5eb - clientID: 471f65fa-ddee-40b4-90ae-da1a8a114ee1 - clientSecret: "u_RANDOM" -``` - -To simplify creation of the ManagedCluster object you can use the template below: - -```yaml -apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster -metadata: - name: azure-hosted-cp -spec: - template: azure-hosted-cp - config: - location: "{{.spec.location}}" - subscriptionID: "{{.spec.subscriptionID}}" - vmSize: Standard_A4_v2 - clusterIdentity: - name: az-cluster-identity - namespace: hmc-system - resourceGroup: "{{.spec.resourceGroup}}" - network: - vnetName: "{{.spec.networkSpec.vnet.name}}" - nodeSubnetName: "{{(index .spec.networkSpec.subnets 1).name}}" - routeTableName: "{{(index .spec.networkSpec.subnets 1).routeTable.name}}" - securityGroupName: "{{(index .spec.networkSpec.subnets 1).securityGroup.name}}" - tenantID: 7db9e0f2-c88a-4116-a373-9c8b6cc9d5eb - clientID: 471f65fa-ddee-40b4-90ae-da1a8a114ee1 - clientSecret: "u_RANDOM" -``` - -Then you can render it using the command: - -```bash -kubectl get azurecluster -o go-template="$(cat template.yaml)" -``` - -## Cluster creation - -After applying `ManagedCluster` object you require to manually set the status of the -`AzureCluster` object due to current limitations (see k0sproject/k0smotron#668). - -To do so you need to execute the following command: - -```bash -kubectl patch azurecluster --type=merge --subresource status --patch 'status: {ready: true}' -``` - -## Important notes on the cluster deletion - -Because of the aforementioned limitation you also need to make manual steps in -order to properly delete cluster. - -Before removing the cluster make sure to place custom finalizer onto -`AzureCluster` object. This is needed to prevent it from being deleted instantly -which will cause cluster deletion to stuck indefinitely. 
- -To place finalizer you can execute the following command: - -```bash -kubectl patch azurecluster --type=merge --patch 'metadata: {finalizers: [manual]}' -``` - -When finalizer is placed you can remove the `ManagedCluster` as usual. Check that -all `AzureMachines` objects are deleted successfully and remove finalizer you've -placed to finish cluster deletion. - -In case if have orphaned `AzureMachines` left you have to delete finalizers on -them manually after making sure that no VMs are present in Azure. - -*Note: since Azure admission prohibits orphaned objects mutation you'll have to -disable it by deleting it's `mutatingwebhookconfiguration`* diff --git a/docs/azure/machine-parameters.md b/docs/azure/machine-parameters.md deleted file mode 100644 index 6a5bf383f..000000000 --- a/docs/azure/machine-parameters.md +++ /dev/null @@ -1,61 +0,0 @@ -# Azure machine parameters - -## SSH - -SSH public key can be passed to `.spec.config.sshPublicKey` (in case of hosted CP) -parameter or `.spec.config.controlPlane.sshPublicKey` and -`.spec.config.worker.sshPublicKey` parameters (in case of standalone CP) -of the `ManagedCluster` object. - -It should be encoded in **base64** format. - -## VM size - -Azure supports various VM sizes which can be retrieved with the following -command: - -```bash -az vm list-sizes --location "" -o table -``` - -Then desired VM size could be passed to the: - -- `.spec.config.vmSize` - for hosted CP deployment. -- `.spec.config.controlPlane.vmSize` - for control plane nodes in the standalone - deployment. -- `.spec.config.worker.vmSize` - for worker nodes in the standalone deployment. - -*Example: Standard_A4_v2* - -## Root Volume size - -Root volume size of the VM (in GB) can be changed through the following -parameters: - -- `.spec.config.rootVolumeSize` - for hosted CP deployment. -- `.spec.config.controlPlane.rootVolumeSize` - for control plane nodes in the - standalone deployment. -- `.spec.config.worker.rootVolumeSize` - for worker nodes in the standalone - deployment. - -*Default value: 30* - -Please note that this value can't be less than size of the root volume which -defined in your image. - -## VM Image - -You can define the image which will be used for you machine using the following -parameters: - -- `.spec.config.image` - for hosted CP deployment. -- `.spec.config.controlPlane.image` - for control plane nodes in the standalone - deployment. -- `.spec.config.worker.image` - for worker nodes in the standalone deployment. - -There are multiple self-excluding ways to define the image source (e.g. Azure -Compute Gallery, Azure Marketplace, etc.). - -Detailed information regarding image can be found in [CAPZ documentation](https://capz.sigs.k8s.io/self-managed/custom-images) - -By default, the latest official CAPZ Ubuntu based image is used. diff --git a/docs/azure/main.md b/docs/azure/main.md deleted file mode 100644 index 0560d3b99..000000000 --- a/docs/azure/main.md +++ /dev/null @@ -1,35 +0,0 @@ -# Azure provider - -## Prerequisites - -1. `kubectl` CLI installed locally. -2. `az` CLI utility installed (part of `azure-cli`). -3. Azure account with the required resource providers registered. 
- -## Resource providers registration - -The following resource providers should be registered in your Azure account: - -- `Microsoft.Compute` -- `Microsoft.Network` -- `Microsoft.ContainerService` -- `Microsoft.ManagedIdentity` -- `Microsoft.Authorization` - -You can follow the [official documentation guide](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types) -to register the providers. - -## Azure cluster parameters - -Follow the [Azure cluster parameters](cluster-parameters.md) guide to setup -mandatory parameters for Azure clusters. - -## Azure machine parameters - -Follow the [Azure machine parameters](machine-parameters.md) guide if you want to -setup/modify the default machine parameters. - -## Azure hosted control plane - -Follow the [Hosted control plane](hosted-control-plane.md) guide to deploy -hosted control plane cluster on Azure. diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 04f5b8021..000000000 --- a/docs/index.md +++ /dev/null @@ -1,28 +0,0 @@ -# Welcome to HMC Docs - - -# Introduction - -HMC is part of Mirantis Project 2A and is focused and developing a consistent -way to deploy and manage kubernetes clusters at scale. More informaiton can be -found [here](./introduction.md) - - -## Providers -HMC leverages the Cluster API provider ecosystem, the following providers have -had templates created and validated - - - * [AWS](./aws/main.md) - * [Azure](./azure/main.md) - * [Vsphere](./vsphere/main.md) - - - - -## Project layout - - mkdocs.yml # The configuration file. - docs/ - index.md # The documentation homepage. - \ No newline at end of file diff --git a/docs/introduction.md b/docs/introduction.md deleted file mode 100644 index b647609a1..000000000 --- a/docs/introduction.md +++ /dev/null @@ -1,19 +0,0 @@ -# Project Overview - -```mermaid ---- -title: HMC Overview ---- -erDiagram - USER ||--o{ HMC : uses - USER ||--o{ Template : assigns - Template ||--o{ HMC : "used by" - HMC ||--o{ CAPI : connects - CAPI ||--|{ CAPV : provider - CAPI ||--|{ CAPA : provider - CAPI ||--|{ CAPZ : provider - CAPI ||--|{ K0smotron : Bootstrap - K0smotron |o..o| CAPV : uses - K0smotron |o..o| CAPA : uses - K0smotron |o..o| CAPZ : uses -``` \ No newline at end of file diff --git a/docs/mk-docs-setup.md b/docs/mk-docs-setup.md deleted file mode 100644 index 998a4d409..000000000 --- a/docs/mk-docs-setup.md +++ /dev/null @@ -1,42 +0,0 @@ -# HMC MKdocs Setup - -## Project layout - - mkdocs.yml # The configuration file. - docs/ - index.md # The documentation homepage. - stylesheets # CSS stylesheets to control look and feel - assets # Images and other served material - ... # Other markdown pages, images and other files. - - -## Setting up MKdocs and dependancies - -1. Setup python Virtual Environment - - `python3 -m venv ./mkdocs` - `source ./mkdocs/bin/activate` - -2. Install MkDocs - - `pip install mkdocs` - -3. Install plugins - - `pip install mkdocs-mermaid2-plugin` - - `pip install mkdocs-material` - -## Run MKdocs for dev - -* `mkdocs serve` - Start the live-reloading docs server. - -For full documentation visit [mkdocs.org](https://www.mkdocs.org). - -## MKdocs Commands - -* `mkdocs new [dir-name]` - Create a new project. -* `mkdocs serve` - Start the live-reloading docs server. -* `mkdocs build` - Build the documentation site. -* `mkdocs -h` - Print help message and exit. 
- diff --git a/docs/quick-start.md b/docs/quick-start.md deleted file mode 100644 index 39e61326c..000000000 --- a/docs/quick-start.md +++ /dev/null @@ -1,16 +0,0 @@ -# Mirantis Hybrid Cloud Platform - -## Installation - -### TLDR - - kubectl apply -f https://github.com/Mirantis/hmc/releases/download/v0.0.1/install.yaml - -or install using `helm` - - helm install hmc oci://ghcr.io/mirantis/hmc/charts/hmc --version v0.0.1 -n hmc-system --create-namespace - - -> Note: The HMC installation using Kubernetes manifests does not allow customization of the deployment. To apply a custom HMC configuration, install HMC using the Helm chart. -> deployment. If the custom HMC configuration should be applied, install HMC using -> the Helm chart. \ No newline at end of file diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css deleted file mode 100644 index e65d3c741..000000000 --- a/docs/stylesheets/extra.css +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2024 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -:root { - --md-primary-fg-color: #4ba0f3; - --md-primary-fg-color--light: #4ba0f3; - --md-primary-fg-color--dark: #4ba0f3; - } - .md-header__button.md-logo img { - height: fit-content; /* Adjust the size as needed */ - } - .md-header__title { - font-size: 1.1rem; - } - .md-nav { - font-size: .8rem; - font-weight: 800; - line-height: 1.3; - } - .md-typeset h1, .md-typeset h2 { - font-weight: 500; - letter-spacing: -.01em; - } - .md-typeset h3 { - font-weight: 500; -} diff --git a/docs/templates/main.md b/docs/templates/main.md deleted file mode 100644 index b81bb72cc..000000000 --- a/docs/templates/main.md +++ /dev/null @@ -1,132 +0,0 @@ -# Templates system - -By default, Hybrid Container Cloud delivers a set of default `Template` objects. You can also build your own templates -and use them for deployment. - -## Custom deployment Templates - -> At the moment all `Templates` should reside in the `hmc-system` namespace. But they can be referenced -> by `ManagedClusters` from any namespace. - -Here are the instructions on how to bring your own Template to HMC: - -1. Create a [HelmRepository](https://fluxcd.io/flux/components/source/helmrepositories/) object containing the URL to the -external Helm repository. Label it with `hmc.mirantis.com/managed: "true"`. -2. Create a [HelmChart](https://fluxcd.io/flux/components/source/helmcharts/) object referencing the `HelmRepository` as a -`sourceRef`, specifying the name and version of your Helm chart. Label it with `hmc.mirantis.com/managed: "true"`. -3. Create a `Template` object in `hmc-system` namespace referencing this helm chart in `spec.helm.chartRef`. -`chartRef` is a field of the -[CrossNamespaceSourceReference](https://fluxcd.io/flux/components/helm/api/v2/#helm.toolkit.fluxcd.io/v2.CrossNamespaceSourceReference) kind. 
- -Here is an example of a custom `Template` with the `HelmChart` reference: - -```yaml -apiVersion: source.toolkit.fluxcd.io/v1 -kind: HelmRepository -metadata: - name: custom-templates-repo - namespace: hmc-system - labels: - hmc.mirantis.com/managed: "true" -spec: - insecure: true - interval: 10m0s - provider: generic - type: oci - url: oci://ghcr.io/external-templates-repo/charts -``` - -```yaml -apiVersion: source.toolkit.fluxcd.io/v1 -kind: HelmChart -metadata: - name: custom-template-chart - namespace: hmc-system - labels: - hmc.mirantis.com/managed: "true" -spec: - interval: 5m0s - chart: custom-template-chart-name - reconcileStrategy: ChartVersion - sourceRef: - kind: HelmRepository - name: custom-templates-repo - version: 0.2.0 -``` - -```yaml -apiVersion: hmc.mirantis.com/v1alpha1 -kind: Template -metadata: - name: os-k0smotron - namespace: hmc-system -spec: - type: deployment - providers: - infrastructure: - - openstack - bootstrap: - - k0s - controlPlane: - - k0smotron - helm: - chartRef: - kind: HelmChart - name: custom-template-chart - namespace: default -``` - -The `Template` should follow the rules mentioned below: -1. `spec.type` should be `deployment` (as an alternative, the referenced helm chart may contain the -`hmc.mirantis.com/type: deployment` annotation in `Chart.yaml`). -2. `spec.providers` should contain the list of required Cluster API providers: `infrastructure`, `bootstrap` and -`controlPlane`. As an alternative, the referenced helm chart may contain the specific annotations in the -`Chart.yaml` (value is a list of providers divided by comma). These fields are only used for validation. For example: - -`Template` spec: - -```yaml -spec: - providers: - infrastructure: - - aws - bootstrap: - - k0s - controlPlane: - - k0smotron -``` - -`Chart.yaml`: - -```bash -annotations: - hmc.mirantis.com/infrastructure-providers: aws - hmc.mirantis.com/controlplane-providers: k0smotron - hmc.mirantis.com/bootstrap-providers: k0s -``` - -## Remove Templates shipped with HMC - -If you need to limit the cluster templates that exist in your HMC installation, follow the instructions below: - -1. Get the list of `deployment` Templates shipped with HMC: - -```bash -kubectl get templates -n hmc-system -l helm.toolkit.fluxcd.io/name=hmc-templates | grep deployment -``` - -Example output: -```bash -aws-hosted-cp deployment true -aws-standalone-cp deployment true -``` - -2. Remove the templates from the list: - -```bash -kubectl delete template -n hmc-system - -``` - diff --git a/docs/vsphere/cluster-parameters.md b/docs/vsphere/cluster-parameters.md deleted file mode 100644 index c52f44d0c..000000000 --- a/docs/vsphere/cluster-parameters.md +++ /dev/null @@ -1,129 +0,0 @@ -# vSphere cluster parameters - -## Prerequisites - -- vSphere provider [prerequisites](main.md#prerequisites) are complete. - -## Cluster Identity - -To provide credentials for CAPI vSphere provider (CAPV) the -`VSphereClusterIdentity` resource must be created. This should be done before -provisioning any clusters. - -To create cluster identity you'll only need username and password for your -vSphere instance. 
- -The example of the objects can be found below: - -**Secret**: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: vsphere-cluster-identity-secret - namespace: hmc-system -stringData: - username: user - password: Passw0rd -``` - -**VsphereClusterIdentity**: - -```yaml -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 -kind: VSphereClusterIdentity -metadata: - name: vsphere-cluster-identity - namespace: hmc-system -spec: - secretName: vsphere-cluster-identity-secret - allowedNamespaces: - selector: - matchLabels: {} -``` - -These objects then should be referenced in the `ManagedCluster` object in the -`.spec.config.clusterIdentity` field. - - -## ManagedCluster parameters - -To deploy managed cluster a number of parameters should be passed to the -`ManagedCluster` object. - -### Parameter list - -The following is the list of vSphere specific parameters, which are _required_ -for successful cluster creation. - -| Parameter | Example | Description | -|-------------------------------------|---------------------------------------|------------------------------------| -| `.spec.config.vsphere.server` | `vcenter.example.com` | Address of the vSphere server | -| `.spec.config.vsphere.thumbprint` | `"00:00:00"` | Certificate thumbprint | -| `.spec.config.vsphere.datacenter` | `DC` | Datacenter name | -| `.spec.config.vsphere.datastore` | `/DC/datastore/DS` | Datastore path | -| `.spec.config.vsphere.resourcePool` | `/DC/host/vCluster/Resources/ResPool` | Resource pool path | -| `.spec.config.vsphere.folder` | `/DC/vm/example` | vSphere folder path | -| `.spec.config.vsphere.username` | `user` | Username for your vSphere instance | -| `.spec.config.vsphere.password` | `password` | Password for your vSphere instance | - -_You can check [machine parameters](machine-parameters.md) for machine specific -parameters._ - -To obtain vSphere certificate thumbprint you can use the following command: - -```bash -curl -sw %{certs} https://vcenter.example.com | openssl x509 -sha256 -fingerprint -noout | awk -F '=' '{print $2}' -``` - -Username and password currently must be passed once more in the `ManagedCluster` -object to provide proper authentication for CCM and CSI driver. - -## Example of ManagedCluster CR - -With all above parameters provided your `ManagedCluster` can look like this: - -```yaml -apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster -metadata: - name: cluster-1 -spec: - template: vsphere-standalone-cp - config: - clusterIdentity: - name: vsphere-cluster-identity - vsphere: - server: vcenter.example.com - thumbprint: "00:00:00" - datacenter: "DC" - datastore: "/DC/datastore/DC" - resourcePool: "/DC/host/vCluster/Resources/ResPool" - folder: "/DC/vm/example" - username: "user" - password: "Passw0rd" - controlPlaneEndpointIP: "172.16.0.10" - - controlPlane: - ssh: - user: ubuntu - publicKey: | - ssh-rsa AAA... - rootVolumeSize: 50 - cpus: 2 - memory: 4096 - vmTemplate: "/DC/vm/template" - network: "/DC/network/Net" - - worker: - ssh: - user: ubuntu - publicKey: | - ssh-rsa AAA... 
- rootVolumeSize: 50 - cpus: 2 - memory: 4096 - vmTemplate: "/DC/vm/template" - network: "/DC/network/Net" -``` diff --git a/docs/vsphere/hosted-control-plane.md b/docs/vsphere/hosted-control-plane.md deleted file mode 100644 index 15ab4fee7..000000000 --- a/docs/vsphere/hosted-control-plane.md +++ /dev/null @@ -1,61 +0,0 @@ -# Hosted control plane (k0smotron) deployment - -## Prerequisites - -- Management Kubernetes cluster (v1.28+) deployed on vSphere with HMC installed - on it - -Keep in mind that all control plane components for all managed clusters will -reside in the management cluster. - - -## ManagedCluster manifest - -Hosted CP template has mostly identical parameters with standalone CP, you can -check them in the [cluster parameters](cluster-parameters.md) and the -[machine parameters](machine-parameters.md) sections. - -**Important note on control plane endpoint IP** - -Since vSphere provider requires that user will provide control plane endpoint IP -before deploying the cluster you should make sure that this IP will be the same -that will be assigned to the k0smotron LB service. Thus you must provide control -plane endpoint IP to the k0smotron service via annotation which is accepted by -your LB provider (in the following example `kube-vip` annotation is used) - -```yaml -apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster -metadata: - name: cluster-1 -spec: - template: vsphere-hosted-cp - config: - clusterIdentity: - name: vsphere-cluster-identity - vsphere: - server: vcenter.example.com - thumbprint: "00:00:00" - datacenter: "DC" - datastore: "/DC/datastore/DC" - resourcePool: "/DC/host/vCluster/Resources/ResPool" - folder: "/DC/vm/example" - username: "user" - password: "Passw0rd" - controlPlaneEndpointIP: "172.16.0.10" - - ssh: - user: ubuntu - publicKey: | - ssh-rsa AAA... - rootVolumeSize: 50 - cpus: 2 - memory: 4096 - vmTemplate: "/DC/vm/template" - network: "/DC/network/Net" - - k0smotron: - service: - annotations: - kube-vip.io/loadbalancerIPs: "172.16.0.10" -``` diff --git a/docs/vsphere/machine-parameters.md b/docs/vsphere/machine-parameters.md deleted file mode 100644 index f69fb7991..000000000 --- a/docs/vsphere/machine-parameters.md +++ /dev/null @@ -1,55 +0,0 @@ -# vSphere machine parameters - -## SSH - -Currently SSH configuration on vSphere expects that user is already created -during template creation. Because of that you must pass username along with SSH -public key to configure SSH access. - - -SSH public key can be passed to `.spec.config.ssh.publicKey` (in case of -hosted CP) parameter or `.spec.config.controlPlane.ssh.publicKey` and -`.spec.config.worker.ssh.publicKey` parameters (in case of standalone CP) of the -`ManagedCluster` object. - -SSH public key must be passed literally as a string. - -Username can be passed to `.spec.config.controlPlane.ssh.user`, -`.spec.config.worker.ssh.user` or `.spec.config.ssh.user` depending on you -deployment model. 
- -## VM resources - -The following parameters are used to define VM resources: - -| Parameter | Example | Description | -|-------------------|---------|----------------------------------------------------------------------| -| `.rootVolumeSize` | `50` | Root volume size in GB (can't be less than one defined in the image) | -| `.cpus` | `2` | Number of CPUs | -| `.memory` | `4096` | Memory size in MB | - -The resource parameters are the same for hosted and standalone CP deployments, -but they are positioned differently in the spec, which means that they're going to: - -- `.spec.config` in case of hosted CP deployment. -- `.spec.config.controlPlane` in in case of standalone CP for control plane - nodes. -- `.spec.config.worker` in in case of standalone CP for worker nodes. - -## VM Image and network - -To provide image template path and network path the following parameters must be -used: - -| Parameter | Example | Description | -|---------------|-------------------|---------------------| -| `.vmTemplate` | `/DC/vm/template` | Image template path | -| `.network` | `/DC/network/Net` | Network path | - -As with resource parameters the position of these parameters in the -`ManagedCluster` depends on deployment type and these parameters are used in: - -- `.spec.config` in case of hosted CP deployment. -- `.spec.config.controlPlane` in in case of standalone CP for control plane - nodes. -- `.spec.config.worker` in in case of standalone CP for worker nodes. diff --git a/docs/vsphere/main.md b/docs/vsphere/main.md deleted file mode 100644 index e9cc841fe..000000000 --- a/docs/vsphere/main.md +++ /dev/null @@ -1,62 +0,0 @@ -# vSphere provider - -## Prerequisites - -1. `kubectl` CLI installed locally. -2. vSphere instance version `6.7.0` or higher. -3. vSphere account with appropriate privileges. -4. Image template. -5. vSphere network with DHCP enabled. - -### Image template - -You can use pre-buit image templates from [CAPV project](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/blob/main/README.md#kubernetes-versions-with-published-ovas) -or build your own. - -When building your own image make sure that vmware tools and cloud-init are -installed and properly configured. - -You can follow [official open-vm-tools guide](https://docs.vmware.com/en/VMware-Tools/11.0.0/com.vmware.vsphere.vmwaretools.doc/GUID-C48E1F14-240D-4DD1-8D4C-25B6EBE4BB0F.html) -on how to correctly install vmware-tools. - -When setting up cloud-init you can refer to [official docs](https://cloudinit.readthedocs.io/en/latest/index.html) -and specifically [vmware datasource docs](https://cloudinit.readthedocs.io/en/latest/reference/datasources/vmware.html) -for extended information regarding cloud-init on vSphere. - -### vSphere network - -When creating network make sure that it has DHCP service. - -Also make sure that the part of your network is out of DHCP range (e.g. network -172.16.0.0/24 with DHCP range 172.16.0.100-172.16.0.254). This is needed to make -sure that LB services will not create any IP conflicts in the network. - -### vSphere privileges - -To function properly the user assigned to vSphere provider should be able to -manipulate vSphere resources. 
The following is the general overview of the -required privileges: - -- `Virtual machine` - full permissions are required -- `Network` - `Assign network` is sufficient -- `Datastore` - it should be possible for user to manipulate virtual machine - files and metadata - -In addition to that specific CSI driver permissions are required see -[the official doc](https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/2.0/vmware-vsphere-csp-getting-started/GUID-0AB6E692-AA47-4B6A-8CEA-38B754E16567.html) -to get more information on CSI specific permissions. - -## vSphere cluster parameters - -Follow the [vSphere cluster parameters](cluster-parameters.md) guide to setup -mandatory parameters for vSphere clusters. - -## vSphere machine parameters - -Follow the [vSphere machine parameters](machine-parameters.md) guide if you want -to setup/modify the default machine parameters. - -## vSphere hosted control plane - -Follow the [Hosted control plane](hosted-control-plane.md) guide to deploy -hosted control plane cluster on vSphere. diff --git a/go.mod b/go.mod index 30b04312e..a45203560 100644 --- a/go.mod +++ b/go.mod @@ -1,20 +1,22 @@ module github.com/Mirantis/hmc -go 1.22.0 +go 1.22.7 require ( github.com/a8m/envsubst v1.4.2 github.com/cert-manager/cert-manager v1.15.3 - github.com/fluxcd/helm-controller/api v1.0.1 + github.com/fluxcd/helm-controller/api v1.1.0 github.com/fluxcd/pkg/apis/meta v1.6.1 github.com/fluxcd/pkg/runtime v0.49.1 - github.com/fluxcd/source-controller/api v1.3.0 + github.com/fluxcd/source-controller/api v1.4.1 github.com/go-logr/logr v1.4.2 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98 + github.com/projectsveltos/addon-controller v0.38.2 + github.com/projectsveltos/libsveltos v0.38.3 github.com/segmentio/analytics-go v3.1.0+incompatible gopkg.in/yaml.v3 v3.0.1 helm.sh/helm/v3 v3.16.1 @@ -22,36 +24,39 @@ require ( k8s.io/apiextensions-apiserver v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.31.1 - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 sigs.k8s.io/cluster-api v1.8.3 sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/yaml v1.4.0 ) require ( dario.cat/mergo v1.0.1 // indirect - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect - github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/containerd/containerd v1.7.12 // indirect + github.com/chai2010/gettext-go v1.0.3 // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/containerd v1.7.21 // indirect + github.com/containerd/errdefs v0.1.0 // 
indirect github.com/containerd/log v0.1.0 // indirect - github.com/cyphar/filepath-securejoin v0.3.1 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cyphar/filepath-securejoin v0.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v25.0.1+incompatible // indirect + github.com/docker/cli v27.2.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v26.1.5+incompatible // indirect - github.com/docker/docker-credential-helpers v0.7.0 // indirect + github.com/docker/docker v27.2.1+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect @@ -61,7 +66,7 @@ require ( github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fluxcd/pkg/apis/acl v0.3.0 // indirect - github.com/fluxcd/pkg/apis/kustomize v1.5.0 // indirect + github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect @@ -77,14 +82,14 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect + github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/gorilla/mux v1.8.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -104,12 +109,12 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect - github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -119,45 +124,46 @@ require ( github.com/opencontainers/image-spec v1.1.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.20.3 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common 
v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rivo/uniseg v0.4.2 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/rubenv/sql-migrate v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/segmentio/backo-go v1.1.0 // indirect + github.com/sergi/go-diff v1.3.1 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/trace v1.30.0 // indirect + go.starlark.net v0.0.0-20240725214946-42030a7cedce // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.27.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect golang.org/x/net v0.29.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.24.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/tools v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.1 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -166,13 +172,12 @@ require ( k8s.io/cli-runtime v0.31.1 // indirect k8s.io/component-base v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f // indirect + k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect k8s.io/kubectl v0.31.1 // indirect - oras.land/oras-go v1.2.5 // indirect + oras.land/oras-go v1.2.6 // indirect sigs.k8s.io/gateway-api v1.1.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.3 // indirect sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index cad81aba1..df64d244b 100644 --- a/go.sum +++ b/go.sum @@ -2,12 +2,12 @@ dario.cat/mergo v1.0.1 
h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -20,12 +20,10 @@ github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= -github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.12.6 h1:qEnZjoHXv+4/s0LmKZWE0/AiZmMWEIkFfWBSf1a0wlU= +github.com/Microsoft/hcsshim v0.12.6/go.mod h1:ZABCLVcvLMjIkzr9rUGcQ1QA0p0P3Ps+d3N1g2DsFfk= github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg= github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -42,47 +40,52 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= 
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cert-manager/cert-manager v1.15.3 h1:/u9T0griwd5MegPfWbB7v0KcVcT9OJrEvPNhc9tl7xQ= github.com/cert-manager/cert-manager v1.15.3/go.mod h1:stBge/DTvrhfQMB/93+Y62s+gQgZBsfL1o0C/4AL/mI= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= -github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= -github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= +github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= +github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/containerd v1.7.21 h1:USGXRK1eOC/SX0L195YgxTHb0a00anxajOzgfN0qrCA= +github.com/containerd/containerd v1.7.21/go.mod h1:e3Jz1rYRUZ2Lt51YrH9Rz0zPyJBOlSvB3ghr2jbVD8g= github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod 
h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE= -github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= +github.com/cyphar/filepath-securejoin v0.3.2 h1:QhZu5AxQ+o1XZH0Ye05YzvJ0kAdK6VQc0z9NNMek7gc= +github.com/cyphar/filepath-securejoin v0.3.2/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= -github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/distribution/distribution/v3 v3.0.0-beta.1 h1:X+ELTxPuZ1Xe5MsD3kp2wfGUhc8I+MPfRis8dZ818Ic= +github.com/distribution/distribution/v3 v3.0.0-beta.1/go.mod h1:O9O8uamhHzWWQVTjuQpyYUVm/ShPHPUDgvQMpHGVBDs= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU= -github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.2.1+incompatible h1:U5BPtiD0viUzjGAjV1p0MGB8eVA3L3cbIrnyWmSJI70= +github.com/docker/cli v27.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= -github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= +github.com/docker/docker v27.2.1+incompatible h1:fQdiLfW7VLscyoeYEBz7/J8soYFDZV1u6VW6gJEjNMI= +github.com/docker/docker v27.2.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c 
h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -103,18 +106,18 @@ github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fluxcd/helm-controller/api v1.0.1 h1:Gn9qEVuif6D5+gHmVwTEZkR4+nmLOcOhKx4Sw2gL2EA= -github.com/fluxcd/helm-controller/api v1.0.1/go.mod h1:/6AD5a2qjo/ttxVM8GR33syLZwqigta60DCLdy8GrME= +github.com/fluxcd/helm-controller/api v1.1.0 h1:NS5Wm3U6Kv4w7Cw2sDOV++vf2ecGfFV00x1+2Y3QcOY= +github.com/fluxcd/helm-controller/api v1.1.0/go.mod h1:BgHMgMY6CWynzl4KIbHpd6Wpn3FN9BqgkwmvoKCp6iE= github.com/fluxcd/pkg/apis/acl v0.3.0 h1:UOrKkBTOJK+OlZX7n8rWt2rdBmDCoTK+f5TY2LcZi8A= github.com/fluxcd/pkg/apis/acl v0.3.0/go.mod h1:WVF9XjSMVBZuU+HTTiSebGAWMgM7IYexFLyVWbK9bNY= -github.com/fluxcd/pkg/apis/kustomize v1.5.0 h1:ah4sfqccnio+/5Edz/tVz6LetFhiBoDzXAElj6fFCzU= -github.com/fluxcd/pkg/apis/kustomize v1.5.0/go.mod h1:nEzhnhHafhWOUUV8VMFLojUOH+HHDEsL75y54mt/c30= +github.com/fluxcd/pkg/apis/kustomize v1.6.1 h1:22FJc69Mq4i8aCxnKPlddHhSMyI4UPkQkqiAdWFcqe0= +github.com/fluxcd/pkg/apis/kustomize v1.6.1/go.mod h1:5dvQ4IZwz0hMGmuj8tTWGtarsuxW0rWsxJOwC6i+0V8= github.com/fluxcd/pkg/apis/meta v1.6.1 h1:maLhcRJ3P/70ArLCY/LF/YovkxXbX+6sTWZwZQBeNq0= github.com/fluxcd/pkg/apis/meta v1.6.1/go.mod h1:YndB/gxgGZmKfqpAfFxyCDNFJFP0ikpeJzs66jwq280= github.com/fluxcd/pkg/runtime v0.49.1 h1:Xyruu1VvkaKZaAhm/32tHJnHab9aU3HzZCf+w6Xoq2A= github.com/fluxcd/pkg/runtime v0.49.1/go.mod h1:ieDaIEcxzVj77Nw64q4Vd3ZGYdLqpnXOr+GX+XwqTS4= -github.com/fluxcd/source-controller/api v1.3.0 h1:Z5Lq0aJY87yg0cQDEuwGLKS60GhdErCHtsi546HUt10= -github.com/fluxcd/source-controller/api v1.3.0/go.mod h1:+tfd0vltjcVs/bbnq9AlYR9AAHSVfM/Z4v4TpQmdJf4= +github.com/fluxcd/source-controller/api v1.4.1 h1:zV01D7xzHOXWbYXr36lXHWWYS7POARsjLt61Nbh3kVY= +github.com/fluxcd/source-controller/api v1.4.1/go.mod h1:gSjg57T+IG66SsBR0aquv+DFrm4YyBNpKIJVDnu3Ya8= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -162,10 +165,8 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= -github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -175,22 +176,25 @@ 
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134 h1:c5FlPPgxOn7kJz3VoPLkQYQXGBS3EklQ4Zfi57uOuqQ= +github.com/google/pprof v0.0.0-20240910150728-a0b0bb1d4134/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -203,7 +207,10 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= 
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= +github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= @@ -225,8 +232,11 @@ github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2 github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -246,8 +256,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -261,10 +271,12 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= -github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= -github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/spdystream 
v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -301,27 +313,37 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/projectsveltos/addon-controller v0.38.2 h1:Q23bgHRTYfN7jKvbR1vLftBOtU5mHrJhU3bDJ+b4pOw= +github.com/projectsveltos/addon-controller v0.38.2/go.mod h1:bRm51IhMphV5EYfX5SkaXuwrzxzNfhYzlNFra646uOo= +github.com/projectsveltos/libsveltos v0.38.3 h1:LcNeAwEJWuez9CP2Z5D++i9GUJUPQcqBllPLsCTRCCE= +github.com/projectsveltos/libsveltos v0.38.3/go.mod h1:FITu0ZxiB0lfPVfFkIKJ9rk/ZWDup1OTymiIX+uEXtw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs 
v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= +github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= -github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= @@ -332,8 +354,8 @@ github.com/segmentio/analytics-go v3.1.0+incompatible h1:IyiOfUgQFVHvsykKKbdI7Zs github.com/segmentio/analytics-go v3.1.0+incompatible/go.mod h1:C7CYBtQWk4vRk2RyLu0qOcbHJ18E3F1HV2C/8JvKN48= github.com/segmentio/backo-go v1.1.0 h1:cJIfHQUdmLsd8t9IXqf5J8SdrOMn9vMa7cIvOavHAhc= github.com/segmentio/backo-go v1.1.0/go.mod h1:ckenwdf+v/qbyhVdNPWHnqh2YdJBED1O9cidYyM5J18= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -343,8 +365,9 @@ github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -353,6 +376,7 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -375,24 +399,42 @@ github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c h1:3lbZUMbMiGUW/LMkfsEAB github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c/go.mod h1:UrdRz5enIKZ63MEE3IF9l2/ebyx59GyGgPi+tICQdmM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4= -go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= +go.opentelemetry.io/contrib/exporters/autoexport v0.46.1 h1:ysCfPZB9AjUlMa1UHYup3c9dAOCMQX/6sxSfPBUoxHw= +go.opentelemetry.io/contrib/exporters/autoexport v0.46.1/go.mod h1:ha0aiYm+DOPsLHjh0zoQ8W8sLT+LJ58J3j47lGpSLrU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/exporters/prometheus v0.44.0 h1:08qeJgaPC0YEBu2PQMbqU3rogTlyzpjhCI2b58Yn00w= +go.opentelemetry.io/otel/exporters/prometheus v0.44.0/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 h1:dEZWPjVN22urgYCza3PXRUGEyCB++y1sAqm6guWFesk= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0/go.mod h1:kB3ufRbfU+CQ4MlUcqtW8Z7YEOBeK2DJ6CmR5rYYF3E= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= +go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.starlark.net v0.0.0-20240725214946-42030a7cedce h1:YyGqCjZtGZJ+mRPaenEiB87afEO2MFRzLiJNZ0Z0bPw= +go.starlark.net v0.0.0-20240725214946-42030a7cedce/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -405,12 +447,12 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= 
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -419,8 +461,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -452,22 +494,26 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 
h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM= +google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= @@ -475,6 +521,7 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWM gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -501,14 +548,14 @@ k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM= -k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= 
k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24= k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= -oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.6 h1:z8cmxQXBU8yZ4mkytWqXfo6tZcamPwjsuxYU81xJ8Lk= +oras.land/oras-go v1.2.6/go.mod h1:OVPc1PegSEe/K8YiLfosrlqlqTN9PUyFvOw5Y9gwrT8= sigs.k8s.io/cluster-api v1.8.3 h1:N6i25rF5QMadwVg2UPfuO6CzmNXjqnF2r1MAO+kcsro= sigs.k8s.io/cluster-api v1.8.3/go.mod h1:pXv5LqLxuIbhGIXykyNKiJh+KrLweSBajVHHitPLyoY= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= diff --git a/internal/controller/credential_controller.go b/internal/controller/credential_controller.go new file mode 100644 index 000000000..53df59b11 --- /dev/null +++ b/internal/controller/credential_controller.go @@ -0,0 +1,95 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
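
The new `credential_controller.go` below reconciles `Credential` objects by resolving the cluster identity they reference and recording the result in the object's status. A minimal sketch of such a `Credential`, reusing the `VSphereClusterIdentity` shown earlier in this diff (the `identityRef` field name is inferred from the controller code, not quoted from the CRD):

```yaml
# Illustrative sketch; structure inferred from the Credential.Spec.IdentityRef usage below.
apiVersion: hmc.mirantis.com/v1alpha1
kind: Credential
metadata:
  name: vsphere-cluster-identity-cred
  namespace: hmc-system
spec:
  identityRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: VSphereClusterIdentity
    name: vsphere-cluster-identity
    namespace: hmc-system
```

A `ManagedCluster` would then reference this object by name (the `spec.credential` path is assumed from the Go field `Spec.Credential` used in the `managedcluster_controller.go` changes further down).
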
+ +package controller + +import ( + "context" + "errors" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" +) + +// CredentialReconciler reconciles a Credential object +type CredentialReconciler struct { + client.Client +} + +func (r *CredentialReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + l.Info("Credential reconcile start") + + cred := &hmc.Credential{} + if err := r.Client.Get(ctx, req.NamespacedName, cred); err != nil { + l.Error(err, "unable to fetch Credential") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + clIdty := &unstructured.Unstructured{} + clIdty.SetAPIVersion(cred.Spec.IdentityRef.APIVersion) + clIdty.SetKind(cred.Spec.IdentityRef.Kind) + clIdty.SetName(cred.Spec.IdentityRef.Name) + clIdty.SetNamespace(cred.Spec.IdentityRef.Namespace) + + if err := r.Client.Get(ctx, client.ObjectKey{ + Name: cred.Spec.IdentityRef.Name, + Namespace: cred.Spec.IdentityRef.Namespace, + }, clIdty); err != nil { + if apierrors.IsNotFound(err) { + stateErr := r.setState(ctx, cred, hmc.CredentialNotFound) + if stateErr != nil { + err = errors.Join(err, stateErr) + } + + l.Error(err, "ClusterIdentity not found") + + return ctrl.Result{}, err + } + + l.Error(err, "failed to get ClusterIdentity") + + return ctrl.Result{}, err + } + + if err := r.setState(ctx, cred, hmc.CredentialReady); err != nil { + l.Error(err, "failed to set Credential state") + + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +func (r *CredentialReconciler) setState(ctx context.Context, cred *hmc.Credential, state hmc.CredentialState) error { + cred.Status.State = state + + if err := r.Client.Status().Update(ctx, cred); err != nil { + return fmt.Errorf("failed to update Credential %s/%s status: %w", cred.Namespace, cred.Name, err) + } + + return nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *CredentialReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&hmc.Credential{}). 
+ Complete(r) +} diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index 282146884..a2b6e8e16 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -16,10 +16,12 @@ package controller import ( "context" + "encoding/json" "errors" "fmt" "time" + "github.com/Mirantis/hmc/internal/sveltos" hcv2 "github.com/fluxcd/helm-controller/api/v2" fluxmeta "github.com/fluxcd/pkg/apis/meta" fluxconditions "github.com/fluxcd/pkg/runtime/conditions" @@ -27,6 +29,8 @@ import ( "github.com/go-logr/logr" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,12 +51,16 @@ import ( hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/helm" "github.com/Mirantis/hmc/internal/telemetry" + "github.com/Mirantis/hmc/internal/utils" +) + +const ( + DefaultRequeueInterval = 10 * time.Second ) // ManagedClusterReconciler reconciles a ManagedCluster object type ManagedClusterReconciler struct { client.Client - Scheme *runtime.Scheme Config *rest.Config DynamicClient *dynamic.DynamicClient } @@ -84,14 +92,16 @@ var ( // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ManagedClusterController", req.NamespacedName) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling ManagedCluster") + managedCluster := &hmc.ManagedCluster{} if err := r.Get(ctx, req.NamespacedName, managedCluster); err != nil { if apierrors.IsNotFound(err) { l.Info("ManagedCluster not found, ignoring since object must be deleted") return ctrl.Result{}, nil } + l.Error(err, "Failed to get ManagedCluster") return ctrl.Result{}, err } @@ -103,7 +113,7 @@ func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque if managedCluster.Status.ObservedGeneration == 0 { mgmt := &hmc.Management{} - mgmtRef := types.NamespacedName{Name: hmc.ManagementName} + mgmtRef := client.ObjectKey{Name: hmc.ManagementName} if err := r.Get(ctx, mgmtRef, mgmt); err != nil { l.Error(err, "Failed to get Management object") return ctrl.Result{}, err @@ -190,7 +200,7 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma }() template := &hmc.ClusterTemplate{} - templateRef := types.NamespacedName{Name: managedCluster.Spec.Template, Namespace: managedCluster.Namespace} + templateRef := client.ObjectKey{Name: managedCluster.Spec.Template, Namespace: managedCluster.Namespace} if err := r.Get(ctx, templateRef, template); err != nil { l.Error(err, "Failed to get Template") errMsg := fmt.Sprintf("failed to get provided template: %s", err) @@ -269,9 +279,47 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma Message: "Helm chart is valid", }) + cred := &hmc.Credential{} + err = r.Client.Get(ctx, client.ObjectKey{ + Name: managedCluster.Spec.Credential, + Namespace: managedCluster.Namespace, + }, cred) + if err != nil { + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialReadyCondition, + Status: metav1.ConditionFalse, + Reason: 
hmc.FailedReason, + Message: fmt.Sprintf("Failed to get Credential: %s", err), + }) + return ctrl.Result{}, err + } + + if cred.Status.State != hmc.CredentialReady { + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialReadyCondition, + Status: metav1.ConditionFalse, + Reason: hmc.FailedReason, + Message: "Credential is not in Ready state", + }) + return ctrl.Result{}, + fmt.Errorf("credential is not in Ready state") + } + + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ + Type: hmc.CredentialReadyCondition, + Status: metav1.ConditionTrue, + Reason: hmc.SucceededReason, + Message: "Credential is Ready", + }) + if !managedCluster.Spec.DryRun { + helmValues, err := setIdentityHelmValues(managedCluster.Spec.Config, cred.Spec.IdentityRef) + if err != nil { + return ctrl.Result{}, + fmt.Errorf("error setting identity values: %s", err) + } hr, _, err := helm.ReconcileHelmRelease(ctx, r.Client, managedCluster.Name, managedCluster.Namespace, helm.ReconcileHelmReleaseOpts{ - Values: managedCluster.Spec.Config, + Values: helmValues, OwnerReference: &metav1.OwnerReference{ APIVersion: hmc.GroupVersion.String(), Kind: hmc.ManagedClusterKind, @@ -303,24 +351,138 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma requeue, err := r.setStatusFromClusterStatus(ctx, l, managedCluster) if err != nil { if requeue { - return ctrl.Result{RequeueAfter: 10 * time.Second}, err + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err } return ctrl.Result{}, err } if requeue { - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil } if !fluxconditions.IsReady(hr) { - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil } + + return r.updateServices(ctx, managedCluster) } return ctrl.Result{}, nil } +// updateServices reconciles services provided in ManagedCluster.Spec.Services. +// TODO(https://github.com/Mirantis/hmc/issues/361): Set status to ManagedCluster object at appropriate places. +func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.ManagedCluster) (ctrl.Result, error) { + l := log.FromContext(ctx).WithValues("ManagedClusterController", fmt.Sprintf("%s/%s", mc.Namespace, mc.Name)) + opts := []sveltos.HelmChartOpts{} + + // NOTE: The Profile object will be updated with no helm + // charts if len(mc.Spec.Services) == 0. This will result in the + // helm charts being uninstalled on matching clusters if + // Profile originally had len(m.Spec.Sevices) > 0. + for _, svc := range mc.Spec.Services { + if svc.Disable { + l.Info(fmt.Sprintf("Skip adding Template (%s) to Profile (%s) because Disable=true", svc.Template, mc.Name)) + continue + } + + tmpl := &hmc.ServiceTemplate{} + tmplRef := types.NamespacedName{Name: svc.Template, Namespace: mc.Namespace} + if err := r.Get(ctx, tmplRef, tmpl); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err) + } + + source, err := r.getServiceTemplateSource(ctx, tmpl) + if err != nil { + return ctrl.Result{}, fmt.Errorf("could not get repository url: %w", err) + } + + opts = append(opts, sveltos.HelmChartOpts{ + RepositoryURL: source.Spec.URL, + // We don't have repository name so chart name becomes repository name. 
+ RepositoryName: tmpl.Spec.Helm.ChartName, + ChartName: func() string { + if source.Spec.Type == utils.RegistryTypeOCI { + return tmpl.Spec.Helm.ChartName + } + // Sveltos accepts ChartName in <repositoryName>/<chartName> format for non-OCI. + // We don't have a repository name, so we can use <chartName>/<chartName> instead. + // See: https://projectsveltos.github.io/sveltos/addons/helm_charts/. + return fmt.Sprintf("%s/%s", tmpl.Spec.Helm.ChartName, tmpl.Spec.Helm.ChartName) + }(), + ChartVersion: tmpl.Spec.Helm.ChartVersion, + ReleaseName: svc.Name, + Values: svc.Values, + ReleaseNamespace: func() string { + if svc.Namespace != "" { + return svc.Namespace + } + return svc.Name + }(), + // The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is because + // the source.Spec.Insecure field is meant to be used for connecting to repositories + // over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for. + // See: https://github.com/fluxcd/source-controller/pull/1288 + PlainHTTP: source.Spec.Insecure, + }) + } + + if _, err := sveltos.ReconcileProfile(ctx, r.Client, l, mc.Namespace, mc.Name, + map[string]string{ + hmc.FluxHelmChartNamespaceKey: mc.Namespace, + hmc.FluxHelmChartNameKey: mc.Name, + }, + sveltos.ReconcileProfileOpts{ + OwnerReference: &metav1.OwnerReference{ + APIVersion: hmc.GroupVersion.String(), + Kind: hmc.ManagedClusterKind, + Name: mc.Name, + UID: mc.UID, + }, + HelmChartOpts: opts, + }); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile Profile: %w", err) + } + + // We don't technically need to requeue here, but doing so because golint fails with: + // `(*ManagedClusterReconciler).updateServices` - result `res` is always `nil` (unparam) + // + // This will be automatically resolved once setting status is implemented (https://github.com/Mirantis/hmc/issues/361), + // as it is likely that some execution path in the function will have to return with a requeue to fetch latest status. + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil +} + +// getServiceTemplateSource returns the source (HelmRepository) used by the ServiceTemplate. +// It is fetched by querying for ServiceTemplate -> HelmChart -> HelmRepository. +func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) { + tmplRef := types.NamespacedName{Namespace: tmpl.Namespace, Name: tmpl.Name} + + if tmpl.Status.ChartRef == nil { + return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String()) + } + + hc := &sourcev1.HelmChart{} + if err := r.Get(ctx, types.NamespacedName{ + Namespace: tmpl.Status.ChartRef.Namespace, + Name: tmpl.Status.ChartRef.Name, + }, hc); err != nil { + return nil, fmt.Errorf("failed to get HelmChart (%s): %w", tmplRef.String(), err) + } + + repo := &sourcev1.HelmRepository{} + if err := r.Get(ctx, types.NamespacedName{ + // Using chart's namespace because its source + // (helm repository in this case) should be within the same namespace.
+ Namespace: hc.Namespace, + Name: hc.Spec.SourceRef.Name, + }, repo); err != nil { + return nil, fmt.Errorf("failed to get HelmRepository (%s): %w", tmplRef.String(), err) + } + + return repo, nil +} + func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, managedCluster *hmc.ManagedCluster, hcChart *chart.Chart) error { install := action.NewInstall(actionConfig) install.DryRun = true @@ -381,7 +543,7 @@ func (r *ManagedClusterReconciler) getSource(ctx context.Context, ref *hcv2.Cros if ref == nil { return nil, fmt.Errorf("helm chart source is not provided") } - chartRef := types.NamespacedName{Namespace: ref.Namespace, Name: ref.Name} + chartRef := client.ObjectKey{Namespace: ref.Namespace, Name: ref.Name} hc := sourcev1.HelmChart{} if err := r.Client.Get(ctx, chartRef, &hc); err != nil { return nil, err @@ -391,7 +553,7 @@ func (r *ManagedClusterReconciler) getSource(ctx context.Context, ref *hcv2.Cros func (r *ManagedClusterReconciler) Delete(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { hr := &hcv2.HelmRelease{} - err := r.Get(ctx, types.NamespacedName{ + err := r.Get(ctx, client.ObjectKey{ Name: managedCluster.Name, Namespace: managedCluster.Namespace, }, hr) @@ -415,13 +577,18 @@ func (r *ManagedClusterReconciler) Delete(ctx context.Context, l logr.Logger, ma return ctrl.Result{}, err } + err = sveltos.DeleteProfile(ctx, r.Client, managedCluster.Namespace, managedCluster.Name) + if err != nil { + return ctrl.Result{}, err + } + err = r.releaseCluster(ctx, managedCluster.Namespace, managedCluster.Name, managedCluster.Spec.Template) if err != nil { return ctrl.Result{}, err } l.Info("HelmRelease still exists, retrying") - return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil } func (r *ManagedClusterReconciler) releaseCluster(ctx context.Context, namespace, name, templateName string) error { @@ -462,9 +629,9 @@ func (r *ManagedClusterReconciler) releaseCluster(ctx context.Context, namespace func (r *ManagedClusterReconciler) getProviders(ctx context.Context, templateNamespace, templateName string) ([]string, error) { template := &hmc.ClusterTemplate{} - templateRef := types.NamespacedName{Name: templateName, Namespace: templateNamespace} + templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} if err := r.Get(ctx, templateRef, template); err != nil { - log.FromContext(ctx).Error(err, "Failed to get ClusterTemplate", "namespace", templateNamespace, "name", templateName) + ctrl.LoggerFrom(ctx).Error(err, "Failed to get ClusterTemplate", "namespace", templateNamespace, "name", templateName) return nil, err } return template.Status.Providers.InfrastructureProviders, nil @@ -491,7 +658,7 @@ func (r *ManagedClusterReconciler) removeClusterFinalizer(ctx context.Context, c originalCluster := *cluster finalizersUpdated := controllerutil.RemoveFinalizer(cluster, hmc.BlockingFinalizer) if finalizersUpdated { - log.FromContext(ctx).Info("Allow to stop cluster", "finalizer", hmc.BlockingFinalizer) + ctrl.LoggerFrom(ctx).Info("Allow to stop cluster", "finalizer", hmc.BlockingFinalizer) if err := r.Client.Patch(ctx, cluster, client.MergeFrom(&originalCluster)); err != nil { return fmt.Errorf("failed to patch cluster %s/%s: %w", cluster.Namespace, cluster.Name, err) } @@ -514,6 +681,24 @@ func (r *ManagedClusterReconciler) machinesAvailable(ctx context.Context, namesp return len(itemsList.Items) != 0, nil } +func 
setIdentityHelmValues(values *apiextensionsv1.JSON, idRef *corev1.ObjectReference) (*apiextensionsv1.JSON, error) { + var valuesJSON map[string]any + err := json.Unmarshal(values.Raw, &valuesJSON) + if err != nil { + return nil, fmt.Errorf("error unmarshalling values: %s", err) + } + + valuesJSON["clusterIdentity"] = idRef + valuesRaw, err := json.Marshal(valuesJSON) + if err != nil { + return nil, fmt.Errorf("error marshalling values: %s", err) + } + + return &apiextensionsv1.JSON{ + Raw: valuesRaw, + }, nil +} + // SetupWithManager sets up the controller with the Manager. func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). @@ -521,7 +706,7 @@ func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { Watches(&hcv2.HelmRelease{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []ctrl.Request { managedCluster := hmc.ManagedCluster{} - managedClusterRef := types.NamespacedName{ + managedClusterRef := client.ObjectKey{ Namespace: o.GetNamespace(), Name: o.GetName(), } diff --git a/internal/controller/managedcluster_controller_test.go b/internal/controller/managedcluster_controller_test.go index 2ad111b06..72f2eddec 100644 --- a/internal/controller/managedcluster_controller_test.go +++ b/internal/controller/managedcluster_controller_test.go @@ -130,7 +130,6 @@ var _ = Describe("ManagedCluster Controller", func() { controllerReconciler := &ManagedClusterReconciler{ Client: k8sClient, - Scheme: k8sClient.Scheme(), } Expect(k8sClient.Delete(ctx, managedCluster)).To(Succeed()) @@ -148,7 +147,6 @@ var _ = Describe("ManagedCluster Controller", func() { By("Reconciling the created resource") controllerReconciler := &ManagedClusterReconciler{ Client: k8sClient, - Scheme: k8sClient.Scheme(), Config: &rest.Config{}, } diff --git a/internal/controller/management_controller.go b/internal/controller/management_controller.go index c189de638..3e6f5bffd 100644 --- a/internal/controller/management_controller.go +++ b/internal/controller/management_controller.go @@ -27,12 +27,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/certmanager" @@ -49,15 +47,16 @@ type ManagementReconciler struct { } func (r *ManagementReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ManagementController", req.NamespacedName) - log.IntoContext(ctx, l) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling Management") + management := &hmc.Management{} if err := r.Get(ctx, req.NamespacedName, management); err != nil { if apierrors.IsNotFound(err) { l.Info("Management not found, ignoring since object must be deleted") return ctrl.Result{}, nil } + l.Error(err, "Failed to get Management") return ctrl.Result{}, err } @@ -71,7 +70,7 @@ func (r *ManagementReconciler) Reconcile(ctx context.Context, req ctrl.Request) } func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Management) (ctrl.Result, error) { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) finalizersUpdated := controllerutil.AddFinalizer(management, hmc.ManagementFinalizer) 
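For illustration (the input values here are hypothetical), the setIdentityHelmValues helper added above merges the Credential's identityRef into the ManagedCluster Helm values under a clusterIdentity key, so values such as:

```
region: us-east-2
controlPlaneNumber: 3
```

would reach the HelmRelease roughly as:

```
region: us-east-2
controlPlaneNumber: 3
clusterIdentity:                                        # injected from the referenced Credential
  apiVersion: infrastructure.cluster.x-k8s.io/v1beta2   # illustrative identity reference
  kind: AWSClusterStaticIdentity
  name: aws-cluster-identity
```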
if finalizersUpdated { @@ -82,7 +81,7 @@ func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Manag } release := &hmc.Release{} - if err := r.Client.Get(ctx, types.NamespacedName{Name: management.Spec.Release}, release); err != nil { + if err := r.Client.Get(ctx, client.ObjectKey{Name: management.Spec.Release}, release); err != nil { l.Error(err, "failed to get Release object") return ctrl.Result{}, err } @@ -100,7 +99,7 @@ func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Manag components := wrappedComponents(management, release) for _, component := range components { template := &hmc.ProviderTemplate{} - err := r.Get(ctx, types.NamespacedName{ + err := r.Get(ctx, client.ObjectKey{ Name: component.Template, }, template) if err != nil { @@ -147,7 +146,7 @@ func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Manag } func (r *ManagementReconciler) Delete(ctx context.Context, management *hmc.Management) (ctrl.Result, error) { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) listOpts := &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{hmc.HMCManagedLabelKey: hmc.HMCManagedLabelValue}), } @@ -170,10 +169,10 @@ func (r *ManagementReconciler) Delete(ctx context.Context, management *hmc.Manag } func (r *ManagementReconciler) removeHelmReleases(ctx context.Context, hmcReleaseName string, opts *client.ListOptions) error { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) l.Info("Suspending HMC Helm Release reconciles") hmcRelease := &fluxv2.HelmRelease{} - err := r.Client.Get(ctx, types.NamespacedName{Namespace: r.SystemNamespace, Name: hmcReleaseName}, hmcRelease) + err := r.Client.Get(ctx, client.ObjectKey{Namespace: r.SystemNamespace, Name: hmcReleaseName}, hmcRelease) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -193,7 +192,7 @@ func (r *ManagementReconciler) removeHelmReleases(ctx context.Context, hmcReleas } func (r *ManagementReconciler) removeHelmCharts(ctx context.Context, opts *client.ListOptions) error { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) l.Info("Ensuring all HelmCharts owned by HMC are removed") gvk := sourcev1.GroupVersion.WithKind(sourcev1.HelmChartKind) if err := utils.EnsureDeleteAllOf(ctx, r.Client, gvk, opts); err != nil { @@ -204,7 +203,7 @@ func (r *ManagementReconciler) removeHelmCharts(ctx context.Context, opts *clien } func (r *ManagementReconciler) removeHelmRepositories(ctx context.Context, opts *client.ListOptions) error { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) l.Info("Ensuring all HelmRepositories owned by HMC are removed") gvk := sourcev1.GroupVersion.WithKind(sourcev1.HelmRepositoryKind) if err := utils.EnsureDeleteAllOf(ctx, r.Client, gvk, opts); err != nil { @@ -266,7 +265,7 @@ func wrappedComponents(mgmt *hmc.Management, release *hmc.Release) []component { // enableAdditionalComponents enables the admission controller and cluster api operator // once the cert manager is ready func (r *ManagementReconciler) enableAdditionalComponents(ctx context.Context, mgmt *hmc.Management) error { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) hmcComponent := &mgmt.Spec.Core.HMC config := make(map[string]any) @@ -309,9 +308,13 @@ func (r *ManagementReconciler) enableAdditionalComponents(ctx context.Context, m // Enable HMC capi operator only if it was not explicitly disabled in the config to // support installation with existing cluster api operator - if v, ok := capiOperatorValues["enabled"].(bool); ok && v { 
- l.Info("Enabling cluster API operator") - capiOperatorValues["enabled"] = true + { + enabledV, enabledExists := capiOperatorValues["enabled"] + enabledValue, castedOk := enabledV.(bool) + if !enabledExists || !castedOk || enabledValue { + l.Info("Enabling cluster API operator") + capiOperatorValues["enabled"] = true + } } config["cluster-api-operator"] = capiOperatorValues diff --git a/internal/controller/release_controller.go b/internal/controller/release_controller.go index 6b131eff5..3b1149a51 100644 --- a/internal/controller/release_controller.go +++ b/internal/controller/release_controller.go @@ -34,7 +34,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/build" @@ -54,8 +53,9 @@ type Poller struct { Config *rest.Config - CreateManagement bool - CreateTemplates bool + CreateManagement bool + CreateTemplateManagement bool + CreateTemplates bool HMCTemplatesChartName string SystemNamespace string @@ -79,7 +79,7 @@ func (p *Poller) Start(ctx context.Context) error { } func (p *Poller) Tick(ctx context.Context) error { - l := log.FromContext(ctx).WithValues("controller", "ReleaseController") + l := ctrl.LoggerFrom(ctx).WithValues("controller", "ReleaseController") l.Info("Poll is run") defer l.Info("Poll is finished") @@ -89,19 +89,21 @@ func (p *Poller) Tick(ctx context.Context) error { l.Error(err, "failed to reconcile HMC Templates") return err } - err = p.ensureManagement(ctx) + mgmt, err := p.getOrCreateManagement(ctx) if err != nil { - l.Error(err, "failed to ensure default Management object") + l.Error(err, "failed to get or create Management object") + return err + } + err = p.ensureTemplateManagement(ctx, mgmt) + if err != nil { + l.Error(err, "failed to ensure default TemplateManagement object") return err } return nil } -func (p *Poller) ensureManagement(ctx context.Context) error { - if !p.CreateManagement { - return nil - } - l := log.FromContext(ctx) +func (p *Poller) getOrCreateManagement(ctx context.Context) (*hmc.Management, error) { + l := ctrl.LoggerFrom(ctx) mgmtObj := &hmc.Management{ ObjectMeta: metav1.ObjectMeta{ Name: hmc.ManagementName, @@ -113,30 +115,32 @@ func (p *Poller) ensureManagement(ctx context.Context) error { }, mgmtObj) if err != nil { if !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to get %s Management object", hmc.ManagementName) + return nil, fmt.Errorf("failed to get %s Management object: %w", hmc.ManagementName, err) + } + if !p.CreateManagement { + return nil, nil } - mgmtObj.Spec.Release, err = p.getCurrentReleaseName(ctx) if err != nil { - return err + return nil, err } if err := mgmtObj.Spec.SetProvidersDefaults(); err != nil { - return err + return nil, err } getter := helm.NewMemoryRESTClientGetter(p.Config, p.RESTMapper()) actionConfig := new(action.Configuration) err = actionConfig.Init(getter, p.SystemNamespace, "secret", l.Info) if err != nil { - return err + return nil, err } hmcConfig := make(chartutil.Values) release, err := actionConfig.Releases.Last("hmc") if err != nil { if !errors.Is(err, driver.ErrReleaseNotFound) { - return err + return nil, err } } else { if len(release.Config) > 0 { @@ -152,7 +156,7 @@ func (p *Poller) ensureManagement(ctx context.Context) error { }) rawConfig, err := json.Marshal(hmcConfig) if err != nil { - return err + return nil, err } mgmtObj.Spec.Core = &hmc.Core{ 
HMC: hmc.Component{ @@ -164,15 +168,52 @@ func (p *Poller) ensureManagement(ctx context.Context) error { err = p.Create(ctx, mgmtObj) if err != nil { - return fmt.Errorf("failed to create %s Management object: %s", hmc.ManagementName, err) + return nil, fmt.Errorf("failed to create %s Management object: %s", hmc.ManagementName, err) } l.Info("Successfully created Management object with default configuration") } + return mgmtObj, nil +} + +func (p *Poller) ensureTemplateManagement(ctx context.Context, mgmt *hmc.Management) error { + l := ctrl.LoggerFrom(ctx) + if !p.CreateTemplateManagement { + return nil + } + if mgmt == nil { + return fmt.Errorf("management object is not found") + } + tmObj := &hmc.TemplateManagement{ + ObjectMeta: metav1.ObjectMeta{ + Name: hmc.TemplateManagementName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: hmc.GroupVersion.String(), + Kind: mgmt.Kind, + Name: mgmt.Name, + UID: mgmt.UID, + }, + }, + }, + } + err := p.Get(ctx, client.ObjectKey{ + Name: hmc.TemplateManagementName, + }, tmObj) + if err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to get %s TemplateManagement object: %w", hmc.TemplateManagementName, err) + } + err = p.Create(ctx, tmObj) + if err != nil { + return fmt.Errorf("failed to create %s TemplateManagement object: %w", hmc.TemplateManagementName, err) + } + l.Info("Successfully created TemplateManagement object") + } return nil } func (p *Poller) reconcileHMCTemplates(ctx context.Context) error { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) if !p.CreateTemplates { l.Info("Reconciling HMC Templates is skipped") return nil diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index d6627aae1..50f8d66f3 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -45,6 +45,7 @@ import ( hmcmirantiscomv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" hmcwebhook "github.com/Mirantis/hmc/internal/webhook" + sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" // +kubebuilder:scaffold:imports ) @@ -113,6 +114,8 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) err = helmcontrollerv2.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = sveltosv1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/internal/controller/template_controller.go b/internal/controller/template_controller.go index b2171e886..6ea6f98db 100644 --- a/internal/controller/template_controller.go +++ b/internal/controller/template_controller.go @@ -28,11 +28,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/helm" @@ -73,7 +71,7 @@ type ProviderTemplateReconciler struct { } func (r *ClusterTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ClusterTemplateController", req.NamespacedName) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling ClusterTemplate") clusterTemplate := &hmc.ClusterTemplate{} @@ -90,7 +88,7 @@ func (r *ClusterTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Requ } func (r 
*ServiceTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ServiceTemplateReconciler", req.NamespacedName) + l := ctrl.LoggerFrom(ctx).WithValues("ServiceTemplateReconciler", req.NamespacedName) l.Info("Reconciling ServiceTemplate") serviceTemplate := &hmc.ServiceTemplate{} @@ -107,7 +105,7 @@ func (r *ServiceTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Requ } func (r *ProviderTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ProviderTemplateReconciler", req.NamespacedName) + l := ctrl.LoggerFrom(ctx).WithValues("ProviderTemplateReconciler", req.NamespacedName) l.Info("Reconciling ProviderTemplate") providerTemplate := &hmc.ProviderTemplate{} @@ -131,7 +129,7 @@ type Template interface { } func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template Template) (ctrl.Result, error) { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) spec := template.GetSpec() status := template.GetStatus() @@ -274,7 +272,7 @@ func (r *TemplateReconciler) updateStatus(ctx context.Context, template Template } func (r *TemplateReconciler) reconcileDefaultHelmRepository(ctx context.Context, namespace string) error { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) if namespace == "" { namespace = r.SystemNamespace } @@ -360,7 +358,7 @@ func (r *TemplateReconciler) getHelmChartFromChartRef(ctx context.Context, chart return nil, fmt.Errorf("invalid chartRef.Kind: %s. Only HelmChart kind is supported", chartRef.Kind) } helmChart := &sourcev1.HelmChart{} - err := r.Get(ctx, types.NamespacedName{ + err := r.Get(ctx, client.ObjectKey{ Namespace: chartRef.Namespace, Name: chartRef.Name, }, helmChart) diff --git a/internal/controller/templatemanagement_controller.go b/internal/controller/templatemanagement_controller.go index 745b64644..a6be1ebf9 100644 --- a/internal/controller/templatemanagement_controller.go +++ b/internal/controller/templatemanagement_controller.go @@ -23,11 +23,9 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/templateutil" @@ -36,21 +34,21 @@ import ( // TemplateManagementReconciler reconciles a TemplateManagement object type TemplateManagementReconciler struct { client.Client - Scheme *runtime.Scheme Config *rest.Config SystemNamespace string } func (r *TemplateManagementReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, err error) { - l := log.FromContext(ctx).WithValues("TemplateManagementController", req.Name) - log.IntoContext(ctx, l) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling TemplateManagement") + templateMgmt := &hmc.TemplateManagement{} if err := r.Get(ctx, req.NamespacedName, templateMgmt); err != nil { if apierrors.IsNotFound(err) { l.Info("TemplateManagement not found, ignoring since object must be deleted") return ctrl.Result{}, nil } + l.Error(err, "Failed to get TemplateManagement") return ctrl.Result{}, err } @@ -113,7 +111,7 @@ func (r *TemplateManagementReconciler) distributeTemplates(ctx context.Context, } func (r *TemplateManagementReconciler) applyTemplates(ctx 
context.Context, kind string, name string, namespaces map[string]bool) error { - l := log.FromContext(ctx) + l := ctrl.LoggerFrom(ctx) meta := metav1.ObjectMeta{ Name: name, Labels: map[string]string{ diff --git a/internal/controller/templatemanagement_controller_test.go b/internal/controller/templatemanagement_controller_test.go index a1f14df7b..214e0f4bb 100644 --- a/internal/controller/templatemanagement_controller_test.go +++ b/internal/controller/templatemanagement_controller_test.go @@ -198,14 +198,8 @@ var _ = Describe("Template Management Controller", func() { Expect(k8sClient.Delete(ctx, ns)).To(Succeed()) } - tm := &hmcmirantiscomv1alpha1.TemplateManagement{} - err := k8sClient.Get(ctx, types.NamespacedName{Name: tmName}, tm) - Expect(err).NotTo(HaveOccurred()) - By("Cleanup the specific resource instance TemplateManagement") - Expect(k8sClient.Delete(ctx, tm)).To(Succeed()) - ctChain := &hmcmirantiscomv1alpha1.ClusterTemplateChain{} - err = k8sClient.Get(ctx, types.NamespacedName{Name: ctChainName}, ctChain) + err := k8sClient.Get(ctx, types.NamespacedName{Name: ctChainName}, ctChain) Expect(err).NotTo(HaveOccurred()) By("Cleanup the specific resource instance ClusterTemplateChain") Expect(k8sClient.Delete(ctx, ctChain)).To(Succeed()) @@ -228,7 +222,6 @@ var _ = Describe("Template Management Controller", func() { By("Reconciling the created resource") controllerReconciler := &TemplateManagementReconciler{ Client: k8sClient, - Scheme: k8sClient.Scheme(), SystemNamespace: systemNamespace.Name, } _, err = controllerReconciler.Reconcile(ctx, reconcile.Request{ diff --git a/internal/sveltos/profile.go b/internal/sveltos/profile.go new file mode 100644 index 000000000..4b6948189 --- /dev/null +++ b/internal/sveltos/profile.go @@ -0,0 +1,142 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sveltos + +import ( + "context" + "fmt" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/go-logr/logr" + sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" + libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/yaml" +) + +type ReconcileProfileOpts struct { + OwnerReference *metav1.OwnerReference + HelmChartOpts []HelmChartOpts +} + +type HelmChartOpts struct { + RepositoryURL string + RepositoryName string + ChartName string + ChartVersion string + ReleaseName string + ReleaseNamespace string + Values *apiextensionsv1.JSON + PlainHTTP bool + InsecureSkipTLSVerify bool +} + +// ReconcileProfile reconciles a Sveltos Profile object. 
+func ReconcileProfile(ctx context.Context, + cl client.Client, + l logr.Logger, + namespace string, + name string, + matchLabels map[string]string, + opts ReconcileProfileOpts, +) (*sveltosv1beta1.Profile, error) { + cp := &sveltosv1beta1.Profile{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + + operation, err := ctrl.CreateOrUpdate(ctx, cl, cp, func() error { + if cp.Labels == nil { + cp.Labels = make(map[string]string) + } + + cp.Labels[hmc.HMCManagedLabelKey] = hmc.HMCManagedLabelValue + if opts.OwnerReference != nil { + cp.OwnerReferences = []metav1.OwnerReference{*opts.OwnerReference} + } + + cp.Spec = sveltosv1beta1.Spec{ + ClusterSelector: libsveltosv1beta1.Selector{ + LabelSelector: metav1.LabelSelector{ + MatchLabels: matchLabels, + }, + }, + } + + for _, hc := range opts.HelmChartOpts { + helmChart := sveltosv1beta1.HelmChart{ + RepositoryURL: hc.RepositoryURL, + RepositoryName: hc.RepositoryName, + ChartName: hc.ChartName, + ChartVersion: hc.ChartVersion, + ReleaseName: hc.ReleaseName, + ReleaseNamespace: hc.ReleaseNamespace, + HelmChartAction: sveltosv1beta1.HelmChartActionInstall, + RegistryCredentialsConfig: &sveltosv1beta1.RegistryCredentialsConfig{ + PlainHTTP: hc.PlainHTTP, + InsecureSkipTLSVerify: hc.InsecureSkipTLSVerify, + }, + } + + if hc.PlainHTTP { + // InsecureSkipTLSVerify is redundant in this case. + helmChart.RegistryCredentialsConfig.InsecureSkipTLSVerify = false + } + + if hc.Values != nil { + b, err := hc.Values.MarshalJSON() + if err != nil { + return fmt.Errorf("failed to marshal values to JSON for service (%s) in ManagedCluster: %w", hc.RepositoryName, err) + } + + b, err = yaml.JSONToYAML(b) + if err != nil { + return fmt.Errorf("failed to convert values from JSON to YAML for service (%s) in ManagedCluster: %w", hc.RepositoryName, err) + } + + helmChart.Values = string(b) + } + + cp.Spec.HelmCharts = append(cp.Spec.HelmCharts, helmChart) + } + return nil + }) + if err != nil { + return nil, err + } + + if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated { + l.Info(fmt.Sprintf("Successfully %s Profile (%s/%s)", string(operation), cp.Namespace, cp.Name)) + } + + return cp, nil +} + +func DeleteProfile(ctx context.Context, cl client.Client, namespace string, name string) error { + err := cl.Delete(ctx, &sveltosv1beta1.Profile{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + }) + + return client.IgnoreNotFound(err) +} diff --git a/internal/telemetry/client.go b/internal/telemetry/client.go index 5f221cc9d..828775f9b 100644 --- a/internal/telemetry/client.go +++ b/internal/telemetry/client.go @@ -19,13 +19,14 @@ import ( ) var ( - segmentToken = "" - client = newClient() + segmentToken = "" + analyticsClient analytics.Client ) -func newClient() analytics.Client { +func init() { if segmentToken == "" { - return nil + return } - return analytics.New(segmentToken) + + analyticsClient = analytics.New(segmentToken) } diff --git a/internal/telemetry/event.go b/internal/telemetry/event.go index 2f37e69d7..176470905 100644 --- a/internal/telemetry/event.go +++ b/internal/telemetry/event.go @@ -50,10 +50,10 @@ func TrackManagedClusterHeartbeat(id, managedClusterID, clusterID, template, tem } func TrackEvent(name, id string, properties map[string]any) error { - if client == nil { + if analyticsClient == nil { return nil } - return client.Enqueue(analytics.Track{ + return analyticsClient.Enqueue(analytics.Track{ AnonymousId: id, Event: name, 
Properties: properties, diff --git a/internal/telemetry/tracker.go b/internal/telemetry/tracker.go index fbabb12b3..a9b578bdf 100644 --- a/internal/telemetry/tracker.go +++ b/internal/telemetry/tracker.go @@ -21,15 +21,14 @@ import ( "strings" "time" - "k8s.io/apimachinery/pkg/types" - crclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "github.com/Mirantis/hmc/api/v1alpha1" ) type Tracker struct { - crclient.Client + client.Client SystemNamespace string } @@ -63,26 +62,23 @@ func (t *Tracker) Tick(ctx context.Context) { func (t *Tracker) trackManagedClusterHeartbeat(ctx context.Context) error { mgmt := &v1alpha1.Management{} - mgmtRef := types.NamespacedName{Name: v1alpha1.ManagementName} - err := t.Get(ctx, mgmtRef, mgmt) - if err != nil { + if err := t.Get(ctx, client.ObjectKey{Name: v1alpha1.ManagementName}, mgmt); err != nil { return err } - templates := make(map[string]v1alpha1.ClusterTemplate) templatesList := &v1alpha1.ClusterTemplateList{} - err = t.List(ctx, templatesList, crclient.InNamespace(t.SystemNamespace)) - if err != nil { + if err := t.List(ctx, templatesList, client.InNamespace(t.SystemNamespace)); err != nil { return err } + + templates := make(map[string]v1alpha1.ClusterTemplate) for _, template := range templatesList.Items { templates[template.Name] = template } var errs error managedClusters := &v1alpha1.ManagedClusterList{} - err = t.List(ctx, managedClusters, &crclient.ListOptions{}) - if err != nil { + if err := t.List(ctx, managedClusters); err != nil { return err } @@ -90,7 +86,8 @@ func (t *Tracker) trackManagedClusterHeartbeat(ctx context.Context) error { template := templates[managedCluster.Spec.Template] // TODO: get k0s cluster ID once it's exposed in k0smotron API clusterID := "" - err = TrackManagedClusterHeartbeat( + + err := TrackManagedClusterHeartbeat( string(mgmt.UID), string(managedCluster.UID), clusterID, diff --git a/internal/templateutil/state.go b/internal/templateutil/state.go index 279dd0392..b7db545e9 100644 --- a/internal/templateutil/state.go +++ b/internal/templateutil/state.go @@ -23,7 +23,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" hmc "github.com/Mirantis/hmc/api/v1alpha1" @@ -109,7 +108,7 @@ func ParseAccessRules(ctx context.Context, cl client.Client, rules []hmc.AccessR var serviceTemplates []string for _, ctChainName := range rule.ClusterTemplateChains { ctChain := &hmc.ClusterTemplateChain{} - err := cl.Get(ctx, types.NamespacedName{ + err := cl.Get(ctx, client.ObjectKey{ Name: ctChainName, }, ctChain) if err != nil { @@ -122,7 +121,7 @@ func ParseAccessRules(ctx context.Context, cl client.Client, rules []hmc.AccessR } for _, stChainName := range rule.ServiceTemplateChains { stChain := &hmc.ServiceTemplateChain{} - err := cl.Get(ctx, types.NamespacedName{ + err := cl.Get(ctx, client.ObjectKey{ Name: stChainName, }, stChain) if err != nil { diff --git a/internal/utils/helm.go b/internal/utils/helm.go index 6ed9bfa56..016282055 100644 --- a/internal/utils/helm.go +++ b/internal/utils/helm.go @@ -20,8 +20,8 @@ import ( ) const ( - registryTypeOCI = "oci" - registryTypeDefault = "default" + RegistryTypeOCI = "oci" + RegistryTypeDefault = "default" ) func DetermineDefaultRepositoryType(defaultRegistryURL string) (string, error) { @@ -32,9 +32,9 @@ func 
DetermineDefaultRepositoryType(defaultRegistryURL string) (string, error) { switch parsedRegistryURL.Scheme { case "oci": - return registryTypeOCI, nil + return RegistryTypeOCI, nil case "http", "https": - return registryTypeDefault, nil + return RegistryTypeDefault, nil default: return "", fmt.Errorf("invalid default registry URL scheme: %s must be 'oci://', 'http://', or 'https://'", parsedRegistryURL.Scheme) } diff --git a/internal/webhook/managedcluster_webhook.go b/internal/webhook/managedcluster_webhook.go index 48b5e221c..79cc2b2a5 100644 --- a/internal/webhook/managedcluster_webhook.go +++ b/internal/webhook/managedcluster_webhook.go @@ -23,7 +23,6 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -121,7 +120,7 @@ func (v *ManagedClusterValidator) Default(ctx context.Context, obj runtime.Objec func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, templateNamespace, templateName string) (*v1alpha1.ClusterTemplate, error) { template := &v1alpha1.ClusterTemplate{} - templateRef := types.NamespacedName{Name: templateName, Namespace: templateNamespace} + templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} if err := v.Get(ctx, templateRef, template); err != nil { return nil, err } @@ -142,7 +141,7 @@ func (v *ManagedClusterValidator) isTemplateValid(ctx context.Context, template func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template *v1alpha1.ClusterTemplate) error { requiredProviders := template.Status.Providers management := &v1alpha1.Management{} - managementRef := types.NamespacedName{Name: v1alpha1.ManagementName} + managementRef := client.ObjectKey{Name: v1alpha1.ManagementName} if err := v.Get(ctx, managementRef, management); err != nil { return err } diff --git a/internal/webhook/templatemanagement_webhook.go b/internal/webhook/templatemanagement_webhook.go index a1c8f8af1..5b9a53e7c 100644 --- a/internal/webhook/templatemanagement_webhook.go +++ b/internal/webhook/templatemanagement_webhook.go @@ -16,11 +16,13 @@ package webhook import ( "context" + "errors" "fmt" "sort" "strings" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,6 +33,8 @@ import ( "github.com/Mirantis/hmc/internal/templateutil" ) +var errTemplateManagementDeletionForbidden = errors.New("TemplateManagement deletion is forbidden") + type TemplateManagementValidator struct { client.Client SystemNamespace string @@ -51,7 +55,16 @@ var ( ) // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
-func (*TemplateManagementValidator) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) { +func (v *TemplateManagementValidator) ValidateCreate(ctx context.Context, _ runtime.Object) (admission.Warnings, error) { + itemsList := &metav1.PartialObjectMetadataList{} + gvk := v1alpha1.GroupVersion.WithKind(v1alpha1.TemplateManagementKind) + itemsList.SetGroupVersionKind(gvk) + if err := v.List(ctx, itemsList); err != nil { + return nil, err + } + if len(itemsList.Items) > 0 { + return nil, fmt.Errorf("TemplateManagement object already exists") + } return nil, nil } @@ -113,7 +126,20 @@ func getManagedClustersForTemplate(ctx context.Context, cl client.Client, namesp } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (*TemplateManagementValidator) ValidateDelete(context.Context, runtime.Object) (admission.Warnings, error) { +func (v *TemplateManagementValidator) ValidateDelete(ctx context.Context, _ runtime.Object) (admission.Warnings, error) { + partialList := &metav1.PartialObjectMetadataList{} + gvk := v1alpha1.GroupVersion.WithKind(v1alpha1.ManagementKind) + partialList.SetGroupVersionKind(gvk) + err := v.List(ctx, partialList) + if err != nil { + return nil, fmt.Errorf("failed to list Management objects: %v", err) + } + if len(partialList.Items) > 0 { + mgmt := partialList.Items[0] + if mgmt.DeletionTimestamp == nil { + return nil, errTemplateManagementDeletionForbidden + } + } return nil, nil } diff --git a/internal/webhook/templatemanagement_webhook_test.go b/internal/webhook/templatemanagement_webhook_test.go index 06b53c06f..50e429587 100644 --- a/internal/webhook/templatemanagement_webhook_test.go +++ b/internal/webhook/templatemanagement_webhook_test.go @@ -28,12 +28,63 @@ import ( "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/utils" "github.com/Mirantis/hmc/test/objects/managedcluster" + "github.com/Mirantis/hmc/test/objects/management" "github.com/Mirantis/hmc/test/objects/template" chain "github.com/Mirantis/hmc/test/objects/templatechain" tm "github.com/Mirantis/hmc/test/objects/templatemanagement" "github.com/Mirantis/hmc/test/scheme" ) +func TestTemplateManagementValidateCreate(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + tests := []struct { + name string + tm *v1alpha1.TemplateManagement + existingObjects []runtime.Object + err string + warnings admission.Warnings + }{ + { + name: "should fail if the TemplateManagement object already exists", + tm: tm.NewTemplateManagement(tm.WithName("new")), + existingObjects: []runtime.Object{tm.NewTemplateManagement(tm.WithName(v1alpha1.TemplateManagementName))}, + err: "TemplateManagement object already exists", + }, + { + name: "should succeed", + tm: tm.NewTemplateManagement(tm.WithName("new")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithRuntimeObjects(tt.existingObjects...). + WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.TemplateKey, v1alpha1.ExtractTemplateName). 
+ Build() + validator := &TemplateManagementValidator{Client: c, SystemNamespace: utils.DefaultSystemNamespace} + warn, err := validator.ValidateCreate(ctx, tt.tm) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + if err.Error() != tt.err { + t.Fatalf("expected error '%s', got error: %s", tt.err, err.Error()) + } + } else { + g.Expect(err).To(Succeed()) + } + if len(tt.warnings) > 0 { + g.Expect(warn).To(Equal(tt.warnings)) + } else { + g.Expect(warn).To(BeEmpty()) + } + }) + } +} + func TestTemplateManagementValidateUpdate(t *testing.T) { g := NewWithT(t) @@ -205,3 +256,60 @@ func TestTemplateManagementValidateUpdate(t *testing.T) { }) } } + +func TestTemplateManagementValidateDelete(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + tmName := "test" + + tests := []struct { + name string + tm *v1alpha1.TemplateManagement + existingObjects []runtime.Object + err string + warnings admission.Warnings + }{ + { + name: "should fail if Management object exists and was not deleted", + tm: tm.NewTemplateManagement(tm.WithName(tmName)), + existingObjects: []runtime.Object{management.NewManagement()}, + err: "TemplateManagement deletion is forbidden", + }, + { + name: "should succeed if Management object is not found", + tm: tm.NewTemplateManagement(tm.WithName(tmName)), + }, + { + name: "should succeed if Management object was deleted", + tm: tm.NewTemplateManagement(tm.WithName(tmName)), + existingObjects: []runtime.Object{management.NewManagement(management.WithDeletionTimestamp(metav1.Now()))}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithRuntimeObjects(tt.existingObjects...). + WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.TemplateKey, v1alpha1.ExtractTemplateName). 
+ Build() + validator := &TemplateManagementValidator{Client: c, SystemNamespace: utils.DefaultSystemNamespace} + warn, err := validator.ValidateDelete(ctx, tt.tm) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + if err.Error() != tt.err { + t.Fatalf("expected error '%s', got error: %s", tt.err, err.Error()) + } + } else { + g.Expect(err).To(Succeed()) + } + if len(tt.warnings) > 0 { + g.Expect(warn).To(Equal(tt.warnings)) + } else { + g.Expect(warn).To(BeEmpty()) + } + }) + } +} diff --git a/templates/cluster/aws-hosted-cp/Chart.yaml b/templates/cluster/aws-hosted-cp/Chart.yaml index 2917fb012..e42db585f 100644 --- a/templates/cluster/aws-hosted-cp/Chart.yaml +++ b/templates/cluster/aws-hosted-cp/Chart.yaml @@ -15,5 +15,5 @@ version: 0.1.4 appVersion: "1.30.4+k0s.0" annotations: hmc.mirantis.com/infrastructure-providers: aws - hmc.mirantis.com/controlplane-providers: k0smotron + hmc.mirantis.com/control-plane-providers: k0smotron hmc.mirantis.com/bootstrap-providers: k0s diff --git a/templates/cluster/aws-standalone-cp/Chart.yaml b/templates/cluster/aws-standalone-cp/Chart.yaml index 20e783367..e0c82882f 100644 --- a/templates/cluster/aws-standalone-cp/Chart.yaml +++ b/templates/cluster/aws-standalone-cp/Chart.yaml @@ -14,5 +14,5 @@ version: 0.1.4 appVersion: "1.30.4+k0s.0" annotations: hmc.mirantis.com/infrastructure-providers: aws - hmc.mirantis.com/controlplane-providers: k0s + hmc.mirantis.com/control-plane-providers: k0s hmc.mirantis.com/bootstrap-providers: k0s diff --git a/templates/cluster/azure-hosted-cp/Chart.yaml b/templates/cluster/azure-hosted-cp/Chart.yaml index 9f97c38b7..a034e4778 100644 --- a/templates/cluster/azure-hosted-cp/Chart.yaml +++ b/templates/cluster/azure-hosted-cp/Chart.yaml @@ -15,5 +15,5 @@ version: 0.1.1 appVersion: "1.30.4+k0s.0" annotations: hmc.mirantis.com/infrastructure-providers: azure - hmc.mirantis.com/controlplane-providers: k0s + hmc.mirantis.com/control-plane-providers: k0s hmc.mirantis.com/bootstrap-providers: k0s diff --git a/templates/cluster/azure-standalone-cp/Chart.yaml b/templates/cluster/azure-standalone-cp/Chart.yaml index cc88cd3c1..7155ea597 100644 --- a/templates/cluster/azure-standalone-cp/Chart.yaml +++ b/templates/cluster/azure-standalone-cp/Chart.yaml @@ -14,5 +14,5 @@ version: 0.1.1 appVersion: "1.30.4+k0s.0" annotations: hmc.mirantis.com/infrastructure-providers: azure - hmc.mirantis.com/controlplane-providers: k0s + hmc.mirantis.com/control-plane-providers: k0s hmc.mirantis.com/bootstrap-providers: k0s diff --git a/templates/cluster/vsphere-hosted-cp/Chart.yaml b/templates/cluster/vsphere-hosted-cp/Chart.yaml index 613e74a63..4619a40e9 100644 --- a/templates/cluster/vsphere-hosted-cp/Chart.yaml +++ b/templates/cluster/vsphere-hosted-cp/Chart.yaml @@ -16,5 +16,5 @@ appVersion: "1.30.4+k0s.0" annotations: hmc.mirantis.com/type: deployment hmc.mirantis.com/infrastructure-providers: vsphere - hmc.mirantis.com/controlplane-providers: k0s + hmc.mirantis.com/control-plane-providers: k0s hmc.mirantis.com/bootstrap-providers: k0s diff --git a/templates/cluster/vsphere-standalone-cp/Chart.yaml b/templates/cluster/vsphere-standalone-cp/Chart.yaml index 3d8b11db8..23f270373 100644 --- a/templates/cluster/vsphere-standalone-cp/Chart.yaml +++ b/templates/cluster/vsphere-standalone-cp/Chart.yaml @@ -15,5 +15,5 @@ appVersion: "1.30.4+k0s.0" annotations: hmc.mirantis.com/type: deployment hmc.mirantis.com/infrastructure-providers: vsphere - hmc.mirantis.com/controlplane-providers: k0s + hmc.mirantis.com/control-plane-providers: k0s 
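Tying the new API surface together — the `credential` field, the `services` list handled by the ManagedCluster controller, and the ServiceTemplate manifests added just below — a ManagedCluster using them might look like the following sketch (the cluster, template, and credential names are illustrative, not part of this change):

```
apiVersion: hmc.mirantis.com/v1alpha1
kind: ManagedCluster
metadata:
  name: my-cluster                      # hypothetical
  namespace: hmc-system
spec:
  template: aws-standalone-cp-0-1-4     # hypothetical cluster template name
  credential: aws-cred                  # must reference a Credential in Ready state
  services:
    - template: kyverno
      name: kyverno
      namespace: kyverno
    - template: ingress-nginx
      name: ingress-nginx
```

The controller refuses to deploy until the referenced Credential reports Ready, and renders the services list into a Sveltos Profile targeting the new cluster.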
hmc.mirantis.com/bootstrap-providers: k0s diff --git a/templates/provider/hmc-templates/files/templates/ingress-nginx.yaml b/templates/provider/hmc-templates/files/templates/ingress-nginx.yaml new file mode 100644 index 000000000..46cf8dde6 --- /dev/null +++ b/templates/provider/hmc-templates/files/templates/ingress-nginx.yaml @@ -0,0 +1,8 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ServiceTemplate +metadata: + name: ingress-nginx +spec: + helm: + chartName: ingress-nginx + chartVersion: 4.11.0 diff --git a/templates/provider/hmc-templates/files/templates/kyverno.yaml b/templates/provider/hmc-templates/files/templates/kyverno.yaml new file mode 100644 index 000000000..27300d442 --- /dev/null +++ b/templates/provider/hmc-templates/files/templates/kyverno.yaml @@ -0,0 +1,8 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ServiceTemplate +metadata: + name: kyverno +spec: + helm: + chartName: kyverno + chartVersion: 3.2.6 diff --git a/templates/provider/hmc-templates/files/templates/projectsveltos.yaml b/templates/provider/hmc-templates/files/templates/projectsveltos.yaml index 646a4aba6..390e620f4 100644 --- a/templates/provider/hmc-templates/files/templates/projectsveltos.yaml +++ b/templates/provider/hmc-templates/files/templates/projectsveltos.yaml @@ -5,4 +5,4 @@ metadata: spec: helm: chartName: projectsveltos - chartVersion: 0.38.1 + chartVersion: 0.38.2 diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplatechains.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplatechains.yaml index 494a7c5c8..f2f8a3ccc 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplatechains.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplatechains.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: clustertemplatechains.hmc.mirantis.com spec: group: hmc.mirantis.com diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml index 60726eba9..ab9fe08dd 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: clustertemplates.hmc.mirantis.com spec: group: hmc.mirantis.com diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_credentials.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_credentials.yaml new file mode 100644 index 000000000..29f5bd21d --- /dev/null +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_credentials.yaml @@ -0,0 +1,110 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.3 + name: credentials.hmc.mirantis.com +spec: + group: hmc.mirantis.com + names: + kind: Credential + listKind: CredentialList + plural: credentials + shortNames: + - cred + singular: credential + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.description + name: Description + type: string + name: 
v1alpha1 + schema: + openAPIV3Schema: + description: Credential is the Schema for the credentials API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CredentialSpec defines the desired state of Credential + properties: + description: + description: Description of the Credential object + type: string + identityRef: + description: Reference to the Credential Identity + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + required: + - identityRef + type: object + status: + description: CredentialStatus defines the observed state of Credential + properties: + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index d98619a45..eaf6d0fd9 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: managedclusters.hmc.mirantis.com spec: group: hmc.mirantis.com @@ -62,10 +62,46 @@ spec: If no Config provided, the field will be populated with the default values for the template and DryRun will be enabled. x-kubernetes-preserve-unknown-fields: true + credential: + type: string dryRun: description: DryRun specifies whether the template should be applied after validation or only validated. type: boolean + services: + description: |- + Services is a list of services created via ServiceTemplates + that could be installed on the target cluster. + items: + description: ManagedClusterServiceSpec represents a Service within + ManagedCluster + properties: + disable: + description: Disable can be set to disable handling of this + service. + type: boolean + name: + description: Name is the chart release. + minLength: 1 + type: string + namespace: + description: |- + Namespace is the namespace the release will be installed in. + It will default to Name if not provided. + type: string + template: + description: Template is a reference to a Template object located + in the same namespace. + minLength: 1 + type: string + values: + description: Values is the helm values to be passed to the template. + x-kubernetes-preserve-unknown-fields: true + required: + - name + - template + type: object + type: array template: description: Template is a reference to a Template object located in the same namespace. @@ -81,16 +117,8 @@ spec: description: Conditions contains details for the current state of the ManagedCluster items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -131,12 +159,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml index cc6dc89b9..21aed29a4 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: managements.hmc.mirantis.com spec: group: hmc.mirantis.com diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml index 0c13a883f..5249d0bd6 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: providertemplates.hmc.mirantis.com spec: group: hmc.mirantis.com diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml index f8f8e8004..a37cfe329 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: releases.hmc.mirantis.com spec: group: hmc.mirantis.com @@ -97,16 +97,8 @@ spec: description: Conditions contains details for the current state of the Release items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -147,12 +139,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplatechains.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplatechains.yaml index 37f12c8f1..ce4ca88b1 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplatechains.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplatechains.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: servicetemplatechains.hmc.mirantis.com spec: group: hmc.mirantis.com diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml index 975b526d8..e3747b9d9 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: servicetemplates.hmc.mirantis.com spec: group: hmc.mirantis.com diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_templatemanagements.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_templatemanagements.yaml index 776351956..1cca6e5b2 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_templatemanagements.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_templatemanagements.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: templatemanagements.hmc.mirantis.com spec: group: hmc.mirantis.com diff --git a/templates/provider/hmc/templates/deployment.yaml b/templates/provider/hmc/templates/deployment.yaml index 925c9d491..42ef066ab 100644 --- a/templates/provider/hmc/templates/deployment.yaml +++ b/templates/provider/hmc/templates/deployment.yaml @@ -27,6 +27,7 @@ spec: - --registry-creds-secret={{ .Values.controller.registryCredsSecret }} {{- end }} - --create-management={{ .Values.controller.createManagement }} + - --create-template-management={{ .Values.controller.createTemplateManagement }} - --create-templates={{ .Values.controller.createTemplates }} - --enable-telemetry={{ .Values.controller.enableTelemetry }} - --enable-webhook={{ .Values.admissionWebhook.enabled }} diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index 70877836d..739a355e3 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -43,12 +43,7 @@ rules: - hmc.mirantis.com resources: - templatemanagements - verbs: - - 
get - - list - - patch - - update - - watch + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} - apiGroups: - hmc.mirantis.com resources: @@ -138,6 +133,39 @@ rules: resources: - namespaces verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} +- apiGroups: + - hmc.mirantis.com + resources: + - credentials + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} +- apiGroups: + - hmc.mirantis.com + resources: + - credentials/finalizers + verbs: + - update +- apiGroups: + - hmc.mirantis.com + resources: + - credentials/status + verbs: + - get + - patch + - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsclusterstaticidentities + - awsclustercontrolleridentities + - awsclusterroleidentities + - azureclusteridentities + - vsphereclusteridentities + verbs: {{ include "rbac.viewerVerbs" . | nindent 4 }} +- apiGroups: + - config.projectsveltos.io + resources: + - profiles + verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/templates/provider/hmc/values.schema.json b/templates/provider/hmc/values.schema.json index f8df0f9d5..fc2bf2482 100644 --- a/templates/provider/hmc/values.schema.json +++ b/templates/provider/hmc/values.schema.json @@ -116,6 +116,9 @@ "createManagement": { "type": "boolean" }, + "createTemplateManagement": { + "type": "boolean" + }, "createTemplate": { "type": "boolean" }, diff --git a/templates/provider/hmc/values.yaml b/templates/provider/hmc/values.yaml index 6f297a780..a9ecf806c 100644 --- a/templates/provider/hmc/values.yaml +++ b/templates/provider/hmc/values.yaml @@ -11,6 +11,7 @@ controller: registryCredsSecret: "" insecureRegistry: false createManagement: true + createTemplateManagement: true createTemplates: true enableTelemetry: true diff --git a/templates/provider/projectsveltos/Chart.lock b/templates/provider/projectsveltos/Chart.lock index aa2e201b1..8dd63225e 100644 --- a/templates/provider/projectsveltos/Chart.lock +++ b/templates/provider/projectsveltos/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: projectsveltos repository: https://projectsveltos.github.io/helm-charts - version: 0.38.1 -digest: sha256:6fae3801c4b89a99b64a8185d21d6b12ec7cf4f40b15c5555985afa88894f4a8 -generated: "2024-09-13T11:52:06.517657-04:00" + version: 0.38.2 +digest: sha256:49e61851b4a79742612428f193ae6f450bd3b85959e3f9c3639342064b612058 +generated: "2024-09-25T20:27:28.220464-04:00" diff --git a/templates/provider/projectsveltos/Chart.yaml b/templates/provider/projectsveltos/Chart.yaml index 3d597e127..ac8c32bae 100644 --- a/templates/provider/projectsveltos/Chart.yaml +++ b/templates/provider/projectsveltos/Chart.yaml @@ -2,11 +2,9 @@ apiVersion: v2 name: projectsveltos description: A Helm chart to refer the official projectsveltos helm chart type: application -version: 0.38.1 -appVersion: "0.38.1" +version: 0.38.2 +appVersion: "0.38.2" dependencies: - name: projectsveltos - version: 0.38.1 + version: 0.38.2 repository: https://projectsveltos.github.io/helm-charts -annotations: - hmc.mirantis.com/type: provider diff --git a/templates/provider/projectsveltos/values.yaml b/templates/provider/projectsveltos/values.yaml new file mode 100644 index 000000000..fff99b0e4 --- /dev/null +++ b/templates/provider/projectsveltos/values.yaml @@ -0,0 +1,37 @@ +projectsveltos: + accessManager: + manager: + image: + repository: docker.io/projectsveltos/access-manager + addonController: + controller: + image: + repository: docker.io/projectsveltos/addon-controller + classifierManager: + manager: 
+ image: + repository: docker.io/projectsveltos/classifier + conversionWebhook: + sveltosWebhook: + image: + repository: docker.io/projectsveltos/webhook-conversion + eventManager: + manager: + image: + repository: docker.io/projectsveltos/event-manager + hcManager: + manager: + image: + repository: docker.io/projectsveltos/healthcheck-manager + registerMgmtClusterJob: + registerMgmtCluster: + image: + repository: docker.io/projectsveltos/register-mgmt-cluster + scManager: + manager: + image: + repository: docker.io/projectsveltos/sveltoscluster-manager + shardController: + manager: + image: + repository: docker.io/projectsveltos/shard-controller diff --git a/templates/service/ingress-nginx/Chart.lock b/templates/service/ingress-nginx/Chart.lock new file mode 100644 index 000000000..b4a4bb6d5 --- /dev/null +++ b/templates/service/ingress-nginx/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: ingress-nginx + repository: https://kubernetes.github.io/ingress-nginx + version: 4.11.0 +digest: sha256:3790a88ef1fd5ee83e99e8205361441d4a54abf963d8608be527862dfed03cdc +generated: "2024-09-25T05:42:16.197417-04:00" diff --git a/templates/service/ingress-nginx/Chart.yaml b/templates/service/ingress-nginx/Chart.yaml new file mode 100644 index 000000000..3e8948b9e --- /dev/null +++ b/templates/service/ingress-nginx/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: ingress-nginx +description: A Helm chart to refer the official ingress-nginx helm chart +type: application +version: 4.11.0 +appVersion: "1.11.0" +dependencies: + - name: ingress-nginx + version: 4.11.0 + repository: https://kubernetes.github.io/ingress-nginx diff --git a/templates/service/kyverno/Chart.lock b/templates/service/kyverno/Chart.lock new file mode 100644 index 000000000..9a9d64cb1 --- /dev/null +++ b/templates/service/kyverno/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: kyverno + repository: https://kyverno.github.io/kyverno/ + version: 3.2.6 +digest: sha256:319b12c570e7cb6c5c8d5485c27bab8bd8f8d65ddf08d4b9893dc37265b0896b +generated: "2024-09-25T05:42:17.739693-04:00" diff --git a/templates/service/kyverno/Chart.yaml b/templates/service/kyverno/Chart.yaml new file mode 100644 index 000000000..1ac2b852f --- /dev/null +++ b/templates/service/kyverno/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: kyverno +description: A Helm chart to refer the official kyverno helm chart +type: application +version: 3.2.6 +appVersion: "v1.12.5" +dependencies: + - name: kyverno + version: 3.2.6 + repository: https://kyverno.github.io/kyverno/ diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 502197514..e13282d0c 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -34,6 +34,7 @@ import ( "github.com/Mirantis/hmc/test/kubeclient" "github.com/Mirantis/hmc/test/managedcluster" "github.com/Mirantis/hmc/test/managedcluster/aws" + "github.com/Mirantis/hmc/test/managedcluster/azure" "github.com/Mirantis/hmc/test/managedcluster/vsphere" "github.com/Mirantis/hmc/test/utils" ) @@ -62,7 +63,6 @@ var _ = Describe("controller", Ordered, func() { Context("Operator", func() { It("should run successfully", func() { kc := kubeclient.NewFromLocal(namespace) - aws.CreateCredentialSecret(context.Background(), kc) By("validating that the hmc-controller and capi provider controllers are running") Eventually(func() error { @@ -73,6 +73,11 @@ var _ = Describe("controller", Ordered, func() { } return nil }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + GinkgoT().Setenv("NAMESPACE", namespace) + cmd := 
exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + // aws.CreateCredentialSecret(context.Background(), kc) }) }) @@ -89,7 +94,11 @@ var _ = Describe("controller", Ordered, func() { BeforeAll(func() { By("ensuring AWS credentials are set") kc = kubeclient.NewFromLocal(namespace) - aws.CreateCredentialSecret(context.Background(), kc) + // aws.CreateCredentialSecret(context.Background(), kc) + GinkgoT().Setenv("NAMESPACE", namespace) + cmd := exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { @@ -159,11 +168,15 @@ var _ = Describe("controller", Ordered, func() { cmd = exec.Command("make", "dev-templates") _, err = utils.Run(cmd) Expect(err).NotTo(HaveOccurred()) + GinkgoT().Setenv("NAMESPACE", namespace) + cmd = exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) // Ensure AWS credentials are set in the standalone cluster. standaloneClient = kc.NewFromCluster(context.Background(), namespace, clusterName) - aws.CreateCredentialSecret(context.Background(), standaloneClient) + // aws.CreateCredentialSecret(context.Background(), standaloneClient) templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") Eventually(func() error { @@ -179,7 +192,7 @@ var _ = Describe("controller", Ordered, func() { // Populate the environment variables required for the hosted // cluster. - aws.PopulateHostedTemplateVars(context.Background(), kc) + aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) @@ -211,7 +224,7 @@ var _ = Describe("controller", Ordered, func() { ) Eventually(func() error { return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) // Delete the hosted ManagedCluster and verify it is removed. templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster") @@ -308,7 +321,7 @@ var _ = Describe("controller", Ordered, func() { ) Eventually(func() error { return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) deletionValidator := managedcluster.NewProviderValidator( managedcluster.TemplateVSphereStandaloneCP, @@ -323,8 +336,165 @@ var _ = Describe("controller", Ordered, func() { }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) }) }) + + Describe("Azure Templates", Label("provider"), func() { + var ( + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + standaloneDeleteFunc func() error + hostedDeleteFunc func() error + kubecfgDeleteFunc func() error + sdName string + ) + + BeforeAll(func() { + By("ensuring Azure credentials are set") + kc = kubeclient.NewFromLocal(namespace) + azure.CreateCredentialSecret(context.Background(), kc) + }) + + AfterEach(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. 
+ if CurrentSpecReport().Failed() && !noCleanup() { + By("collecting failure logs from controllers") + if kc != nil { + collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + } + if standaloneClient != nil { + collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + } + + By("deleting resources after failure") + for _, deleteFunc := range []func() error{ + kubecfgDeleteFunc, + hostedDeleteFunc, + standaloneDeleteFunc, + } { + if deleteFunc != nil { + err := deleteFunc() + Expect(err).NotTo(HaveOccurred()) + } + } + } + }) + + It("should work with an Azure provider", func() { + templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster") + sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP) + sdName = sd.GetName() + + standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + + // verify the standalone cluster is deployed correctly + deploymentValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + sdName, + managedcluster.ValidationActionDeploy, + ) + + templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // setup environment variables for deploying the hosted template (subnet name, etc) + azure.SetAzureEnvironmentVariables(sdName, kc) + + hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP) + hdName := hd.GetName() + + var kubeCfgPath string + kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName) + + By("Deploy onto standalone cluster") + deployOnAzureCluster(kubeCfgPath) + + templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster") + standaloneClient = kc.NewFromCluster(context.Background(), namespace, sdName) + // verify the cluster is ready prior to creating credentials + Eventually(func() error { + err := verifyControllersUp(standaloneClient, managedcluster.ProviderAzure) + if err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) + return err + } + return nil + }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By("Create azure credential secret") + azure.CreateCredentialSecret(context.Background(), standaloneClient) + + templateBy(managedcluster.TemplateAzureHostedCP, + fmt.Sprintf("creating a Deployment using template %s", managedcluster.TemplateAzureHostedCP)) + hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) + + templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully") + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + hdName, + managedcluster.ValidationActionDeploy, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By("verify the deployment deletes successfully") + err := hostedDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + err = standaloneDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureHostedCP, + hdName, +
managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + hdName, + managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }) + }) }) +func deployOnAzureCluster(kubeCfgPath string) { + GinkgoT().Helper() + GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) + cmd := exec.Command("kubectl", "create", "-f", + "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/"+ + "storageclass-azuredisk-csi.yaml") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("kubectl", "patch", "storageclass", "managed-csi", "-p", + "{\"metadata\": {\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"}}}") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("make", "dev-deploy") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("make", "dev-templates") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) +} + // templateBy wraps a Ginkgo By with a block describing the template being // tested. func templateBy(t managedcluster.Template, description string) { diff --git a/test/managedcluster/aws/aws.go b/test/managedcluster/aws/aws.go index b6f1eb30e..0bdccafa3 100644 --- a/test/managedcluster/aws/aws.go +++ b/test/managedcluster/aws/aws.go @@ -17,64 +17,91 @@ package aws import ( + "bufio" + "bytes" "context" "encoding/json" + "errors" + "io" "os" - "os/exec" - - corev1 "k8s.io/api/core/v1" + "github.com/a8m/envsubst" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" "k8s.io/apimachinery/pkg/types" + yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" + "k8s.io/client-go/restmapper" "github.com/Mirantis/hmc/test/kubeclient" "github.com/Mirantis/hmc/test/managedcluster" - "github.com/Mirantis/hmc/test/utils" ) -// CreateCredentialSecret uses clusterawsadm to encode existing AWS -// credentials and create a secret in the given namespace if one does not -// already exist. func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { GinkgoHelper() - - _, err := kc.Client.CoreV1().Secrets(kc.Namespace). 
- Get(ctx, managedcluster.AWSCredentialsSecretName, metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - Expect(err).NotTo(HaveOccurred(), "failed to get AWS credentials secret") - return + serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + yamlFile, err := os.ReadFile("config/dev/aws-credentials.yaml") + Expect(err).NotTo(HaveOccurred()) + + yamlFile, err = envsubst.Bytes(yamlFile) + Expect(err).NotTo(HaveOccurred()) + + c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) + groupResources, err := restmapper.GetAPIGroupResources(c) + Expect(err).NotTo(HaveOccurred()) + + yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) + for { + yamlDoc, err := yamlReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + Expect(err).NotTo(HaveOccurred(), "failed to read yaml file") + } + + credentialResource := &unstructured.Unstructured{} + _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) + Expect(err).NotTo(HaveOccurred(), "failed to parse credential resource") + + mapper := restmapper.NewDiscoveryRESTMapper(groupResources) + mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) + Expect(err).NotTo(HaveOccurred(), "failed to get rest mapping") + + dc := kc.GetDynamicClient(schema.GroupVersionResource{ + Group: credentialResource.GroupVersionKind().Group, + Version: credentialResource.GroupVersionKind().Version, + Resource: mapping.Resource.Resource, + }) + + exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred(), "failed to get AWS credential secret") + } + + if exists == nil { + if _, err := dc.Create(ctx, credentialResource, metav1.CreateOptions{}); err != nil { + Expect(err).NotTo(HaveOccurred(), "failed to create AWS credential secret") + } + } } - - cmd := exec.Command("./bin/clusterawsadm", "bootstrap", "credentials", "encode-as-profile") - output, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred(), "failed to encode AWS credentials with clusterawsadm") - - _, err = kc.Client.CoreV1().Secrets(kc.Namespace).Create(ctx, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: managedcluster.AWSCredentialsSecretName, - }, - Data: map[string][]byte{ - "AWS_B64ENCODED_CREDENTIALS": output, - }, - Type: corev1.SecretTypeOpaque, - }, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to create AWS credentials secret") } // PopulateHostedTemplateVars populates the environment variables required for // the AWS hosted CP template by querying the standalone CP cluster with the // given kubeclient.
-func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient) { +func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) { GinkgoHelper() c := getAWSClusterClient(kc) - awsCluster, err := c.Get(ctx, os.Getenv(managedcluster.EnvVarManagedClusterName), metav1.GetOptions{}) + awsCluster, err := c.Get(ctx, clusterName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster") vpcID, found, err := unstructured.NestedString(awsCluster.Object, "spec", "network", "vpc", "id") diff --git a/test/managedcluster/azure/azure.go b/test/managedcluster/azure/azure.go new file mode 100644 index 000000000..fcf262080 --- /dev/null +++ b/test/managedcluster/azure/azure.go @@ -0,0 +1,152 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + + "github.com/a8m/envsubst" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/discovery" + "k8s.io/client-go/restmapper" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Mirantis/hmc/test/kubeclient" +) + +func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) map[string]any { + GinkgoHelper() + resourceID := schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: "v1beta1", + Resource: "azureclusters", + } + + dc := kc.GetDynamicClient(resourceID) + list, err := dc.List(ctx, metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: name}).String(), + }) + + Expect(err).NotTo(HaveOccurred()) + Expect(len(list.Items)).NotTo(BeEquivalentTo(0)) + + spec, found, err := unstructured.NestedMap(list.Items[0].Object, "spec") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + return spec +} + +func SetAzureEnvironmentVariables(clusterName string, kc *kubeclient.KubeClient) { + GinkgoHelper() + spec := getAzureInfo(context.Background(), clusterName, kc) + + networkSpec, found, err := unstructured.NestedMap(spec, "networkSpec") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + vnet, found, err := unstructured.NestedMap(networkSpec, "vnet") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + vnetName, ok := vnet["name"].(string) + Expect(ok).To(BeTrue()) + GinkgoT().Setenv("AZURE_VM_NET_NAME", vnetName) + + subnets, found, err := unstructured.NestedSlice(networkSpec, "subnets") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + resourceGroup := spec["resourceGroup"] + GinkgoT().Setenv("AZURE_RESOURCE_GROUP", fmt.Sprintf("%s", resourceGroup)) + subnetMap, 
ok := subnets[0].(map[string]any) + Expect(ok).To(BeTrue()) + subnetName := subnetMap["name"] + GinkgoT().Setenv("AZURE_NODE_SUBNET", fmt.Sprintf("%s", subnetName)) + + securityGroup, found, err := unstructured.NestedMap(subnetMap, "securityGroup") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + securityGroupName := securityGroup["name"] + GinkgoT().Setenv("AZURE_SECURITY_GROUP", fmt.Sprintf("%s", securityGroupName)) + + routeTable, found, err := unstructured.NestedMap(subnetMap, "routeTable") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + routeTableName := routeTable["name"] + GinkgoT().Setenv("AZURE_ROUTE_TABLE", fmt.Sprintf("%s", routeTableName)) +} + +func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { + GinkgoHelper() + serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + yamlFile, err := os.ReadFile("config/dev/azure-credentials.yaml") + Expect(err).NotTo(HaveOccurred()) + + yamlFile, err = envsubst.Bytes(yamlFile) + Expect(err).NotTo(HaveOccurred()) + + c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) + groupResources, err := restmapper.GetAPIGroupResources(c) + Expect(err).NotTo(HaveOccurred()) + + yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) + for { + yamlDoc, err := yamlReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + Expect(err).NotTo(HaveOccurred(), "failed to read yaml file") + } + + credentialResource := &unstructured.Unstructured{} + _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) + Expect(err).NotTo(HaveOccurred(), "failed to parse credential resource") + + mapper := restmapper.NewDiscoveryRESTMapper(groupResources) + mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) + Expect(err).NotTo(HaveOccurred(), "failed to get rest mapping") + + dc := kc.GetDynamicClient(schema.GroupVersionResource{ + Group: credentialResource.GroupVersionKind().Group, + Version: credentialResource.GroupVersionKind().Version, + Resource: mapping.Resource.Resource, + }) + + exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) + if !apierrors.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred(), "failed to get azure credential secret") + } + + if exists == nil { + if _, createErr := dc.Create(ctx, credentialResource, metav1.CreateOptions{}); createErr != nil { + Expect(createErr).NotTo(HaveOccurred(), "failed to create azure credential secret") + } + } + } +} diff --git a/test/managedcluster/managedcluster.go b/test/managedcluster/managedcluster.go index 0af7f149b..6c1edca02 100644 --- a/test/managedcluster/managedcluster.go +++ b/test/managedcluster/managedcluster.go @@ -26,6 +26,8 @@ import ( .
"github.com/onsi/gomega" "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/Mirantis/hmc/internal/utils" ) type ProviderType string @@ -44,6 +46,8 @@ type Template string const ( TemplateAWSStandaloneCP Template = "aws-standalone-cp" TemplateAWSHostedCP Template = "aws-hosted-cp" + TemplateAzureHostedCP Template = "azure-hosted-cp" + TemplateAzureStandaloneCP Template = "azure-standalone-cp" TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp" TemplateVSphereHostedCP Template = "vsphere-hosted-cp" ) @@ -54,6 +58,12 @@ var awsStandaloneCPManagedClusterTemplateBytes []byte //go:embed resources/aws-hosted-cp.yaml.tpl var awsHostedCPManagedClusterTemplateBytes []byte +//go:embed resources/azure-standalone-cp.yaml.tpl +var azureStandaloneCPManagedClusterTemplateBytes []byte + +//go:embed resources/azure-hosted-cp.yaml.tpl +var azureHostedCPManagedClusterTemplateBytes []byte + //go:embed resources/vsphere-standalone-cp.yaml.tpl var vsphereStandaloneCPManagedClusterTemplateBytes []byte @@ -71,7 +81,7 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { generatedName := os.Getenv(EnvVarManagedClusterName) if generatedName == "" { - generatedName = uuid.New().String()[:8] + "-e2e-test" + generatedName = "e2e-test-" + uuid.New().String()[:8] _, _ = fmt.Fprintf(GinkgoWriter, "Generated cluster name: %q\n", generatedName) GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) } else { @@ -104,10 +114,15 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { managedClusterTemplateBytes = vsphereStandaloneCPManagedClusterTemplateBytes case TemplateVSphereHostedCP: managedClusterTemplateBytes = vsphereHostedCPManagedClusterTemplateBytes + case TemplateAzureHostedCP: + managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes + case TemplateAzureStandaloneCP: + managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes default: - Fail(fmt.Sprintf("unsupported AWS template: %s", templateName)) + Fail(fmt.Sprintf("unsupported template: %s", templateName)) } + Expect(os.Setenv("NAMESPACE", utils.DefaultSystemNamespace)).NotTo(HaveOccurred()) managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables") diff --git a/test/managedcluster/providervalidator.go b/test/managedcluster/providervalidator.go index fd474253c..2deae8ff7 100644 --- a/test/managedcluster/providervalidator.go +++ b/test/managedcluster/providervalidator.go @@ -64,6 +64,8 @@ func NewProviderValidator(template Template, clusterName string, action Validati case TemplateAWSStandaloneCP, TemplateAWSHostedCP: resourcesToValidate["ccm"] = validateCCM resourceOrder = append(resourceOrder, "ccm") + case TemplateAzureStandaloneCP, TemplateVSphereHostedCP: + delete(resourcesToValidate, "csi-driver") } } else { resourcesToValidate = map[string]resourceValidationFunc{ diff --git a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/managedcluster/resources/aws-hosted-cp.yaml.tpl index 06a4cf4cc..64e46a0b1 100644 --- a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/managedcluster/resources/aws-hosted-cp.yaml.tpl @@ -5,6 +5,9 @@ metadata: spec: template: aws-hosted-cp config: + clusterIdentity: + name: aws-cluster-identity + namespace: ${NAMESPACE} vpcID: ${AWS_VPC_ID} region: ${AWS_REGION} subnets: diff --git a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl 
b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl index 0d107ca43..e5ae88486 100644 --- a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -1,10 +1,13 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-aws spec: template: aws-standalone-cp config: + clusterIdentity: + name: aws-cluster-identity + namespace: ${NAMESPACE} region: ${AWS_REGION} publicIP: ${AWS_PUBLIC_IP:=true} controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} diff --git a/test/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/managedcluster/resources/azure-hosted-cp.yaml.tpl new file mode 100644 index 000000000..6b8f7ad97 --- /dev/null +++ b/test/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -0,0 +1,23 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: ${MANAGED_CLUSTER_NAME}-azure + namespace: ${NAMESPACE} +spec: + template: azure-hosted-cp + config: + location: "westus" + subscriptionID: "${AZURE_SUBSCRIPTION_ID}" + vmSize: Standard_A4_v2 + clusterIdentity: + name: azure-cluster-identity + namespace: hmc-system + resourceGroup: "${AZURE_RESOURCE_GROUP}" + network: + vnetName: "${AZURE_VM_NET_NAME}" + nodeSubnetName: "${AZURE_NODE_SUBNET}" + routeTableName: "${AZURE_ROUTE_TABLE}" + securityGroupName: "${AZURE_SECURITY_GROUP}" + tenantID: "${AZURE_TENANT_ID}" + clientID: "${AZURE_CLIENT_ID}" + clientSecret: "${AZURE_CLIENT_SECRET}" diff --git a/test/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/managedcluster/resources/azure-standalone-cp.yaml.tpl new file mode 100644 index 000000000..44d5abf60 --- /dev/null +++ b/test/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -0,0 +1,22 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: ${MANAGED_CLUSTER_NAME}-azure + namespace: ${NAMESPACE} +spec: + template: azure-standalone-cp + config: + controlPlaneNumber: 1 + workersNumber: 1 + location: "westus" + subscriptionID: "${AZURE_SUBSCRIPTION_ID}" + controlPlane: + vmSize: Standard_A4_v2 + worker: + vmSize: Standard_A4_v2 + clusterIdentity: + name: azure-cluster-identity + namespace: ${NAMESPACE} + tenantID: "${AZURE_TENANT_ID}" + clientID: "${AZURE_CLIENT_ID}" + clientSecret: "${AZURE_CLIENT_SECRET}" diff --git a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl index 2c556d9cc..a4c328b77 100644 --- a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-vsphere spec: template: vsphere-hosted-cp config: diff --git a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl index 98d193257..81eb8edf3 100644 --- a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-vsphere spec: template: vsphere-standalone-cp config: diff --git a/test/managedcluster/validate_deployed.go b/test/managedcluster/validate_deployed.go index f0d4edeca..89c8ca694 100644 --- a/test/managedcluster/validate_deployed.go +++ 
b/test/managedcluster/validate_deployed.go @@ -243,7 +243,7 @@ func validateCCM(ctx context.Context, kc *kubeclient.KubeClient, clusterName str } for _, i := range service.Status.LoadBalancer.Ingress { - if i.Hostname != "" { + if i.Hostname != "" || i.IP != "" { return nil } } diff --git a/test/objects/management/management.go b/test/objects/management/management.go index 198bc0584..26a31899f 100644 --- a/test/objects/management/management.go +++ b/test/objects/management/management.go @@ -29,7 +29,8 @@ type Opt func(management *v1alpha1.Management) func NewManagement(opts ...Opt) *v1alpha1.Management { p := &v1alpha1.Management{ ObjectMeta: metav1.ObjectMeta{ - Name: DefaultName, + Name: DefaultName, + Finalizers: []string{v1alpha1.ManagementFinalizer}, }, } @@ -45,6 +46,12 @@ func WithName(name string) Opt { } } +func WithDeletionTimestamp(deletionTimestamp metav1.Time) Opt { + return func(p *v1alpha1.Management) { + p.DeletionTimestamp = &deletionTimestamp + } +} + func WithCoreComponents(core *v1alpha1.Core) Opt { return func(p *v1alpha1.Management) { p.Spec.Core = core diff --git a/test/scheme/scheme.go b/test/scheme/scheme.go index a38a689b7..d0155c616 100644 --- a/test/scheme/scheme.go +++ b/test/scheme/scheme.go @@ -17,37 +17,26 @@ package scheme import ( hcv2 "github.com/fluxcd/helm-controller/api/v2" sourcev1 "github.com/fluxcd/source-controller/api/v1" + sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/runtime/serializer/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "github.com/Mirantis/hmc/api/v1alpha1" ) var ( - Scheme = runtime.NewScheme() - Codecs = serializer.NewCodecFactory(Scheme) - Builder = runtime.SchemeBuilder{ + Scheme = runtime.NewScheme() + + builder = runtime.SchemeBuilder{ clientgoscheme.AddToScheme, v1alpha1.AddToScheme, sourcev1.AddToScheme, hcv2.AddToScheme, + sveltosv1beta1.AddToScheme, } ) -var Encoder = json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme) func init() { - err := Builder.AddToScheme(Scheme) - if err != nil { - panic(err) - } -} - -func Decode(yaml []byte) (runtime.Object, error) { - return runtime.Decode(Codecs.UniversalDeserializer(), yaml) -} - -func Encode(obj runtime.Object) ([]byte, error) { - return runtime.Encode(Encoder, obj) + utilruntime.Must(builder.AddToScheme(Scheme)) }