diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index fe2fa81cd..4e2961847 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -112,7 +112,7 @@ jobs:
       - name: Run E2E tests
         env:
           GINKGO_LABEL_FILTER: 'controller'
-          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
+          CLUSTER_DEPLOYMENT_NAME: ${{ needs.build.outputs.clustername }}
           IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
           VERSION: ${{ needs.build.outputs.version }}
         run: |
@@ -162,7 +162,7 @@ jobs:
       - name: Run E2E tests
         env:
           GINKGO_LABEL_FILTER: 'provider:cloud'
-          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
+          CLUSTER_DEPLOYMENT_NAME: ${{ needs.build.outputs.clustername }}
           IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
           VERSION: ${{ needs.build.outputs.version }}
         run: |
@@ -215,7 +215,7 @@ jobs:
       - name: Run E2E tests
         env:
           GINKGO_LABEL_FILTER: 'provider:onprem'
-          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
+          CLUSTER_DEPLOYMENT_NAME: ${{ needs.build.outputs.clustername }}
          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
          VERSION: ${{ needs.build.outputs.version }}
        run: |
diff --git a/Makefile b/Makefile
index 561d85d38..42fd4e87f 100644
--- a/Makefile
+++ b/Makefile
@@ -334,6 +334,10 @@ dev-templates: templates-generate
 dev-release:
 	@$(YQ) e ".spec.version = \"${VERSION}\"" $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/release.yaml | $(KUBECTL) -n $(NAMESPACE) apply -f -
 
+.PHONY: dev-adopted-creds
+dev-adopted-creds: envsubst
+	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -i config/dev/adopted-credentials.yaml | $(KUBECTL) apply -f -
+
 .PHONY: dev-aws-creds
 dev-aws-creds: envsubst
 	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -i config/dev/aws-credentials.yaml | $(KUBECTL) apply -f -
@@ -359,11 +363,11 @@ dev-destroy: kind-undeploy registry-undeploy ## Destroy the development environment
 .PHONY: dev-mcluster-apply
 dev-mcluster-apply: envsubst
-	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-managedcluster.yaml | $(KUBECTL) apply -f -
+	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-clusterdeployment.yaml | $(KUBECTL) apply -f -
 
 .PHONY: dev-mcluster-delete
 dev-mcluster-delete: envsubst
-	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-managedcluster.yaml | $(KUBECTL) delete -f -
+	@NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/$(DEV_PROVIDER)-clusterdeployment.yaml | $(KUBECTL) delete -f -
 
 .PHONY: dev-creds-apply
 dev-creds-apply: dev-$(DEV_PROVIDER)-creds
diff --git a/PROJECT b/PROJECT
index 6db141328..1dd4d0d6d 100644
--- a/PROJECT
+++ b/PROJECT
@@ -14,7 +14,7 @@ resources:
   controller: true
   domain: hmc.mirantis.com
   group: hmc.mirantis.com
-  kind: ManagedCluster
+  kind: ClusterDeployment
   path: github.com/Mirantis/hmc/api/v1alpha1
   version: v1alpha1
 - api:
diff --git a/README.md b/README.md
index 987d7566f..ccf01b249 100644
--- a/README.md
+++ b/README.md
@@ -131,7 +131,7 @@ If you want to deploy hosted control plane template, make sure to check
 additional notes on Hosted control plane in 2A Docs, see
 [Documentation](#documentation).
 
-2. Create the file with the `ManagedCluster` configuration:
+2. Create the file with the `ClusterDeployment` configuration:
 
 > [!NOTE]
 > Substitute the parameters enclosed in angle brackets with the corresponding
@@ -140,7 +140,7 @@
 
 ```yaml
 apiVersion: hmc.mirantis.com/v1alpha1
-kind: ManagedCluster
+kind: ClusterDeployment
 metadata:
   name: <cluster-name>
   namespace: <namespace>
@@ -152,46 +152,46 @@ spec:
 ```
 
-3. Create the `ManagedCluster` object:
+3. Create the `ClusterDeployment` object:
 
-`kubectl create -f managedcluster.yaml`
+`kubectl create -f clusterdeployment.yaml`
 
-4. Check the status of the newly created `ManagedCluster` object:
+4. Check the status of the newly created `ClusterDeployment` object:
 
-`kubectl -n <namespace> get managedcluster <cluster-name> -o=yaml`
+`kubectl -n <namespace> get clusterdeployment <cluster-name> -o=yaml`
 
 5. Wait for infrastructure to be provisioned and the cluster to be deployed
 (the provisioning starts only when `spec.dryRun` is disabled):
 
 ```bash
-kubectl -n <namespace> get cluster <managedcluster-name> -o=yaml
+kubectl -n <namespace> get cluster <clusterdeployment-name> -o=yaml
 ```
 
 > [!NOTE]
 > You may also watch the process with the `clusterctl describe` command
 > (requires the `clusterctl` CLI to be installed): `clusterctl describe
-> cluster <managedcluster-name> -n <namespace> --show-conditions
+> cluster <clusterdeployment-name> -n <namespace> --show-conditions
 > all`
 
 6. Retrieve the `kubeconfig` of your managed cluster:
 
 ```
-kubectl get secret -n hmc-system <managedcluster-name>-kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig
+kubectl get secret -n hmc-system <clusterdeployment-name>-kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig
 ```
 
 ### Dry run
 
-HMC `ManagedCluster` supports two modes: with and without (default) `dryRun`.
+HMC `ClusterDeployment` supports two modes: with and without (default) `dryRun`.
 
-If no configuration (`spec.config`) provided, the `ManagedCluster` object will
+If no configuration (`spec.config`) is provided, the `ClusterDeployment` object will
 be populated with defaults (default configuration can be found in the
 corresponding `Template` status) and automatically marked as `dryRun`.
 
-Here is an example of the `ManagedCluster` object with default configuration:
+Here is an example of the `ClusterDeployment` object with default configuration:
 
 ```yaml
 apiVersion: hmc.mirantis.com/v1alpha1
-kind: ManagedCluster
+kind: ClusterDeployment
 metadata:
   name: <cluster-name>
   namespace: <namespace>
@@ -226,11 +226,11 @@ After you adjust your configuration and ensure that it passes validation
 (`TemplateReady` condition from `status.conditions`), remove the `spec.dryRun`
 flag to proceed with the deployment.
 
-Here is an example of a `ManagedCluster` object that passed the validation:
+Here is an example of a `ClusterDeployment` object that passed the validation:
 
 ```yaml
 apiVersion: hmc.mirantis.com/v1alpha1
-kind: ManagedCluster
+kind: ClusterDeployment
 metadata:
   name: aws-standalone
   namespace: hmc-system
@@ -259,7 +259,7 @@ spec:
     status: "True"
     type: HelmChartReady
   - lastTransitionTime: "2024-07-22T09:25:49Z"
-    message: ManagedCluster is ready
+    message: ClusterDeployment is ready
     reason: Succeeded
     status: "True"
     type: Ready
@@ -275,7 +275,7 @@ kubectl delete management.hmc hmc
 ```
 
 > [!NOTE]
-> Make sure you have no HMC ManagedCluster objects left in the cluster prior to
+> Make sure you have no HMC ClusterDeployment objects left in the cluster prior to
 > Management deletion
 
 2. Remove the `hmc` Helm release:
diff --git a/api/v1alpha1/indexers.go b/api/v1alpha1/indexers.go
index 684dfce46..f1d87733e 100644
--- a/api/v1alpha1/indexers.go
+++ b/api/v1alpha1/indexers.go
@@ -25,9 +25,9 @@ import (
 func SetupIndexers(ctx context.Context, mgr ctrl.Manager) error {
 	var merr error
 	for _, f := range []func(context.Context, ctrl.Manager) error{
-		setupManagedClusterIndexer,
-		setupManagedClusterServicesIndexer,
-		setupManagedClusterCredentialIndexer,
+		setupClusterDeploymentIndexer,
+		setupClusterDeploymentServicesIndexer,
+		setupClusterDeploymentCredentialIndexer,
 		setupReleaseVersionIndexer,
 		setupReleaseTemplatesIndexer,
 		setupClusterTemplateChainIndexer,
@@ -44,17 +44,17 @@ func SetupIndexers(ctx context.Context, mgr ctrl.Manager) error {
 
 // managed cluster
 
-// ManagedClusterTemplateIndexKey indexer field name to extract ClusterTemplate name reference from a ManagedCluster object.
-const ManagedClusterTemplateIndexKey = ".spec.template"
+// ClusterDeploymentTemplateIndexKey indexer field name to extract ClusterTemplate name reference from a ClusterDeployment object.
+const ClusterDeploymentTemplateIndexKey = ".spec.template"
 
-func setupManagedClusterIndexer(ctx context.Context, mgr ctrl.Manager) error {
-	return mgr.GetFieldIndexer().IndexField(ctx, &ManagedCluster{}, ManagedClusterTemplateIndexKey, ExtractTemplateNameFromManagedCluster)
+func setupClusterDeploymentIndexer(ctx context.Context, mgr ctrl.Manager) error {
+	return mgr.GetFieldIndexer().IndexField(ctx, &ClusterDeployment{}, ClusterDeploymentTemplateIndexKey, ExtractTemplateNameFromClusterDeployment)
 }
 
-// ExtractTemplateNameFromManagedCluster returns referenced ClusterTemplate name
-// declared in a ManagedCluster object.
-func ExtractTemplateNameFromManagedCluster(rawObj client.Object) []string {
-	cluster, ok := rawObj.(*ManagedCluster)
+// ExtractTemplateNameFromClusterDeployment returns referenced ClusterTemplate name
+// declared in a ClusterDeployment object.
+func ExtractTemplateNameFromClusterDeployment(rawObj client.Object) []string {
+	cluster, ok := rawObj.(*ClusterDeployment)
 	if !ok {
 		return nil
 	}
@@ -62,17 +62,17 @@ func ExtractTemplateNameFromManagedCluster(rawObj client.Object) []string {
 	return []string{cluster.Spec.Template}
 }
 
-// ManagedClusterServiceTemplatesIndexKey indexer field name to extract service templates names from a ManagedCluster object.
-const ManagedClusterServiceTemplatesIndexKey = ".spec.services[].Template"
+// ClusterDeploymentServiceTemplatesIndexKey indexer field name to extract service templates names from a ClusterDeployment object.
+const ClusterDeploymentServiceTemplatesIndexKey = ".spec.services[].Template"
 
-func setupManagedClusterServicesIndexer(ctx context.Context, mgr ctrl.Manager) error {
-	return mgr.GetFieldIndexer().IndexField(ctx, &ManagedCluster{}, ManagedClusterServiceTemplatesIndexKey, ExtractServiceTemplateNamesFromManagedCluster)
+func setupClusterDeploymentServicesIndexer(ctx context.Context, mgr ctrl.Manager) error {
+	return mgr.GetFieldIndexer().IndexField(ctx, &ClusterDeployment{}, ClusterDeploymentServiceTemplatesIndexKey, ExtractServiceTemplateNamesFromClusterDeployment)
 }
 
-// ExtractServiceTemplateNamesFromManagedCluster returns a list of service templates names
-// declared in a ManagedCluster object.
-func ExtractServiceTemplateNamesFromManagedCluster(rawObj client.Object) []string {
-	cluster, ok := rawObj.(*ManagedCluster)
+// ExtractServiceTemplateNamesFromClusterDeployment returns a list of service templates names
+// declared in a ClusterDeployment object.
+func ExtractServiceTemplateNamesFromClusterDeployment(rawObj client.Object) []string {
+	cluster, ok := rawObj.(*ClusterDeployment)
 	if !ok {
 		return nil
 	}
@@ -85,17 +85,17 @@ func ExtractServiceTemplateNamesFromManagedCluster(rawObj client.Object) []string
 	return templates
 }
 
-// ManagedClusterCredentialIndexKey indexer field name to extract Credential name reference from a ManagedCluster object.
-const ManagedClusterCredentialIndexKey = ".spec.credential"
+// ClusterDeploymentCredentialIndexKey indexer field name to extract Credential name reference from a ClusterDeployment object.
+const ClusterDeploymentCredentialIndexKey = ".spec.credential"
 
-func setupManagedClusterCredentialIndexer(ctx context.Context, mgr ctrl.Manager) error {
-	return mgr.GetFieldIndexer().IndexField(ctx, &ManagedCluster{}, ManagedClusterCredentialIndexKey, ExtractCredentialNameFromManagedCluster)
+func setupClusterDeploymentCredentialIndexer(ctx context.Context, mgr ctrl.Manager) error {
+	return mgr.GetFieldIndexer().IndexField(ctx, &ClusterDeployment{}, ClusterDeploymentCredentialIndexKey, ExtractCredentialNameFromClusterDeployment)
 }
 
-// ExtractCredentialNameFromManagedCluster returns referenced Credential name
-// declared in a ManagedCluster object.
-func ExtractCredentialNameFromManagedCluster(rawObj client.Object) []string {
-	cluster, ok := rawObj.(*ManagedCluster)
+// ExtractCredentialNameFromClusterDeployment returns referenced Credential name
+// declared in a ClusterDeployment object.
+func ExtractCredentialNameFromClusterDeployment(rawObj client.Object) []string {
+	cluster, ok := rawObj.(*ClusterDeployment)
 	if !ok {
 		return nil
 	}
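Reviewer note: the renamed index keys above are consumed through `client.MatchingFields`, exactly as the controller does later in this diff in its `SetupWithManager` watch handlers. A minimal sketch of querying the template index (the helper name and call site are hypothetical, not part of this change):

```go
package controller

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
)

// listByTemplate is a hypothetical helper: it uses the
// ClusterDeploymentTemplateIndexKey field index registered above to fetch
// every ClusterDeployment in a namespace that references the given
// ClusterTemplate, without listing and filtering by hand.
func listByTemplate(ctx context.Context, c client.Client, namespace, templateName string) ([]hmc.ClusterDeployment, error) {
	deployments := &hmc.ClusterDeploymentList{}
	if err := c.List(ctx, deployments,
		client.InNamespace(namespace),
		client.MatchingFields{hmc.ClusterDeploymentTemplateIndexKey: templateName},
	); err != nil {
		return nil, fmt.Errorf("failed to list ClusterDeployments referencing template %s: %w", templateName, err)
	}
	return deployments.Items, nil
}
```

This only works if `SetupIndexers` has run against the manager's cache before the client is used, which is why the rename must touch the indexer setup and every `MatchingFields` call in the same commit.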
diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go
index 404a6de15..9e1434f03 100644
--- a/api/v1alpha1/managedcluster_types.go
+++ b/api/v1alpha1/managedcluster_types.go
@@ -22,8 +22,8 @@ import (
 )
 
 const (
-	BlockingFinalizer       = "hmc.mirantis.com/cleanup"
-	ManagedClusterFinalizer = "hmc.mirantis.com/managed-cluster"
+	BlockingFinalizer          = "hmc.mirantis.com/cleanup"
+	ClusterDeploymentFinalizer = "hmc.mirantis.com/cluster-deployment"
 
 	FluxHelmChartNameKey      = "helm.toolkit.fluxcd.io/name"
 	FluxHelmChartNamespaceKey = "helm.toolkit.fluxcd.io/namespace"
@@ -35,20 +35,20 @@ const (
 )
 
 const (
-	// ManagedClusterKind is the string representation of a ManagedCluster.
-	ManagedClusterKind = "ManagedCluster"
+	// ClusterDeploymentKind is the string representation of a ClusterDeployment.
+	ClusterDeploymentKind = "ClusterDeployment"
 	// TemplateReadyCondition indicates the referenced Template exists and valid.
 	TemplateReadyCondition = "TemplateReady"
 	// HelmChartReadyCondition indicates the corresponding HelmChart is valid and ready.
 	HelmChartReadyCondition = "HelmChartReady"
 	// HelmReleaseReadyCondition indicates the corresponding HelmRelease is ready and fully reconciled.
 	HelmReleaseReadyCondition = "HelmReleaseReady"
-	// ReadyCondition indicates the ManagedCluster is ready and fully reconciled.
+	// ReadyCondition indicates the ClusterDeployment is ready and fully reconciled.
 	ReadyCondition string = "Ready"
 )
 
-// ManagedClusterSpec defines the desired state of ManagedCluster
-type ManagedClusterSpec struct {
+// ClusterDeploymentSpec defines the desired state of ClusterDeployment
+type ClusterDeploymentSpec struct {
 	// Config allows to provide parameters for template customization.
 	// If no Config provided, the field will be populated with the default values for
 	// the template and DryRun will be enabled.
@@ -86,14 +86,14 @@ type ManagedClusterSpec struct {
 	StopOnConflict bool `json:"stopOnConflict,omitempty"`
 }
 
-// ManagedClusterStatus defines the observed state of ManagedCluster
-type ManagedClusterStatus struct {
+// ClusterDeploymentStatus defines the observed state of ClusterDeployment
+type ClusterDeploymentStatus struct {
 	// Services contains details for the state of services.
 	Services []ServiceStatus `json:"services,omitempty"`
 	// Currently compatible exact Kubernetes version of the cluster. Being set only if
 	// provided by the corresponding ClusterTemplate.
 	KubernetesVersion string `json:"k8sVersion,omitempty"`
-	// Conditions contains details for the current state of the ManagedCluster.
+	// Conditions contains details for the current state of the ClusterDeployment.
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
 
 	// AvailableUpgrades is the list of ClusterTemplate names to which
@@ -111,27 +111,27 @@ type ManagedClusterStatus struct {
 // +kubebuilder:printcolumn:name="status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="Status",priority=0
 // +kubebuilder:printcolumn:name="dryRun",type="string",JSONPath=".spec.dryRun",description="Dry Run",priority=1
 
-// ManagedCluster is the Schema for the managedclusters API
-type ManagedCluster struct {
+// ClusterDeployment is the Schema for the ClusterDeployments API
+type ClusterDeployment struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
 
-	Spec   ManagedClusterSpec   `json:"spec,omitempty"`
-	Status ManagedClusterStatus `json:"status,omitempty"`
+	Spec   ClusterDeploymentSpec   `json:"spec,omitempty"`
+	Status ClusterDeploymentStatus `json:"status,omitempty"`
 }
 
-func (in *ManagedCluster) HelmValues() (values map[string]any, err error) {
+func (in *ClusterDeployment) HelmValues() (values map[string]any, err error) {
 	if in.Spec.Config != nil {
 		err = yaml.Unmarshal(in.Spec.Config.Raw, &values)
 	}
 	return values, err
 }
 
-func (in *ManagedCluster) GetConditions() *[]metav1.Condition {
+func (in *ClusterDeployment) GetConditions() *[]metav1.Condition {
 	return &in.Status.Conditions
 }
 
-func (in *ManagedCluster) InitConditions() {
+func (in *ClusterDeployment) InitConditions() {
 	apimeta.SetStatusCondition(in.GetConditions(), metav1.Condition{
 		Type:   TemplateReadyCondition,
 		Status: metav1.ConditionUnknown,
@@ -156,19 +156,19 @@ func (in *ManagedCluster) InitConditions() {
 		Type:    ReadyCondition,
 		Status:  metav1.ConditionUnknown,
 		Reason:  ProgressingReason,
-		Message: "ManagedCluster is not yet ready",
+		Message: "ClusterDeployment is not yet ready",
 	})
 }
 
 // +kubebuilder:object:root=true
 
-// ManagedClusterList contains a list of ManagedCluster
-type ManagedClusterList struct {
+// ClusterDeploymentList contains a list of ClusterDeployment
+type ClusterDeploymentList struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []ManagedCluster `json:"items"`
+	Items           []ClusterDeployment `json:"items"`
 }
 
 func init() {
-	SchemeBuilder.Register(&ManagedCluster{}, &ManagedClusterList{})
+	SchemeBuilder.Register(&ClusterDeployment{}, &ClusterDeploymentList{})
 }
diff --git a/api/v1alpha1/templates_common.go b/api/v1alpha1/templates_common.go
index 6bbcb5404..d892554ba 100644
--- a/api/v1alpha1/templates_common.go
+++ b/api/v1alpha1/templates_common.go
@@ -56,7 +56,7 @@ func (s *HelmSpec) String() string {
 // TemplateStatusCommon defines the observed state of Template common for all Template types
 type TemplateStatusCommon struct {
 	// Config demonstrates available parameters for template customization,
-	// that can be used when creating ManagedCluster objects.
+	// that can be used when creating ClusterDeployment objects.
 	Config *apiextensionsv1.JSON `json:"config,omitempty"`
 	// ChartRef is a reference to a source controller resource containing the
 	// Helm chart representing the template.
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 48fbe8536..ffacfee28 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -175,6 +175,124 @@ func (in *AvailableUpgrade) DeepCopy() *AvailableUpgrade {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterDeployment) DeepCopyInto(out *ClusterDeployment) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeployment.
+func (in *ClusterDeployment) DeepCopy() *ClusterDeployment {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterDeployment)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterDeployment) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ClusterDeployment, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentList.
+func (in *ClusterDeploymentList) DeepCopy() *ClusterDeploymentList {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterDeploymentList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClusterDeploymentList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterDeploymentSpec) DeepCopyInto(out *ClusterDeploymentSpec) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = new(apiextensionsv1.JSON)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Services != nil {
+		in, out := &in.Services, &out.Services
+		*out = make([]ServiceSpec, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentSpec.
+func (in *ClusterDeploymentSpec) DeepCopy() *ClusterDeploymentSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterDeploymentSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterDeploymentStatus) DeepCopyInto(out *ClusterDeploymentStatus) {
+	*out = *in
+	if in.Services != nil {
+		in, out := &in.Services, &out.Services
+		*out = make([]ServiceStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]v1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AvailableUpgrades != nil {
+		in, out := &in.AvailableUpgrades, &out.AvailableUpgrades
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentStatus.
+func (in *ClusterDeploymentStatus) DeepCopy() *ClusterDeploymentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterDeploymentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterTemplate) DeepCopyInto(out *ClusterTemplate) {
 	*out = *in
@@ -557,124 +675,6 @@ func (in *HelmSpec) DeepCopy() *HelmSpec {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ManagedCluster) DeepCopyInto(out *ManagedCluster) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedCluster.
-func (in *ManagedCluster) DeepCopy() *ManagedCluster {
-	if in == nil {
-		return nil
-	}
-	out := new(ManagedCluster)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ManagedCluster) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ManagedClusterList) DeepCopyInto(out *ManagedClusterList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]ManagedCluster, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterList.
-func (in *ManagedClusterList) DeepCopy() *ManagedClusterList {
-	if in == nil {
-		return nil
-	}
-	out := new(ManagedClusterList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ManagedClusterList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ManagedClusterSpec) DeepCopyInto(out *ManagedClusterSpec) {
-	*out = *in
-	if in.Config != nil {
-		in, out := &in.Config, &out.Config
-		*out = new(apiextensionsv1.JSON)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Services != nil {
-		in, out := &in.Services, &out.Services
-		*out = make([]ServiceSpec, len(*in))
-		copy(*out, *in)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterSpec.
-func (in *ManagedClusterSpec) DeepCopy() *ManagedClusterSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ManagedClusterSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ManagedClusterStatus) DeepCopyInto(out *ManagedClusterStatus) {
-	*out = *in
-	if in.Services != nil {
-		in, out := &in.Services, &out.Services
-		*out = make([]ServiceStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]v1.Condition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.AvailableUpgrades != nil {
-		in, out := &in.AvailableUpgrades, &out.AvailableUpgrades
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterStatus.
-func (in *ManagedClusterStatus) DeepCopy() *ManagedClusterStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ManagedClusterStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Management) DeepCopyInto(out *Management) {
 	*out = *in
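The `zz_generated.deepcopy.go` churn above is mechanical: once the types are renamed, these DeepCopy implementations are regenerated rather than hand-edited. A hedged sketch of the usual controller-gen invocation in kubebuilder-style projects (the exact Makefile target and paths in this repo may differ):

```bash
# Regenerate deepcopy code after the API type rename; typically wrapped in a
# `make generate`-style target.
controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
```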
diff --git a/cmd/main.go b/cmd/main.go
index a6e1aa80c..bd165e50d 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -213,13 +213,13 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "ProviderTemplate")
 		os.Exit(1)
 	}
-	if err = (&controller.ManagedClusterReconciler{
+	if err = (&controller.ClusterDeploymentReconciler{
 		Client:          mgr.GetClient(),
 		Config:          mgr.GetConfig(),
 		DynamicClient:   dc,
 		SystemNamespace: currentNamespace,
 	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "ManagedCluster")
+		setupLog.Error(err, "unable to create controller", "controller", "ClusterDeployment")
 		os.Exit(1)
 	}
 	if err = (&controller.ManagementReconciler{
@@ -328,8 +328,8 @@ func main() {
 }
 
 func setupWebhooks(mgr ctrl.Manager, currentNamespace string) error {
-	if err := (&hmcwebhook.ManagedClusterValidator{}).SetupWebhookWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create webhook", "webhook", "ManagedCluster")
+	if err := (&hmcwebhook.ClusterDeploymentValidator{}).SetupWebhookWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create webhook", "webhook", "ClusterDeployment")
 		return err
 	}
 	if err := (&hmcwebhook.MultiClusterServiceValidator{SystemNamespace: currentNamespace}).SetupWebhookWithManager(mgr); err != nil {
diff --git a/config/dev/adopted-clusterdeployment.yaml b/config/dev/adopted-clusterdeployment.yaml
new file mode 100644
index 000000000..36afd93aa
--- /dev/null
+++ b/config/dev/adopted-clusterdeployment.yaml
@@ -0,0 +1,16 @@
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: ClusterDeployment
+metadata:
+  name: adopted-dev
+  namespace: ${NAMESPACE}
+spec:
+  template: adopted-cluster-0-0-4
+  credential: adopted-cluster-cred
+  config: {}
+  services:
+    - template: kyverno-3-2-6
+      name: kyverno
+      namespace: kyverno
+    - template: ingress-nginx-4-11-0
+      name: ingress-nginx
+      namespace: ingress-nginx
\ No newline at end of file
diff --git a/config/dev/adopted-credentials.yaml b/config/dev/adopted-credentials.yaml
new file mode 100644
index 000000000..bdd68f0bf
--- /dev/null
+++ b/config/dev/adopted-credentials.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+data:
+  value: ${KUBECONFIG_DATA}
+kind: Secret
+metadata:
+  name: adopted-cluster-kubeconf
+  namespace: ${NAMESPACE}
+type: Opaque
+---
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: Credential
+metadata:
+  name: adopted-cluster-cred
+  namespace: ${NAMESPACE}
+spec:
+  description: Adopted Credentials
+  identityRef:
+    apiVersion: v1
+    kind: Secret
+    name: adopted-cluster-kubeconf
+    namespace: ${NAMESPACE}
\ No newline at end of file
diff --git a/config/dev/aws-cloud-nuke.yaml.tpl b/config/dev/aws-cloud-nuke.yaml.tpl
index 378bf20c5..36041093f 100644
--- a/config/dev/aws-cloud-nuke.yaml.tpl
+++ b/config/dev/aws-cloud-nuke.yaml.tpl
@@ -1,13 +1,13 @@
 # This config file is used by cloud-nuke to clean up named resources associated
 # with a specific managed cluster across an AWS account. CLUSTER_NAME is
-# typically the metadata.name of the ManagedCluster.
+# typically the metadata.name of the ClusterDeployment.
 # The resources listed here are ALL of the potential resources that can be
 # filtered by cloud-nuke, except for IAM resources since we'll never touch those.
 # See: https://github.com/gruntwork-io/cloud-nuke?tab=readme-ov-file#whats-supported
 #
 # Usage:
 # - 'CLUSTER_NAME=foo make dev-aws-nuke' will nuke resources affiliated with an AWS cluster named 'foo'
-# Check cluster names with 'kubectl get managedcluster.hmc.mirantis.com -n hmc-system'
+# Check cluster names with 'kubectl get clusterdeployment.hmc.mirantis.com -n hmc-system'
 
 ACM:
   include:
diff --git a/config/dev/aws-managedcluster.yaml b/config/dev/aws-clusterdeployment.yaml
similarity index 93%
rename from config/dev/aws-managedcluster.yaml
rename to config/dev/aws-clusterdeployment.yaml
index 0458923dc..ce7ca988b 100644
--- a/config/dev/aws-managedcluster.yaml
+++ b/config/dev/aws-clusterdeployment.yaml
@@ -1,5 +1,5 @@
 apiVersion: hmc.mirantis.com/v1alpha1
-kind: ManagedCluster
+kind: ClusterDeployment
 metadata:
   name: aws-dev
   namespace: ${NAMESPACE}
diff --git a/config/dev/azure-cloud-nuke.yaml.tpl b/config/dev/azure-cloud-nuke.yaml.tpl
index be98b051f..86bd85d08 100644
--- a/config/dev/azure-cloud-nuke.yaml.tpl
+++ b/config/dev/azure-cloud-nuke.yaml.tpl
@@ -1,12 +1,12 @@
 # This config file is used by azure-nuke to clean up named resources associated
 # with a specific managed cluster across an Azure account. CLUSTER_NAME is
-# typically the metadata.name of the ManagedCluster.
-# This will nuke the ResourceGroup affiliated with the ManagedCluster.
+# typically the metadata.name of the ClusterDeployment.
+# This will nuke the ResourceGroup affiliated with the ClusterDeployment.
 #
 # Usage:
 # 'CLUSTER_NAME=foo AZURE_REGION=westus3 AZURE_TENANT_ID=12345 make dev-azure-nuke'
 #
-# Check cluster names with 'kubectl get managedcluster.hmc.mirantis.com -n hmc-system'
+# Check cluster names with 'kubectl get clusterdeployment.hmc.mirantis.com -n hmc-system'
 
 regions:
   - global
diff --git a/config/dev/azure-managedcluster.yaml b/config/dev/azure-clusterdeployment.yaml
similarity index 95%
rename from config/dev/azure-managedcluster.yaml
rename to config/dev/azure-clusterdeployment.yaml
index 830b47553..8264422f6 100644
--- a/config/dev/azure-managedcluster.yaml
+++ b/config/dev/azure-clusterdeployment.yaml
@@ -1,5 +1,5 @@
 apiVersion: hmc.mirantis.com/v1alpha1
-kind: ManagedCluster
+kind: ClusterDeployment
 metadata:
   name: azure-dev
   namespace: ${NAMESPACE}
diff --git a/config/dev/eks-managedcluster.yaml b/config/dev/eks-clusterdeployment.yaml
similarity index 90%
rename from config/dev/eks-managedcluster.yaml
rename to config/dev/eks-clusterdeployment.yaml
index d17cc48d9..8973e7ffd 100644
--- a/config/dev/eks-managedcluster.yaml
+++ b/config/dev/eks-clusterdeployment.yaml
@@ -1,5 +1,5 @@
 apiVersion: hmc.mirantis.com/v1alpha1
-kind: ManagedCluster
+kind: ClusterDeployment
 metadata:
   name: eks-dev
   namespace: ${NAMESPACE}
diff --git a/config/dev/vsphere-managedcluster.yaml b/config/dev/vsphere-clusterdeployment.yaml
similarity index 97%
rename from config/dev/vsphere-managedcluster.yaml
rename to config/dev/vsphere-clusterdeployment.yaml
index 48ceae711..833c7272a 100644
--- a/config/dev/vsphere-managedcluster.yaml
+++ b/config/dev/vsphere-clusterdeployment.yaml
@@ -1,5 +1,5 @@
 apiVersion: hmc.mirantis.com/v1alpha1
-kind: ManagedCluster
+kind: ClusterDeployment
 metadata:
   name: vsphere-dev
   namespace: ${NAMESPACE}
diff --git a/docs/dev.md b/docs/dev.md
index 1484eebfc..e96b629ae 100644
--- a/docs/dev.md
+++ b/docs/dev.md
@@ -78,6 +78,15 @@ To properly deploy dev cluster you need to have the following variable set:
 
 - `DEV_PROVIDER` - should be "eks"
 
+- The rest of deployment procedure is the same as for other providers.
+
+### Adopted Cluster Setup
+
+To "adopt" an existing cluster, first obtain the kubeconfig file for the cluster.
+Then set `DEV_PROVIDER` to "adopted". Export the kubeconfig file as a variable by running the following:
+
+`export KUBECONFIG_DATA=$(cat kubeconfig | base64 -w 0)`
+
 The rest of deployment procedure is the same as for other providers.
 
 ## Deploy HMC
@@ -87,9 +96,9 @@ another provider change `DEV_PROVIDER` variable with the name of provider before
 running make (e.g. `export DEV_PROVIDER=azure`).
 
 1. Configure your cluster parameters in provider specific file
-   (for example `config/dev/aws-managedcluster.yaml` in case of AWS):
+   (for example `config/dev/aws-clusterdeployment.yaml` in case of AWS):
 
-   * Configure the `name` of the ManagedCluster
+   * Configure the `name` of the ClusterDeployment
    * Change instance type or size for control plane and worker machines
    * Specify the number of control plane and worker machines, etc
@@ -108,7 +117,7 @@ running make (e.g. `export DEV_PROVIDER=azure`).
 
 ```bash
 export KUBECONFIG=~/.kube/config
-./bin/clusterctl describe cluster <managedcluster-name> -n hmc-system --show-conditions all
+./bin/clusterctl describe cluster <clusterdeployment-name> -n hmc-system --show-conditions all
 ```
 
 > [!NOTE]
@@ -122,7 +131,7 @@
 
 7. Retrieve the `kubeconfig` of your managed cluster:
 
 ```bash
-kubectl --kubeconfig ~/.kube/config get secret -n hmc-system <managedcluster-name>-kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig
+kubectl --kubeconfig ~/.kube/config get secret -n hmc-system <clusterdeployment-name>-kubeconfig -o=jsonpath={.data.value} | base64 -d > kubeconfig
 ```
 
 ## Running E2E tests locally
@@ -140,13 +149,13 @@ IMG="ghcr.io/mirantis/hmc/controller-ci:v0.0.1-179-ga5bdf29" \
 
 Optionally, the `NO_CLEANUP=1` env var can be used to disable `After` nodes from
 running within some specs, this will allow users to debug tests by re-running them
 without the need to wait a while for an infrastructure deployment to occur.
 
-For subsequent runs the `MANAGED_CLUSTER_NAME=<cluster name>` env var should be
+For subsequent runs the `CLUSTER_DEPLOYMENT_NAME=<cluster name>` env var should be
 passed to tell the test what cluster name to use so that it does not try to
 generate a new name and deploy a new cluster.
 
 Tests that run locally use autogenerated names like `12345678-e2e-test` while
 tests that run in CI use names such as `ci-1234567890-e2e-test`. You can always
-pass `MANAGED_CLUSTER_NAME=<cluster name>` from the get-go to customize the name used by the
+pass `CLUSTER_DEPLOYMENT_NAME=<cluster name>` from the get-go to customize the name used by the
 test.
 
 ### Filtering test runs
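Putting the new adopted-cluster pieces together (the `dev-adopted-creds` Make target, `config/dev/adopted-credentials.yaml`, and `config/dev/adopted-clusterdeployment.yaml`), the dev loop from `docs/dev.md` looks roughly like this. A sketch, assuming the generic `dev-creds-apply`/`dev-mcluster-apply` targets dispatch on `DEV_PROVIDER` as shown in the Makefile hunk above and that `NAMESPACE` is exported for envsubst:

```bash
export DEV_PROVIDER=adopted
export NAMESPACE=hmc-system
# Base64-encode the kubeconfig of the cluster being adopted.
export KUBECONFIG_DATA=$(cat kubeconfig | base64 -w 0)
# Applies config/dev/adopted-credentials.yaml (Secret + Credential) via dev-adopted-creds.
make dev-creds-apply
# Applies config/dev/adopted-clusterdeployment.yaml.
make dev-mcluster-apply
```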
-func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *ClusterDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { l := ctrl.LoggerFrom(ctx) - l.Info("Reconciling ManagedCluster") + l.Info("Reconciling ClusterDeployment") - managedCluster := &hmc.ManagedCluster{} - if err := r.Get(ctx, req.NamespacedName, managedCluster); err != nil { + clusterDeployment := &hmc.ClusterDeployment{} + if err := r.Get(ctx, req.NamespacedName, clusterDeployment); err != nil { if apierrors.IsNotFound(err) { - l.Info("ManagedCluster not found, ignoring since object must be deleted") + l.Info("ClusterDeployment not found, ignoring since object must be deleted") return ctrl.Result{}, nil } - l.Error(err, "Failed to get ManagedCluster") + l.Error(err, "Failed to get ClusterDeployment") return ctrl.Result{}, err } - if !managedCluster.DeletionTimestamp.IsZero() { - l.Info("Deleting ManagedCluster") - return r.Delete(ctx, managedCluster) + if !clusterDeployment.DeletionTimestamp.IsZero() { + l.Info("Deleting ClusterDeployment") + return r.Delete(ctx, clusterDeployment) } - if managedCluster.Status.ObservedGeneration == 0 { + if clusterDeployment.Status.ObservedGeneration == 0 { mgmt := &hmc.Management{} mgmtRef := client.ObjectKey{Name: hmc.ManagementName} if err := r.Get(ctx, mgmtRef, mgmt); err != nil { l.Error(err, "Failed to get Management object") return ctrl.Result{}, err } - if err := telemetry.TrackManagedClusterCreate(string(mgmt.UID), string(managedCluster.UID), managedCluster.Spec.Template, managedCluster.Spec.DryRun); err != nil { - l.Error(err, "Failed to track ManagedCluster creation") + if err := telemetry.TrackClusterDeploymentCreate(string(mgmt.UID), string(clusterDeployment.UID), clusterDeployment.Spec.Template, clusterDeployment.Spec.DryRun); err != nil { + l.Error(err, "Failed to track ClusterDeployment creation") } } - return r.reconcileUpdate(ctx, managedCluster) + return r.reconcileUpdate(ctx, clusterDeployment) } -func (r *ManagedClusterReconciler) setStatusFromChildObjects(ctx context.Context, managedCluster *hmc.ManagedCluster, gvr schema.GroupVersionResource, conditions []string) (requeue bool, _ error) { +func (r *ClusterDeploymentReconciler) setStatusFromChildObjects( + ctx context.Context, clusterDeployment *hmc.ClusterDeployment, gvr schema.GroupVersionResource, conditions []string, +) (requeue bool, _ error) { l := ctrl.LoggerFrom(ctx) - resourceConditions, err := status.GetResourceConditions(ctx, managedCluster.Namespace, r.DynamicClient, gvr, - labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: managedCluster.Name}).String()) + resourceConditions, err := status.GetResourceConditions(ctx, clusterDeployment.Namespace, r.DynamicClient, gvr, + labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: clusterDeployment.Name}).String()) if err != nil { if errors.As(err, &status.ResourceNotFoundError{}) { l.Info(err.Error()) @@ -129,19 +131,19 @@ func (r *ManagedClusterReconciler) setStatusFromChildObjects(ctx context.Context metaCondition.Message += " is Ready" metaCondition.Reason = "Succeeded" } - apimeta.SetStatusCondition(managedCluster.GetConditions(), metaCondition) + apimeta.SetStatusCondition(clusterDeployment.GetConditions(), metaCondition) } } return !allConditionsComplete, nil } -func (r *ManagedClusterReconciler) reconcileUpdate(ctx context.Context, mc *hmc.ManagedCluster) (_ ctrl.Result, err error) { +func (r *ClusterDeploymentReconciler) 
reconcileUpdate(ctx context.Context, mc *hmc.ClusterDeployment) (_ ctrl.Result, err error) { l := ctrl.LoggerFrom(ctx) - if controllerutil.AddFinalizer(mc, hmc.ManagedClusterFinalizer) { + if controllerutil.AddFinalizer(mc, hmc.ClusterDeploymentFinalizer) { if err := r.Client.Update(ctx, mc); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to update managedCluster %s/%s: %w", mc.Namespace, mc.Name, err) + return ctrl.Result{}, fmt.Errorf("failed to update clusterDeployment %s/%s: %w", mc.Namespace, mc.Name, err) } return ctrl.Result{}, nil } @@ -187,7 +189,7 @@ func (r *ManagedClusterReconciler) reconcileUpdate(ctx context.Context, mc *hmc. return ctrl.Result{}, nil } -func (r *ManagedClusterReconciler) updateCluster(ctx context.Context, mc *hmc.ManagedCluster, clusterTpl *hmc.ClusterTemplate) (ctrl.Result, error) { +func (r *ClusterDeploymentReconciler) updateCluster(ctx context.Context, mc *hmc.ClusterDeployment, clusterTpl *hmc.ClusterTemplate) (ctrl.Result, error) { l := ctrl.LoggerFrom(ctx) if clusterTpl == nil { @@ -306,7 +308,7 @@ func (r *ManagedClusterReconciler) updateCluster(ctx context.Context, mc *hmc.Ma Values: helmValues, OwnerReference: &metav1.OwnerReference{ APIVersion: hmc.GroupVersion.String(), - Kind: hmc.ManagedClusterKind, + Kind: hmc.ClusterDeploymentKind, Name: mc.Name, UID: mc.UID, }, @@ -357,7 +359,7 @@ func (r *ManagedClusterReconciler) updateCluster(ctx context.Context, mc *hmc.Ma return ctrl.Result{}, nil } -func (r *ManagedClusterReconciler) aggregateCapoConditions(ctx context.Context, managedCluster *hmc.ManagedCluster) (requeue bool, _ error) { +func (r *ClusterDeploymentReconciler) aggregateCapoConditions(ctx context.Context, clusterDeployment *hmc.ClusterDeployment) (requeue bool, _ error) { type objectToCheck struct { gvr schema.GroupVersionResource conditions []string @@ -382,7 +384,7 @@ func (r *ManagedClusterReconciler) aggregateCapoConditions(ctx context.Context, conditions: []string{"Available"}, }, } { - needRequeue, err := r.setStatusFromChildObjects(ctx, managedCluster, obj.gvr, obj.conditions) + needRequeue, err := r.setStatusFromChildObjects(ctx, clusterDeployment, obj.gvr, obj.conditions) errs = errors.Join(errs, err) if needRequeue { requeue = true @@ -392,8 +394,8 @@ func (r *ManagedClusterReconciler) aggregateCapoConditions(ctx context.Context, return requeue, errs } -// updateServices reconciles services provided in ManagedCluster.Spec.Services. -func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.ManagedCluster) (_ ctrl.Result, err error) { +// updateServices reconciles services provided in ClusterDeployment.Spec.Services. 
+func (r *ClusterDeploymentReconciler) updateServices(ctx context.Context, mc *hmc.ClusterDeployment) (_ ctrl.Result, err error) { l := ctrl.LoggerFrom(ctx) l.Info("Reconciling Services") @@ -439,7 +441,7 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M sveltos.ReconcileProfileOpts{ OwnerReference: &metav1.OwnerReference{ APIVersion: hmc.GroupVersion.String(), - Kind: hmc.ManagedClusterKind, + Kind: hmc.ClusterDeploymentKind, Name: mc.Name, UID: mc.UID, }, @@ -479,14 +481,14 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M return ctrl.Result{}, nil } -func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, managedCluster *hmc.ManagedCluster, hcChart *chart.Chart) error { +func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, clusterDeployment *hmc.ClusterDeployment, hcChart *chart.Chart) error { install := action.NewInstall(actionConfig) install.DryRun = true - install.ReleaseName = managedCluster.Name - install.Namespace = managedCluster.Namespace + install.ReleaseName = clusterDeployment.Name + install.Namespace = clusterDeployment.Namespace install.ClientOnly = true - vals, err := managedCluster.HelmValues() + vals, err := clusterDeployment.HelmValues() if err != nil { return err } @@ -497,23 +499,23 @@ func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configu return nil } -// updateStatus updates the status for the ManagedCluster object. -func (r *ManagedClusterReconciler) updateStatus(ctx context.Context, managedCluster *hmc.ManagedCluster, template *hmc.ClusterTemplate) error { - managedCluster.Status.ObservedGeneration = managedCluster.Generation - managedCluster.Status.Conditions = updateStatusConditions(managedCluster.Status.Conditions, "ManagedCluster is ready") +// updateStatus updates the status for the ClusterDeployment object. 
+func (r *ClusterDeploymentReconciler) updateStatus(ctx context.Context, clusterDeployment *hmc.ClusterDeployment, template *hmc.ClusterTemplate) error { + clusterDeployment.Status.ObservedGeneration = clusterDeployment.Generation + clusterDeployment.Status.Conditions = updateStatusConditions(clusterDeployment.Status.Conditions, "ClusterDeployment is ready") - if err := r.setAvailableUpgrades(ctx, managedCluster, template); err != nil { + if err := r.setAvailableUpgrades(ctx, clusterDeployment, template); err != nil { return errors.New("failed to set available upgrades") } - if err := r.Status().Update(ctx, managedCluster); err != nil { - return fmt.Errorf("failed to update status for managedCluster %s/%s: %w", managedCluster.Namespace, managedCluster.Name, err) + if err := r.Status().Update(ctx, clusterDeployment); err != nil { + return fmt.Errorf("failed to update status for clusterDeployment %s/%s: %w", clusterDeployment.Namespace, clusterDeployment.Name, err) } return nil } -func (r *ManagedClusterReconciler) getSource(ctx context.Context, ref *hcv2.CrossNamespaceSourceReference) (sourcev1.Source, error) { +func (r *ClusterDeploymentReconciler) getSource(ctx context.Context, ref *hcv2.CrossNamespaceSourceReference) (sourcev1.Source, error) { if ref == nil { return nil, errors.New("helm chart source is not provided") } @@ -525,27 +527,27 @@ func (r *ManagedClusterReconciler) getSource(ctx context.Context, ref *hcv2.Cros return &hc, nil } -func (r *ManagedClusterReconciler) Delete(ctx context.Context, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { +func (r *ClusterDeploymentReconciler) Delete(ctx context.Context, clusterDeployment *hmc.ClusterDeployment) (ctrl.Result, error) { l := ctrl.LoggerFrom(ctx) hr := &hcv2.HelmRelease{} - if err := r.Get(ctx, client.ObjectKeyFromObject(managedCluster), hr); err != nil { + if err := r.Get(ctx, client.ObjectKeyFromObject(clusterDeployment), hr); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, err } - l.Info("Removing Finalizer", "finalizer", hmc.ManagedClusterFinalizer) - if controllerutil.RemoveFinalizer(managedCluster, hmc.ManagedClusterFinalizer) { - if err := r.Client.Update(ctx, managedCluster); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to update managedCluster %s/%s: %w", managedCluster.Namespace, managedCluster.Name, err) + l.Info("Removing Finalizer", "finalizer", hmc.ClusterDeploymentFinalizer) + if controllerutil.RemoveFinalizer(clusterDeployment, hmc.ClusterDeploymentFinalizer) { + if err := r.Client.Update(ctx, clusterDeployment); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update clusterDeployment %s/%s: %w", clusterDeployment.Namespace, clusterDeployment.Name, err) } } - l.Info("ManagedCluster deleted") + l.Info("ClusterDeployment deleted") return ctrl.Result{}, nil } - if err := helm.DeleteHelmRelease(ctx, r.Client, managedCluster.Name, managedCluster.Namespace); err != nil { + if err := helm.DeleteHelmRelease(ctx, r.Client, clusterDeployment.Name, clusterDeployment.Namespace); err != nil { return ctrl.Result{}, err } @@ -554,11 +556,11 @@ func (r *ManagedClusterReconciler) Delete(ctx context.Context, managedCluster *h // It is detailed in https://github.com/projectsveltos/addon-controller/issues/732. // We may try to remove the explicit call to Delete once a fix for it has been merged. // TODO(https://github.com/Mirantis/hmc/issues/526). 
- if err := sveltos.DeleteProfile(ctx, r.Client, managedCluster.Namespace, managedCluster.Name); err != nil { + if err := sveltos.DeleteProfile(ctx, r.Client, clusterDeployment.Namespace, clusterDeployment.Name); err != nil { return ctrl.Result{}, err } - if err := r.releaseCluster(ctx, managedCluster.Namespace, managedCluster.Name, managedCluster.Spec.Template); err != nil { + if err := r.releaseCluster(ctx, clusterDeployment.Namespace, clusterDeployment.Name, clusterDeployment.Spec.Template); err != nil { return ctrl.Result{}, err } @@ -566,7 +568,7 @@ func (r *ManagedClusterReconciler) Delete(ctx context.Context, managedCluster *h return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil } -func (r *ManagedClusterReconciler) releaseCluster(ctx context.Context, namespace, name, templateName string) error { +func (r *ClusterDeploymentReconciler) releaseCluster(ctx context.Context, namespace, name, templateName string) error { providers, err := r.getInfraProvidersNames(ctx, namespace, templateName) if err != nil { return err @@ -626,7 +628,7 @@ func (r *ManagedClusterReconciler) releaseCluster(ctx context.Context, namespace return nil } -func (r *ManagedClusterReconciler) getInfraProvidersNames(ctx context.Context, templateNamespace, templateName string) ([]string, error) { +func (r *ClusterDeploymentReconciler) getInfraProvidersNames(ctx context.Context, templateNamespace, templateName string) ([]string, error) { template := &hmc.ClusterTemplate{} templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} if err := r.Get(ctx, templateRef, template); err != nil { @@ -648,7 +650,7 @@ func (r *ManagedClusterReconciler) getInfraProvidersNames(ctx context.Context, t return ips[:len(ips):len(ips)], nil } -func (r *ManagedClusterReconciler) getCluster(ctx context.Context, namespace, name string, gvk schema.GroupVersionKind) (*metav1.PartialObjectMetadata, error) { +func (r *ClusterDeploymentReconciler) getCluster(ctx context.Context, namespace, name string, gvk schema.GroupVersionKind) (*metav1.PartialObjectMetadata, error) { opts := &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: name}), Namespace: namespace, @@ -665,7 +667,7 @@ func (r *ManagedClusterReconciler) getCluster(ctx context.Context, namespace, na return &itemsList.Items[0], nil } -func (r *ManagedClusterReconciler) removeClusterFinalizer(ctx context.Context, cluster *metav1.PartialObjectMetadata) error { +func (r *ClusterDeploymentReconciler) removeClusterFinalizer(ctx context.Context, cluster *metav1.PartialObjectMetadata) error { originalCluster := *cluster if controllerutil.RemoveFinalizer(cluster, hmc.BlockingFinalizer) { ctrl.LoggerFrom(ctx).Info("Allow to stop cluster", "finalizer", hmc.BlockingFinalizer) @@ -677,7 +679,7 @@ func (r *ManagedClusterReconciler) removeClusterFinalizer(ctx context.Context, c return nil } -func (r *ManagedClusterReconciler) objectsAvailable(ctx context.Context, namespace, clusterName string, gvk schema.GroupVersionKind) (bool, error) { +func (r *ClusterDeploymentReconciler) objectsAvailable(ctx context.Context, namespace, clusterName string, gvk schema.GroupVersionKind) (bool, error) { opts := &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{hmc.ClusterNameLabelKey: clusterName}), Namespace: namespace, @@ -691,28 +693,28 @@ func (r *ManagedClusterReconciler) objectsAvailable(ctx context.Context, namespa return len(itemsList.Items) != 0, nil } -func (r *ManagedClusterReconciler) 
reconcileCredentialPropagation(ctx context.Context, managedCluster *hmc.ManagedCluster) error {
+func (r *ClusterDeploymentReconciler) reconcileCredentialPropagation(ctx context.Context, clusterDeployment *hmc.ClusterDeployment) error {
 	l := ctrl.LoggerFrom(ctx)
 	l.Info("Reconciling CCM credentials propagation")
 
-	providers, err := r.getInfraProvidersNames(ctx, managedCluster.Namespace, managedCluster.Spec.Template)
+	providers, err := r.getInfraProvidersNames(ctx, clusterDeployment.Namespace, clusterDeployment.Spec.Template)
 	if err != nil {
-		return fmt.Errorf("failed to get cluster providers for cluster %s/%s: %w", managedCluster.Namespace, managedCluster.Name, err)
+		return fmt.Errorf("failed to get cluster providers for cluster %s/%s: %w", clusterDeployment.Namespace, clusterDeployment.Name, err)
 	}
 
 	kubeconfSecret := &corev1.Secret{}
 	if err := r.Client.Get(ctx, client.ObjectKey{
-		Name:      managedCluster.Name + "-kubeconfig",
-		Namespace: managedCluster.Namespace,
+		Name:      clusterDeployment.Name + "-kubeconfig",
+		Namespace: clusterDeployment.Namespace,
 	}, kubeconfSecret); err != nil {
-		return fmt.Errorf("failed to get kubeconfig secret for cluster %s/%s: %w", managedCluster.Namespace, managedCluster.Name, err)
+		return fmt.Errorf("failed to get kubeconfig secret for cluster %s/%s: %w", clusterDeployment.Namespace, clusterDeployment.Name, err)
 	}
 
 	propnCfg := &credspropagation.PropagationCfg{
-		Client:          r.Client,
-		ManagedCluster:  managedCluster,
-		KubeconfSecret:  kubeconfSecret,
-		SystemNamespace: r.SystemNamespace,
+		Client:            r.Client,
+		ClusterDeployment: clusterDeployment,
+		KubeconfSecret:    kubeconfSecret,
+		SystemNamespace:   r.SystemNamespace,
 	}
 
 	for _, provider := range providers {
@@ -723,7 +725,7 @@ func (r *ManagedClusterReconciler) reconcileCredentialPropagation(ctx context.Co
 			l.Info("Azure creds propagation start")
 			if err := credspropagation.PropagateAzureSecrets(ctx, propnCfg); err != nil {
 				errMsg := fmt.Sprintf("failed to create Azure CCM credentials: %s", err)
-				apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{
+				apimeta.SetStatusCondition(clusterDeployment.GetConditions(), metav1.Condition{
 					Type:    hmc.CredentialsPropagatedCondition,
 					Status:  metav1.ConditionFalse,
 					Reason:  hmc.FailedReason,
@@ -733,7 +735,7 @@ func (r *ManagedClusterReconciler) reconcileCredentialPropagation(ctx context.Co
 				return errors.New(errMsg)
 			}
 
-			apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{
+			apimeta.SetStatusCondition(clusterDeployment.GetConditions(), metav1.Condition{
 				Type:    hmc.CredentialsPropagatedCondition,
 				Status:  metav1.ConditionTrue,
 				Reason:  hmc.SucceededReason,
@@ -743,7 +745,7 @@ func (r *ManagedClusterReconciler) reconcileCredentialPropagation(ctx context.Co
 			l.Info("vSphere creds propagation start")
 			if err := credspropagation.PropagateVSphereSecrets(ctx, propnCfg); err != nil {
 				errMsg := fmt.Sprintf("failed to create vSphere CCM credentials: %s", err)
-				apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{
+				apimeta.SetStatusCondition(clusterDeployment.GetConditions(), metav1.Condition{
 					Type:    hmc.CredentialsPropagatedCondition,
 					Status:  metav1.ConditionFalse,
 					Reason:  hmc.FailedReason,
@@ -752,14 +754,14 @@ func (r *ManagedClusterReconciler) reconcileCredentialPropagation(ctx context.Co
 				return errors.New(errMsg)
 			}
 
-			apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{
+			apimeta.SetStatusCondition(clusterDeployment.GetConditions(), metav1.Condition{
 				Type:    hmc.CredentialsPropagatedCondition,
 				Status:  metav1.ConditionTrue,
 				Reason:  hmc.SucceededReason,
 				Message: "vSphere CCM credentials created",
 			})
 		default:
-			apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{
+			apimeta.SetStatusCondition(clusterDeployment.GetConditions(), metav1.Condition{
 				Type:    hmc.CredentialsPropagatedCondition,
 				Status:  metav1.ConditionFalse,
 				Reason:  hmc.FailedReason,
@@ -789,7 +791,7 @@ func setIdentityHelmValues(values *apiextensionsv1.JSON, idRef *corev1.ObjectRef
 	return &apiextensionsv1.JSON{Raw: valuesRaw}, nil
 }
 
-func (r *ManagedClusterReconciler) setAvailableUpgrades(ctx context.Context, managedCluster *hmc.ManagedCluster, template *hmc.ClusterTemplate) error {
+func (r *ClusterDeploymentReconciler) setAvailableUpgrades(ctx context.Context, clusterDeployment *hmc.ClusterDeployment, template *hmc.ClusterTemplate) error {
 	if template == nil {
 		return nil
 	}
@@ -817,22 +819,22 @@ func (r *ManagedClusterReconciler) setAvailableUpgrades(ctx context.Context, man
 		availableUpgrades = append(availableUpgrades, availableUpgrade.Name)
 	}
 
-	managedCluster.Status.AvailableUpgrades = availableUpgrades
+	clusterDeployment.Status.AvailableUpgrades = availableUpgrades
 	return nil
 }
 
 // SetupWithManager sets up the controller with the Manager.
-func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
+func (r *ClusterDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
-		For(&hmc.ManagedCluster{}).
+		For(&hmc.ClusterDeployment{}).
 		Watches(&hcv2.HelmRelease{},
 			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []ctrl.Request {
-				managedClusterRef := client.ObjectKeyFromObject(o)
-				if err := r.Client.Get(ctx, managedClusterRef, &hmc.ManagedCluster{}); err != nil {
+				clusterDeploymentRef := client.ObjectKeyFromObject(o)
+				if err := r.Client.Get(ctx, clusterDeploymentRef, &hmc.ClusterDeployment{}); err != nil {
 					return []ctrl.Request{}
 				}
-				return []ctrl.Request{{NamespacedName: managedClusterRef}}
+				return []ctrl.Request{{NamespacedName: clusterDeploymentRef}}
 			}),
 		).
 		Watches(&hmc.ClusterTemplateChain{},
@@ -844,14 +846,14 @@ func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
 				var req []ctrl.Request
 				for _, template := range getTemplateNamesManagedByChain(chain) {
-					managedClusters := &hmc.ManagedClusterList{}
-					err := r.Client.List(ctx, managedClusters,
+					clusterDeployments := &hmc.ClusterDeploymentList{}
+					err := r.Client.List(ctx, clusterDeployments,
 						client.InNamespace(chain.Namespace),
-						client.MatchingFields{hmc.ManagedClusterTemplateIndexKey: template})
+						client.MatchingFields{hmc.ClusterDeploymentTemplateIndexKey: template})
 					if err != nil {
 						return []ctrl.Request{}
 					}
-					for _, cluster := range managedClusters.Items {
+					for _, cluster := range clusterDeployments.Items {
 						req = append(req, ctrl.Request{
 							NamespacedName: client.ObjectKey{
 								Namespace: cluster.Namespace,
@@ -876,16 +878,16 @@ func (r *ManagedClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		).
 		Watches(&hmc.Credential{},
 			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []ctrl.Request {
-				managedClusters := &hmc.ManagedClusterList{}
-				err := r.Client.List(ctx, managedClusters,
+				clusterDeployments := &hmc.ClusterDeploymentList{}
+				err := r.Client.List(ctx, clusterDeployments,
 					client.InNamespace(o.GetNamespace()),
-					client.MatchingFields{hmc.ManagedClusterCredentialIndexKey: o.GetName()})
+					client.MatchingFields{hmc.ClusterDeploymentCredentialIndexKey: o.GetName()})
 				if err != nil {
 					return []ctrl.Request{}
 				}
 				req := []ctrl.Request{}
-				for _, cluster := range managedClusters.Items {
+				for _, cluster := range clusterDeployments.Items {
 					req = append(req, ctrl.Request{
 						NamespacedName: client.ObjectKey{
 							Namespace: cluster.Namespace,
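The `CredentialsPropagated` condition set above is the contract the rest of the system consumes. For reference (not part of the patch), a consumer can read it back with the standard apimachinery condition helpers; the helper name below is hypothetical:

```go
package example

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
)

// credentialsReady reports whether CCM credential propagation has
// succeeded for the given ClusterDeployment. GetConditions returns a
// pointer to the conditions slice (it is passed straight into
// apimeta.SetStatusCondition above), hence the dereference.
func credentialsReady(cd *hmc.ClusterDeployment) bool {
	return apimeta.IsStatusConditionTrue(*cd.GetConditions(), hmc.CredentialsPropagatedCondition)
}
```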
diff --git a/internal/controller/managedcluster_controller_test.go b/internal/controller/clusterdeployment_controller_test.go
similarity index 80%
rename from internal/controller/managedcluster_controller_test.go
rename to internal/controller/clusterdeployment_controller_test.go
index f252e2e7c..4687c1591 100644
--- a/internal/controller/managedcluster_controller_test.go
+++ b/internal/controller/clusterdeployment_controller_test.go
@@ -33,11 +33,11 @@ import (
 	hmc "github.com/Mirantis/hmc/api/v1alpha1"
 )
 
-var _ = Describe("ManagedCluster Controller", func() {
+var _ = Describe("ClusterDeployment Controller", func() {
 	Context("When reconciling a resource", func() {
 		const (
-			managedClusterName      = "test-managed-cluster"
-			managedClusterNamespace = "test"
+			clusterDeploymentName      = "test-cluster-deployment"
+			clusterDeploymentNamespace = "test"
 
 			templateName    = "test-template"
 			svcTemplateName = "test-svc-template"
@@ -47,10 +47,10 @@ var _ = Describe("ManagedCluster Controller", func() {
 		ctx := context.Background()
 
 		typeNamespacedName := types.NamespacedName{
-			Name:      managedClusterName,
-			Namespace: managedClusterNamespace,
+			Name:      clusterDeploymentName,
+			Namespace: clusterDeploymentNamespace,
 		}
-		managedCluster := &hmc.ManagedCluster{}
+		clusterDeployment := &hmc.ClusterDeployment{}
 		template := &hmc.ClusterTemplate{}
 		svcTemplate := &hmc.ServiceTemplate{}
 		management := &hmc.Management{}
@@ -58,12 +58,12 @@ var _ = Describe("ManagedCluster Controller", func() {
 		namespace := &corev1.Namespace{}
 
 		BeforeEach(func() {
-			By("creating ManagedCluster namespace")
-			err := k8sClient.Get(ctx, types.NamespacedName{Name: managedClusterNamespace}, namespace)
+			By("creating ClusterDeployment namespace")
+			err := k8sClient.Get(ctx, types.NamespacedName{Name: clusterDeploymentNamespace}, namespace)
 			if err != nil && errors.IsNotFound(err) {
 				namespace = &corev1.Namespace{
 					ObjectMeta: metav1.ObjectMeta{
-						Name: managedClusterNamespace,
+						Name: clusterDeploymentNamespace,
 					},
 				}
 				Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
@@ -75,7 +75,7 @@ var _ = Describe("ManagedCluster Controller", func() {
 				template = &hmc.ClusterTemplate{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      templateName,
-						Namespace: managedClusterNamespace,
+						Namespace: clusterDeploymentNamespace,
 					},
 					Spec: hmc.ClusterTemplateSpec{
 						Helm: hmc.HelmSpec{
@@ -103,12 +103,12 @@ var _ = Describe("ManagedCluster Controller", func() {
 			}
 
 			By("creating the custom resource for the Kind ServiceTemplate")
-			err = k8sClient.Get(ctx, client.ObjectKey{Namespace: managedClusterNamespace, Name: svcTemplateName}, svcTemplate)
+			err = k8sClient.Get(ctx, client.ObjectKey{Namespace: clusterDeploymentNamespace, Name: svcTemplateName}, svcTemplate)
 			if err != nil && errors.IsNotFound(err) {
 				svcTemplate = &hmc.ServiceTemplate{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      svcTemplateName,
-						Namespace: managedClusterNamespace,
+						Namespace: clusterDeploymentNamespace,
 					},
 					Spec: hmc.ServiceTemplateSpec{
 						Helm: hmc.HelmSpec{
@@ -154,7 +154,7 @@ var _ = Describe("ManagedCluster Controller", func() {
 				credential = &hmc.Credential{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      credentialName,
-						Namespace: managedClusterNamespace,
+						Namespace: clusterDeploymentNamespace,
 					},
 					Spec: hmc.CredentialSpec{
 						IdentityRef: &corev1.ObjectReference{
@@ -171,15 +171,15 @@ var _ = Describe("ManagedCluster Controller", func() {
 				Expect(k8sClient.Status().Update(ctx, credential)).To(Succeed())
 			}
 
-			By("creating the custom resource for the Kind ManagedCluster")
-			err = k8sClient.Get(ctx, typeNamespacedName, managedCluster)
+			By("creating the custom resource for the Kind ClusterDeployment")
+			err = k8sClient.Get(ctx, typeNamespacedName, clusterDeployment)
 			if err != nil && errors.IsNotFound(err) {
-				managedCluster = &hmc.ManagedCluster{
+				clusterDeployment = &hmc.ClusterDeployment{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      managedClusterName,
-						Namespace: managedClusterNamespace,
+						Name:      clusterDeploymentName,
+						Namespace: clusterDeploymentNamespace,
 					},
-					Spec: hmc.ManagedClusterSpec{
+					Spec: hmc.ClusterDeploymentSpec{
 						Template:   templateName,
 						Credential: credentialName,
 						Services: []hmc.ServiceSpec{
@@ -190,23 +190,23 @@ var _ = Describe("ManagedCluster Controller", func() {
 						},
 					},
 				}
-				Expect(k8sClient.Create(ctx, managedCluster)).To(Succeed())
+				Expect(k8sClient.Create(ctx, clusterDeployment)).To(Succeed())
 			}
 		})
 
 		AfterEach(func() {
 			By("Cleanup")
 
-			controllerReconciler := &ManagedClusterReconciler{
+			controllerReconciler := &ClusterDeploymentReconciler{
 				Client: k8sClient,
 			}
 
-			Expect(k8sClient.Delete(ctx, managedCluster)).To(Succeed())
-			// Running reconcile to remove the finalizer and delete the ManagedCluster
+			Expect(k8sClient.Delete(ctx, clusterDeployment)).To(Succeed())
+			// Running reconcile to remove the finalizer and delete the ClusterDeployment
 			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typeNamespacedName})
 			Expect(err).NotTo(HaveOccurred())
-			Eventually(k8sClient.Get, 1*time.Minute, 5*time.Second).WithArguments(ctx, typeNamespacedName, managedCluster).Should(HaveOccurred())
+			Eventually(k8sClient.Get, 1*time.Minute, 5*time.Second).WithArguments(ctx, typeNamespacedName, clusterDeployment).Should(HaveOccurred())
 
 			Expect(k8sClient.Delete(ctx, template)).To(Succeed())
 			Expect(k8sClient.Delete(ctx, management)).To(Succeed())
@@ -214,7 +214,7 @@ var _ = Describe("ManagedCluster Controller", func() {
 		})
 		It("should successfully reconcile the resource", func() {
 			By("Reconciling the created resource")
-			controllerReconciler := &ManagedClusterReconciler{
+			controllerReconciler := &ClusterDeploymentReconciler{
 				Client: k8sClient,
 				Config: &rest.Config{},
 			}
diff --git a/internal/controller/management_controller.go b/internal/controller/management_controller.go
index 52b618408..f79186194 100644
--- a/internal/controller/management_controller.go
+++ b/internal/controller/management_controller.go
@@ -123,6 +123,9 @@ func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Manag
 		requeue bool
 	)
 
+
+	statusAccumulator.providers = append(statusAccumulator.providers, "infrastructure-internal")
+
 	for _, component := range components {
 		l.V(1).Info("reconciling components", "component", component)
 		template := new(hmc.ProviderTemplate)
diff --git a/internal/controller/management_controller_test.go b/internal/controller/management_controller_test.go
index bf527fcd3..3a44fa344 100644
--- a/internal/controller/management_controller_test.go
+++ b/internal/controller/management_controller_test.go
@@ -104,7 +104,7 @@ var _ = Describe("Management Controller", func() {
 		helmReleaseName      = someComponentName // WARN: helm release name should be equal to the component name
 		helmReleaseNamespace = utils.DefaultSystemNamespace
 
-		someOtherHelmReleaseName = "managed-cluster-release-name"
+		someOtherHelmReleaseName = "cluster-deployment-release-name"
 
 		timeout  = time.Second * 10
 		interval = time.Millisecond * 250
@@ -196,7 +196,7 @@ var _ = Describe("Management Controller", func() {
 			}
 			Expect(k8sClient.Create(ctx, helmRelease)).To(Succeed())
 
-			By("Creating a HelmRelease object for some managed cluster")
+			By("Creating a HelmRelease object for some cluster deployment")
 			someOtherHelmRelease := &helmcontrollerv2.HelmRelease{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      someOtherHelmReleaseName,
@@ -204,7 +204,7 @@ var _ = Describe("Management Controller", func() {
 					OwnerReferences: []metav1.OwnerReference{
 						{
 							APIVersion: hmcmirantiscomv1alpha1.GroupVersion.String(),
-							Kind:       hmcmirantiscomv1alpha1.ManagedClusterKind,
+							Kind:       hmcmirantiscomv1alpha1.ClusterDeploymentKind,
 							Name:       "any-owner-ref",
 							UID:        types.UID("some-owner-uid"),
 						},
@@ -294,7 +294,7 @@ var _ = Describe("Management Controller", func() {
 
 			By("Checking the Management object does not have the removed component in its spec")
 			Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(mgmt), mgmt)).To(Succeed())
-			Expect(mgmt.Status.AvailableProviders).To(BeEmpty())
+			Expect(mgmt.Status.AvailableProviders).To(BeEquivalentTo(hmcmirantiscomv1alpha1.Providers{"infrastructure-internal"}))
 
 			By("Checking the other (managed) helm-release has not been removed")
 			Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(someOtherHelmRelease), someOtherHelmRelease)).To(Succeed())
diff --git a/internal/controller/multiclusterservice_controller.go b/internal/controller/multiclusterservice_controller.go
index d2bd10fd9..9ab6a7655 100644
--- a/internal/controller/multiclusterservice_controller.go
+++ b/internal/controller/multiclusterservice_controller.go
@@ -270,8 +270,8 @@ func (r *MultiClusterServiceReconciler) reconcileDelete(ctx context.Context, mcs
 
 // requeueSveltosProfileForClusterSummary asserts that the requested object has Sveltos ClusterSummary
 // type, fetches its owner (a Sveltos Profile or ClusterProfile object), and requeues its reference.
-// When used with ManagedClusterReconciler or MultiClusterServiceReconciler, this effectively
-// requeues a ManagedCluster or MultiClusterService object as these are referenced by the same
+// When used with ClusterDeploymentReconciler or MultiClusterServiceReconciler, this effectively
+// requeues a ClusterDeployment or MultiClusterService object as these are referenced by the same
 // namespace/name as the Sveltos Profile or ClusterProfile object that they create respectively.
 func requeueSveltosProfileForClusterSummary(ctx context.Context, obj client.Object) []ctrl.Request {
 	l := ctrl.LoggerFrom(ctx)
@@ -290,7 +290,7 @@ func requeueSveltosProfileForClusterSummary(ctx context.Context, obj client.Obje
 	}
 
 	// The Profile/ClusterProfile object has the same name as its
-	// owner object which is either ManagedCluster or MultiClusterService.
+	// owner object which is either ClusterDeployment or MultiClusterService.
 	req := client.ObjectKey{Name: ownerRef.Name}
 	if ownerRef.Kind == sveltosv1beta1.ProfileKind {
 		req.Namespace = obj.GetNamespace()
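The renamed comment describes the convention this mapping relies on: Sveltos Profile/ClusterProfile objects are created with the same namespace/name as the ClusterDeployment or MultiClusterService that owns them, so the owner reference alone is enough to rebuild a reconcile request. A condensed, illustrative sketch of that mapping (the real logic lives in `requeueSveltosProfileForClusterSummary`):

```go
package example

import (
	sveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// requestFromProfileOwner rebuilds the reconcile request for the object
// that created a Sveltos (Cluster)Profile. A namespaced Profile maps to
// a namespaced ClusterDeployment; a cluster-scoped ClusterProfile maps
// to a MultiClusterService, which needs no namespace.
func requestFromProfileOwner(obj client.Object, ownerRef metav1.OwnerReference) ctrl.Request {
	key := client.ObjectKey{Name: ownerRef.Name}
	if ownerRef.Kind == sveltosv1beta1.ProfileKind {
		key.Namespace = obj.GetNamespace()
	}
	return ctrl.Request{NamespacedName: key}
}
```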
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 18e71f837..1de9d3f6f 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -151,7 +151,7 @@ var _ = BeforeSuite(func() {
 	err = hmcmirantiscomv1alpha1.SetupIndexers(ctx, mgr)
 	Expect(err).NotTo(HaveOccurred())
 
-	err = (&hmcwebhook.ManagedClusterValidator{}).SetupWebhookWithManager(mgr)
+	err = (&hmcwebhook.ClusterDeploymentValidator{}).SetupWebhookWithManager(mgr)
 	Expect(err).NotTo(HaveOccurred())
 
 	err = (&hmcwebhook.MultiClusterServiceValidator{SystemNamespace: testSystemNamespace}).SetupWebhookWithManager(mgr)
diff --git a/internal/credspropagation/azure.go b/internal/credspropagation/azure.go
index 05eb6973a..38157d372 100644
--- a/internal/credspropagation/azure.go
+++ b/internal/credspropagation/azure.go
@@ -28,10 +28,10 @@ import (
 func PropagateAzureSecrets(ctx context.Context, cfg *PropagationCfg) error {
 	azureCluster := &capz.AzureCluster{}
 	if err := cfg.Client.Get(ctx, client.ObjectKey{
-		Name:      cfg.ManagedCluster.Name,
-		Namespace: cfg.ManagedCluster.Namespace,
+		Name:      cfg.ClusterDeployment.Name,
+		Namespace: cfg.ClusterDeployment.Namespace,
 	}, azureCluster); err != nil {
-		return fmt.Errorf("failed to get AzureCluster %s: %w", cfg.ManagedCluster.Name, err)
+		return fmt.Errorf("failed to get AzureCluster %s: %w", cfg.ClusterDeployment.Name, err)
 	}
 
 	azureClIdty := &capz.AzureClusterIdentity{}
diff --git a/internal/credspropagation/common.go b/internal/credspropagation/common.go
index 9d72e5759..c6bcf02ca 100644
--- a/internal/credspropagation/common.go
+++ b/internal/credspropagation/common.go
@@ -29,10 +29,10 @@ import (
 )
 
 type PropagationCfg struct {
-	Client          client.Client
-	ManagedCluster  *hmc.ManagedCluster
-	KubeconfSecret  *corev1.Secret
-	SystemNamespace string
+	Client            client.Client
+	ClusterDeployment *hmc.ClusterDeployment
+	KubeconfSecret    *corev1.Secret
+	SystemNamespace   string
 }
 
 func applyCCMConfigs(ctx context.Context, kubeconfSecret *corev1.Secret, objects ...client.Object) error {
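`PropagationCfg` carries the ClusterDeployment together with the child cluster's kubeconfig Secret, and `applyCCMConfigs` (the trailing context line above) pushes CCM objects through it. A sketch of what such a helper plausibly does, assuming the CAPI convention that the `<name>-kubeconfig` Secret stores the kubeconfig under the `value` data key; the apply options here are assumptions, not the patch's actual implementation:

```go
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyToChildCluster builds a client from the CAPI-style kubeconfig
// secret and server-side applies the given CCM objects to the child
// cluster.
func applyToChildCluster(ctx context.Context, kubeconfSecret *corev1.Secret, objects ...client.Object) error {
	restCfg, err := clientcmd.RESTConfigFromKubeConfig(kubeconfSecret.Data["value"])
	if err != nil {
		return fmt.Errorf("failed to build REST config from kubeconfig secret: %w", err)
	}
	childClient, err := client.New(restCfg, client.Options{})
	if err != nil {
		return fmt.Errorf("failed to create child cluster client: %w", err)
	}
	for _, obj := range objects {
		if err := childClient.Patch(ctx, obj, client.Apply, client.ForceOwnership, client.FieldOwner("hmc")); err != nil {
			return fmt.Errorf("failed to apply %s: %w", client.ObjectKeyFromObject(obj), err)
		}
	}
	return nil
}
```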
fmt.Errorf("failed to list VSphereMachines for cluster %s: %w", cfg.ClusterDeployment.Name, err) } ccmSecret, ccmConfig, err := generateVSphereCCMConfigs(vsphereCluster, vsphereSecret, &vsphereMachines.Items[0]) if err != nil { diff --git a/internal/sveltos/profile.go b/internal/sveltos/profile.go index 72e781e0e..776200c15 100644 --- a/internal/sveltos/profile.go +++ b/internal/sveltos/profile.go @@ -144,7 +144,7 @@ func GetHelmChartOpts(ctx context.Context, c client.Client, namespace string, se tmpl := &hmc.ServiceTemplate{} // Here we can use the same namespace for all services // because if the services slice is part of: - // 1. ManagedCluster: Then the referred template must be in its own namespace. + // 1. ClusterDeployment: Then the referred template must be in its own namespace. // 2. MultiClusterService: Then the referred template must be in system namespace. tmplRef := client.ObjectKey{Name: svc.Template, Namespace: namespace} if err := c.Get(ctx, tmplRef, tmpl); err != nil { diff --git a/internal/telemetry/event.go b/internal/telemetry/event.go index 81dd10971..9d806e65e 100644 --- a/internal/telemetry/event.go +++ b/internal/telemetry/event.go @@ -21,30 +21,30 @@ import ( ) const ( - managedClusterCreateEvent = "managed-cluster-create" - managedClusterHeartbeatEvent = "managed-cluster-heartbeat" + ClusterDeploymentCreateEvent = "cluster-deployment-create" + ClusterDeploymentHeartbeatEvent = "cluster-deployment-heartbeat" ) -func TrackManagedClusterCreate(id, managedClusterID, template string, dryRun bool) error { +func TrackClusterDeploymentCreate(id, clusterDeploymentID, template string, dryRun bool) error { props := map[string]any{ - "hmcVersion": build.Version, - "managedClusterID": managedClusterID, - "template": template, - "dryRun": dryRun, + "hmcVersion": build.Version, + "clusterDeploymentID": clusterDeploymentID, + "template": template, + "dryRun": dryRun, } - return TrackEvent(managedClusterCreateEvent, id, props) + return TrackEvent(ClusterDeploymentCreateEvent, id, props) } -func TrackManagedClusterHeartbeat(id, managedClusterID, clusterID, template, templateHelmChartVersion string, providers []string) error { +func TrackClusterDeploymentHeartbeat(id, clusterDeploymentID, clusterID, template, templateHelmChartVersion string, providers []string) error { props := map[string]any{ "hmcVersion": build.Version, - "managedClusterID": managedClusterID, + "clusterDeploymentID": clusterDeploymentID, "clusterID": clusterID, "template": template, "templateHelmChartVersion": templateHelmChartVersion, "providers": providers, } - return TrackEvent(managedClusterHeartbeatEvent, id, props) + return TrackEvent(ClusterDeploymentHeartbeatEvent, id, props) } func TrackEvent(name, id string, properties map[string]any) error { diff --git a/internal/telemetry/tracker.go b/internal/telemetry/tracker.go index bf8aec325..81fcfc76c 100644 --- a/internal/telemetry/tracker.go +++ b/internal/telemetry/tracker.go @@ -50,8 +50,8 @@ func (t *Tracker) Start(ctx context.Context) error { func (t *Tracker) Tick(ctx context.Context) { l := log.FromContext(ctx).WithName("telemetry tracker") - logger := l.WithValues("event", managedClusterHeartbeatEvent) - err := t.trackManagedClusterHeartbeat(ctx) + logger := l.WithValues("event", ClusterDeploymentHeartbeatEvent) + err := t.trackClusterDeploymentHeartbeat(ctx) if err != nil { logger.Error(err, "failed to track an event") } else { @@ -59,7 +59,7 @@ func (t *Tracker) Tick(ctx context.Context) { } } -func (t *Tracker) trackManagedClusterHeartbeat(ctx 
context.Context) error { +func (t *Tracker) trackClusterDeploymentHeartbeat(ctx context.Context) error { mgmt := &v1alpha1.Management{} if err := t.Get(ctx, client.ObjectKey{Name: v1alpha1.ManagementName}, mgmt); err != nil { return err @@ -76,26 +76,26 @@ func (t *Tracker) trackManagedClusterHeartbeat(ctx context.Context) error { } var errs error - managedClusters := &v1alpha1.ManagedClusterList{} - if err := t.List(ctx, managedClusters); err != nil { + clusterDeployments := &v1alpha1.ClusterDeploymentList{} + if err := t.List(ctx, clusterDeployments); err != nil { return err } - for _, managedCluster := range managedClusters.Items { - template := templates[managedCluster.Spec.Template] + for _, clusterDeployment := range clusterDeployments.Items { + template := templates[clusterDeployment.Spec.Template] // TODO: get k0s cluster ID once it's exposed in k0smotron API clusterID := "" - err := TrackManagedClusterHeartbeat( + err := TrackClusterDeploymentHeartbeat( string(mgmt.UID), - string(managedCluster.UID), + string(clusterDeployment.UID), clusterID, - managedCluster.Spec.Template, + clusterDeployment.Spec.Template, template.Spec.Helm.ChartVersion, template.Status.Providers, ) if err != nil { - errs = errors.Join(errs, fmt.Errorf("failed to track the heartbeat of the managedcluster %s/%s", managedCluster.Namespace, managedCluster.Name)) + errs = errors.Join(errs, fmt.Errorf("failed to track the heartbeat of the clusterDeployment %s/%s", clusterDeployment.Namespace, clusterDeployment.Name)) continue } } diff --git a/internal/webhook/accessmanagement_webhook_test.go b/internal/webhook/accessmanagement_webhook_test.go index 8dd007b2f..67b420992 100644 --- a/internal/webhook/accessmanagement_webhook_test.go +++ b/internal/webhook/accessmanagement_webhook_test.go @@ -60,7 +60,7 @@ func TestAccessManagementValidateCreate(t *testing.T) { c := fake.NewClientBuilder(). WithScheme(scheme.Scheme). WithRuntimeObjects(tt.existingObjects...). - WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.ManagedClusterTemplateIndexKey, v1alpha1.ExtractTemplateNameFromManagedCluster). + WithIndex(&v1alpha1.ClusterDeployment{}, v1alpha1.ClusterDeploymentTemplateIndexKey, v1alpha1.ExtractTemplateNameFromClusterDeployment). Build() validator := &AccessManagementValidator{Client: c, SystemNamespace: utils.DefaultSystemNamespace} warn, err := validator.ValidateCreate(ctx, tt.am) @@ -117,7 +117,7 @@ func TestAccessManagementValidateDelete(t *testing.T) { c := fake.NewClientBuilder(). WithScheme(scheme.Scheme). WithRuntimeObjects(tt.existingObjects...). - WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.ManagedClusterTemplateIndexKey, v1alpha1.ExtractTemplateNameFromManagedCluster). + WithIndex(&v1alpha1.ClusterDeployment{}, v1alpha1.ClusterDeploymentTemplateIndexKey, v1alpha1.ExtractTemplateNameFromClusterDeployment). 
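These tests wire the same field index that `SetupIndexers` registers on the real manager, so `List` calls with `MatchingFields` behave identically against the fake client. A sketch of the indexer pattern; the extractor body is an assumption about what `ExtractTemplateNameFromClusterDeployment` does, while the registration signature is the standard controller-runtime one:

```go
package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
)

// setupTemplateIndex indexes each ClusterDeployment by the
// ClusterTemplate it references, so watches and webhooks can cheaply
// list deployments per template.
func setupTemplateIndex(ctx context.Context, mgr ctrl.Manager) error {
	return mgr.GetFieldIndexer().IndexField(ctx, &hmc.ClusterDeployment{}, hmc.ClusterDeploymentTemplateIndexKey,
		func(o client.Object) []string {
			cd, ok := o.(*hmc.ClusterDeployment)
			if !ok {
				return nil
			}
			return []string{cd.Spec.Template}
		})
}
```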
diff --git a/internal/webhook/managedcluster_webhook.go b/internal/webhook/clusterdeployment_webhook.go
similarity index 57%
rename from internal/webhook/managedcluster_webhook.go
rename to internal/webhook/clusterdeployment_webhook.go
index 71603351f..ef8923c7c 100644
--- a/internal/webhook/managedcluster_webhook.go
+++ b/internal/webhook/clusterdeployment_webhook.go
@@ -33,111 +33,111 @@ import (
 	hmcv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1"
 )
 
-type ManagedClusterValidator struct {
+type ClusterDeploymentValidator struct {
 	client.Client
 }
 
-const invalidManagedClusterMsg = "the ManagedCluster is invalid"
+const invalidClusterDeploymentMsg = "the ClusterDeployment is invalid"
 
 var errClusterUpgradeForbidden = errors.New("cluster upgrade is forbidden")
 
-func (v *ManagedClusterValidator) SetupWebhookWithManager(mgr ctrl.Manager) error {
+func (v *ClusterDeploymentValidator) SetupWebhookWithManager(mgr ctrl.Manager) error {
 	v.Client = mgr.GetClient()
 	return ctrl.NewWebhookManagedBy(mgr).
-		For(&hmcv1alpha1.ManagedCluster{}).
+		For(&hmcv1alpha1.ClusterDeployment{}).
 		WithValidator(v).
 		WithDefaulter(v).
 		Complete()
 }
 
 var (
-	_ webhook.CustomValidator = &ManagedClusterValidator{}
-	_ webhook.CustomDefaulter = &ManagedClusterValidator{}
+	_ webhook.CustomValidator = &ClusterDeploymentValidator{}
+	_ webhook.CustomDefaulter = &ClusterDeploymentValidator{}
 )
 
 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (v *ManagedClusterValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
-	managedCluster, ok := obj.(*hmcv1alpha1.ManagedCluster)
+func (v *ClusterDeploymentValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
+	clusterDeployment, ok := obj.(*hmcv1alpha1.ClusterDeployment)
 	if !ok {
-		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj))
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ClusterDeployment but got a %T", obj))
 	}
 
-	template, err := v.getManagedClusterTemplate(ctx, managedCluster.Namespace, managedCluster.Spec.Template)
+	template, err := v.getClusterDeploymentTemplate(ctx, clusterDeployment.Namespace, clusterDeployment.Spec.Template)
 	if err != nil {
-		return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+		return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 	}
 
 	if err := isTemplateValid(template.GetCommonStatus()); err != nil {
-		return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+		return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 	}
 
-	if err := validateK8sCompatibility(ctx, v.Client, template, managedCluster); err != nil {
+	if err := validateK8sCompatibility(ctx, v.Client, template, clusterDeployment); err != nil {
 		return admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"}, fmt.Errorf("failed to validate k8s compatibility: %w", err)
 	}
 
-	if err := v.validateCredential(ctx, managedCluster, template); err != nil {
-		return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+	if err := v.validateCredential(ctx, clusterDeployment, template); err != nil {
+		return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 	}
 
-	if err := validateServices(ctx, v.Client, managedCluster.Namespace, managedCluster.Spec.Services); err != nil {
-		return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+	if err := validateServices(ctx, v.Client, clusterDeployment.Namespace, clusterDeployment.Spec.Services); err != nil {
+		return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 	}
 
 	return nil, nil
 }
 
 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (v *ManagedClusterValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
-	oldManagedCluster, ok := oldObj.(*hmcv1alpha1.ManagedCluster)
+func (v *ClusterDeploymentValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+	oldClusterDeployment, ok := oldObj.(*hmcv1alpha1.ClusterDeployment)
 	if !ok {
-		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", oldObj))
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ClusterDeployment but got a %T", oldObj))
 	}
 
-	newManagedCluster, ok := newObj.(*hmcv1alpha1.ManagedCluster)
+	newClusterDeployment, ok := newObj.(*hmcv1alpha1.ClusterDeployment)
 	if !ok {
-		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", newObj))
+		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ClusterDeployment but got a %T", newObj))
 	}
 
-	oldTemplate := oldManagedCluster.Spec.Template
-	newTemplate := newManagedCluster.Spec.Template
+	oldTemplate := oldClusterDeployment.Spec.Template
+	newTemplate := newClusterDeployment.Spec.Template
 
-	template, err := v.getManagedClusterTemplate(ctx, newManagedCluster.Namespace, newTemplate)
+	template, err := v.getClusterDeploymentTemplate(ctx, newClusterDeployment.Namespace, newTemplate)
 	if err != nil {
-		return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+		return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 	}
 
 	if oldTemplate != newTemplate {
-		if !slices.Contains(oldManagedCluster.Status.AvailableUpgrades, newTemplate) {
+		if !slices.Contains(oldClusterDeployment.Status.AvailableUpgrades, newTemplate) {
 			msg := fmt.Sprintf("Cluster can't be upgraded from %s to %s. This upgrade sequence is not allowed", oldTemplate, newTemplate)
 			return admission.Warnings{msg}, errClusterUpgradeForbidden
 		}
 
 		if err := isTemplateValid(template.GetCommonStatus()); err != nil {
-			return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+			return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 		}
 
-		if err := validateK8sCompatibility(ctx, v.Client, template, newManagedCluster); err != nil {
+		if err := validateK8sCompatibility(ctx, v.Client, template, newClusterDeployment); err != nil {
 			return admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"}, fmt.Errorf("failed to validate k8s compatibility: %w", err)
 		}
 	}
 
-	if err := v.validateCredential(ctx, newManagedCluster, template); err != nil {
-		return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+	if err := v.validateCredential(ctx, newClusterDeployment, template); err != nil {
+		return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 	}
 
-	if err := validateServices(ctx, v.Client, newManagedCluster.Namespace, newManagedCluster.Spec.Services); err != nil {
-		return nil, fmt.Errorf("%s: %w", invalidManagedClusterMsg, err)
+	if err := validateServices(ctx, v.Client, newClusterDeployment.Namespace, newClusterDeployment.Spec.Services); err != nil {
+		return nil, fmt.Errorf("%s: %w", invalidClusterDeploymentMsg, err)
 	}
 
 	return nil, nil
 }
 
-func validateK8sCompatibility(ctx context.Context, cl client.Client, template *hmcv1alpha1.ClusterTemplate, mc *hmcv1alpha1.ManagedCluster) error {
+func validateK8sCompatibility(ctx context.Context, cl client.Client, template *hmcv1alpha1.ClusterTemplate, mc *hmcv1alpha1.ClusterDeployment) error {
 	if len(mc.Spec.Services) == 0 || template.Status.KubernetesVersion == "" {
 		return nil // nothing to do
 	}
 
 	mcVersion, err := semver.NewVersion(template.Status.KubernetesVersion)
 	if err != nil { // should never happen
-		return fmt.Errorf("failed to parse k8s version %s of the ManagedCluster %s/%s: %w", template.Status.KubernetesVersion, mc.Namespace, mc.Name, err)
+		return fmt.Errorf("failed to parse k8s version %s of the ClusterDeployment %s/%s: %w", template.Status.KubernetesVersion, mc.Namespace, mc.Name, err)
 	}
 
 	for _, v := range mc.Spec.Services {
@@ -161,7 +161,7 @@ func validateK8sCompatibility(ctx context.Context, cl client.Client, template *h
 		}
 
 		if !tplConstraint.Check(mcVersion) {
-			return fmt.Errorf("k8s version %s of the ManagedCluster %s/%s does not satisfy constrained version %s from the ServiceTemplate %s/%s",
+			return fmt.Errorf("k8s version %s of the ClusterDeployment %s/%s does not satisfy constrained version %s from the ServiceTemplate %s/%s",
 				template.Status.KubernetesVersion, mc.Namespace, mc.Name,
 				constraint, mc.Namespace, v.Template)
 		}
@@ -171,26 +171,26 @@ func validateK8sCompatibility(ctx context.Context, cl client.Client, template *h
 }
 
 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (*ManagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
+func (*ClusterDeploymentValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) {
 	return nil, nil
 }
 
 // Default implements webhook.Defaulter so a webhook will be registered for the type.
-func (v *ManagedClusterValidator) Default(ctx context.Context, obj runtime.Object) error {
-	managedCluster, ok := obj.(*hmcv1alpha1.ManagedCluster)
+func (v *ClusterDeploymentValidator) Default(ctx context.Context, obj runtime.Object) error {
+	clusterDeployment, ok := obj.(*hmcv1alpha1.ClusterDeployment)
 	if !ok {
-		return apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj))
+		return apierrors.NewBadRequest(fmt.Sprintf("expected ClusterDeployment but got a %T", obj))
 	}
 
 	// Only apply defaults when there's no configuration provided;
 	// if template ref is empty, then nothing to default
-	if managedCluster.Spec.Config != nil || managedCluster.Spec.Template == "" {
+	if clusterDeployment.Spec.Config != nil || clusterDeployment.Spec.Template == "" {
 		return nil
 	}
 
-	template, err := v.getManagedClusterTemplate(ctx, managedCluster.Namespace, managedCluster.Spec.Template)
+	template, err := v.getClusterDeploymentTemplate(ctx, clusterDeployment.Namespace, clusterDeployment.Spec.Template)
 	if err != nil {
-		return fmt.Errorf("could not get template for the managedcluster: %w", err)
+		return fmt.Errorf("could not get template for the ClusterDeployment: %w", err)
 	}
 
 	if err := isTemplateValid(template.GetCommonStatus()); err != nil {
@@ -201,18 +201,18 @@ func (v *ManagedClusterValidator) Default(ctx context.Context, obj runtime.Objec
 		return nil
 	}
 
-	managedCluster.Spec.DryRun = true
-	managedCluster.Spec.Config = &apiextensionsv1.JSON{Raw: template.Status.Config.Raw}
+	clusterDeployment.Spec.DryRun = true
+	clusterDeployment.Spec.Config = &apiextensionsv1.JSON{Raw: template.Status.Config.Raw}
 
 	return nil
 }
 
-func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, templateNamespace, templateName string) (tpl *hmcv1alpha1.ClusterTemplate, err error) {
+func (v *ClusterDeploymentValidator) getClusterDeploymentTemplate(ctx context.Context, templateNamespace, templateName string) (tpl *hmcv1alpha1.ClusterTemplate, err error) {
 	tpl = new(hmcv1alpha1.ClusterTemplate)
 	return tpl, v.Get(ctx, client.ObjectKey{Namespace: templateNamespace, Name: templateName}, tpl)
 }
 
-func (v *ManagedClusterValidator) getManagedClusterCredential(ctx context.Context, credNamespace, credName string) (*hmcv1alpha1.Credential, error) {
+func (v *ClusterDeploymentValidator) getClusterDeploymentCredential(ctx context.Context, credNamespace, credName string) (*hmcv1alpha1.Credential, error) {
 	cred := &hmcv1alpha1.Credential{}
 	credRef := client.ObjectKey{
 		Name:      credName,
@@ -232,7 +232,7 @@ func isTemplateValid(status *hmcv1alpha1.TemplateStatusCommon) error {
 	return nil
 }
 
-func (v *ManagedClusterValidator) validateCredential(ctx context.Context, managedCluster *hmcv1alpha1.ManagedCluster, template *hmcv1alpha1.ClusterTemplate) error {
+func (v *ClusterDeploymentValidator) validateCredential(ctx context.Context, clusterDeployment *hmcv1alpha1.ClusterDeployment, template *hmcv1alpha1.ClusterTemplate) error {
 	if len(template.Status.Providers) == 0 {
 		return fmt.Errorf("template %q has no providers defined", template.Name)
 	}
@@ -249,7 +249,7 @@ func (v *ManagedClusterValidator) validateCredential(ctx context.Context, manage
 		return fmt.Errorf("template %q has no infrastructure providers defined", template.Name)
 	}
 
-	cred, err := v.getManagedClusterCredential(ctx, managedCluster.Namespace, managedCluster.Spec.Credential)
+	cred, err := v.getClusterDeploymentCredential(ctx, clusterDeployment.Namespace, clusterDeployment.Spec.Credential)
 	if err != nil {
 		return err
 	}
@@ -284,6 +284,10 @@ func isCredMatchTemplate(cred *hmcv1alpha1.Credential, template *hmcv1alpha1.Clu
 		if idtyKind != "VSphereClusterIdentity" {
 			return errMsg(provider)
 		}
+	case "infrastructure-internal":
+		if idtyKind != "Secret" {
+			return errMsg(provider)
+		}
 	default:
 		if strings.HasPrefix(provider, "infrastructure-") {
 			return fmt.Errorf("unsupported infrastructure provider %s", provider)
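The new `infrastructure-internal` case extends the credential check: for the internal provider, the Credential's identityRef must point at a plain Secret. The rule can be summarized as a table (illustrative only; the patch itself uses a switch, and the AWS/Azure kinds are inferred from identities that appear elsewhere in this patch, not stated here):

```go
package example

// expectedIdentityKind maps an infrastructure provider name to the
// ClusterIdentity kind its Credential is expected to reference.
var expectedIdentityKind = map[string]string{
	"infrastructure-aws":      "AWSClusterStaticIdentity", // inferred from the test fixtures
	"infrastructure-azure":    "AzureClusterIdentity",     // inferred from internal/credspropagation/azure.go
	"infrastructure-vsphere":  "VSphereClusterIdentity",
	"infrastructure-internal": "Secret", // the case added by this patch
}
```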
diff --git a/internal/webhook/managedcluster_webhook_test.go b/internal/webhook/clusterdeployment_webhook_test.go
similarity index 63%
rename from internal/webhook/managedcluster_webhook_test.go
rename to internal/webhook/clusterdeployment_webhook_test.go
index 9e9986727..993f35390 100644
--- a/internal/webhook/managedcluster_webhook_test.go
+++ b/internal/webhook/clusterdeployment_webhook_test.go
@@ -27,8 +27,8 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"github.com/Mirantis/hmc/api/v1alpha1"
+	"github.com/Mirantis/hmc/test/objects/clusterdeployment"
 	"github.com/Mirantis/hmc/test/objects/credential"
-	"github.com/Mirantis/hmc/test/objects/managedcluster"
 	"github.com/Mirantis/hmc/test/objects/management"
 	"github.com/Mirantis/hmc/test/objects/template"
 	"github.com/Mirantis/hmc/test/scheme"
@@ -60,7 +60,7 @@ var (
 	)
 )
 
-func TestManagedClusterValidateCreate(t *testing.T) {
+func TestClusterDeploymentValidateCreate(t *testing.T) {
 	g := NewWithT(t)
 
 	ctx := admission.NewContextWithRequest(context.Background(), admission.Request{
@@ -70,22 +70,22 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 	})
 
 	tests := []struct {
-		name            string
-		managedCluster  *v1alpha1.ManagedCluster
-		existingObjects []runtime.Object
-		err             string
-		warnings        admission.Warnings
+		name              string
+		ClusterDeployment *v1alpha1.ClusterDeployment
+		existingObjects   []runtime.Object
+		err               string
+		warnings          admission.Warnings
 	}{
 		{
-			name:           "should fail if the template is unset",
-			managedCluster: managedcluster.NewManagedCluster(),
-			err:            "the ManagedCluster is invalid: clustertemplates.hmc.mirantis.com \"\" not found",
+			name:              "should fail if the template is unset",
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(),
+			err:               "the ClusterDeployment is invalid: clustertemplates.hmc.mirantis.com \"\" not found",
 		},
 		{
-			name: "should fail if the ClusterTemplate is not found in the ManagedCluster's namespace",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
+			name: "should fail if the ClusterTemplate is not found in the ClusterDeployment's namespace",
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -95,14 +95,14 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					template.WithNamespace(testNamespace),
 				),
 			},
-			err: fmt.Sprintf("the ManagedCluster is invalid: clustertemplates.hmc.mirantis.com \"%s\" not found", testTemplateName),
+			err: fmt.Sprintf("the ClusterDeployment is invalid: clustertemplates.hmc.mirantis.com \"%s\" not found", testTemplateName),
 		},
 		{
 			name: "should fail if the ServiceTemplates are not found in same namespace",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithServiceTemplate(testSvcTemplate1Name),
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithServiceTemplate(testSvcTemplate1Name),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -121,13 +121,13 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					template.WithNamespace("othernamespace"),
 				),
 			},
-			err: fmt.Sprintf("the ManagedCluster is invalid: servicetemplates.hmc.mirantis.com \"%s\" not found", testSvcTemplate1Name),
+			err: fmt.Sprintf("the ClusterDeployment is invalid: servicetemplates.hmc.mirantis.com \"%s\" not found", testSvcTemplate1Name),
 		},
 		{
 			name: "should fail if the cluster template was found but is invalid (some validation error)",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -140,14 +140,14 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					}),
 				),
 			},
-			err: "the ManagedCluster is invalid: the template is not valid: validation error example",
+			err: "the ClusterDeployment is invalid: the template is not valid: validation error example",
 		},
 		{
 			name: "should fail if the service templates were found but are invalid (some validation error)",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithServiceTemplate(testSvcTemplate1Name),
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithServiceTemplate(testSvcTemplate1Name),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -169,14 +169,14 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					}),
 				),
 			},
-			err: "the ManagedCluster is invalid: the template is not valid: validation error example",
+			err: "the ClusterDeployment is invalid: the template is not valid: validation error example",
 		},
 		{
 			name: "should succeed",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithServiceTemplate(testSvcTemplate1Name),
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithServiceTemplate(testSvcTemplate1Name),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -198,9 +198,9 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 		},
 		{
 			name: "cluster template k8s version does not satisfy service template constraints",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithServiceTemplate(testTemplateName),
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithServiceTemplate(testTemplateName),
 			),
 			existingObjects: []runtime.Object{
 				cred,
@@ -220,12 +220,12 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
 				),
 			},
-			err:      fmt.Sprintf(`failed to validate k8s compatibility: k8s version v1.30.0 of the ManagedCluster default/%s does not satisfy constrained version <1.30 from the ServiceTemplate default/%s`, managedcluster.DefaultName, testTemplateName),
+			err:      fmt.Sprintf(`failed to validate k8s compatibility: k8s version v1.30.0 of the ClusterDeployment default/%s does not satisfy constrained version <1.30 from the ServiceTemplate default/%s`, clusterdeployment.DefaultName, testTemplateName),
 			warnings: admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"},
 		},
 		{
-			name:           "should fail if the credential is unset",
-			managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)),
+			name:              "should fail if the credential is unset",
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewClusterTemplate(
@@ -238,13 +238,13 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
 				),
 			},
-			err: "the ManagedCluster is invalid: credentials.hmc.mirantis.com \"\" not found",
+			err: "the ClusterDeployment is invalid: credentials.hmc.mirantis.com \"\" not found",
 		},
 		{
 			name: "should fail if credential is not Ready",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -267,13 +267,13 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
 				),
 			},
-			err: "the ManagedCluster is invalid: credential is not Ready",
+			err: "the ClusterDeployment is invalid: credential is not Ready",
 		},
 		{
 			name: "should fail if credential and template providers doesn't match",
-			managedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
+			ClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				cred,
@@ -294,14 +294,14 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
 				),
 			},
-			err: "the ManagedCluster is invalid: wrong kind of the ClusterIdentity \"AWSClusterStaticIdentity\" for provider \"infrastructure-azure\"",
+			err: "the ClusterDeployment is invalid: wrong kind of the ClusterIdentity \"AWSClusterStaticIdentity\" for provider \"infrastructure-azure\"",
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build()
-			validator := &ManagedClusterValidator{Client: c}
-			warn, err := validator.ValidateCreate(ctx, tt.managedCluster)
+			validator := &ClusterDeploymentValidator{Client: c}
+			warn, err := validator.ValidateCreate(ctx, tt.ClusterDeployment)
 			if tt.err != "" {
 				g.Expect(err).To(HaveOccurred())
 				if err.Error() != tt.err {
@@ -316,7 +316,7 @@ func TestManagedClusterValidateCreate(t *testing.T) {
 	}
 }
 
-func TestManagedClusterValidateUpdate(t *testing.T) {
+func TestClusterDeploymentValidateUpdate(t *testing.T) {
 	const (
 		upgradeTargetTemplateName  = "upgrade-target-template"
 		unmanagedByHMCTemplateName = "unmanaged-template"
@@ -331,20 +331,20 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 	})
 
 	tests := []struct {
-		name              string
-		oldManagedCluster *v1alpha1.ManagedCluster
-		newManagedCluster *v1alpha1.ManagedCluster
-		existingObjects   []runtime.Object
-		err               string
-		warnings          admission.Warnings
+		name                 string
+		oldClusterDeployment *v1alpha1.ClusterDeployment
+		newClusterDeployment *v1alpha1.ClusterDeployment
+		existingObjects      []runtime.Object
+		err                  string
+		warnings             admission.Warnings
 	}{
 		{
 			name: "update spec.template: should fail if the new cluster template was found but is invalid (some validation error)",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithAvailableUpgrades([]string{newTemplateName}),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithAvailableUpgrades([]string{newTemplateName}),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(newTemplateName)),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(newTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewClusterTemplate(
@@ -355,18 +355,18 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 					}),
 				),
 			},
-			err: "the ManagedCluster is invalid: the template is not valid: validation error example",
+			err: "the ClusterDeployment is invalid: the template is not valid: validation error example",
 		},
 		{
 			name: "update spec.template: should fail if the template is not in the list of available",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithAvailableUpgrades([]string{}),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithAvailableUpgrades([]string{}),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(upgradeTargetTemplateName),
-				managedcluster.WithCredential(testCredentialName),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(upgradeTargetTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				mgmt, cred,
@@ -394,14 +394,14 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 		},
 		{
 			name: "update spec.template: should succeed if the template is in the list of available",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithAvailableUpgrades([]string{newTemplateName}),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithAvailableUpgrades([]string{newTemplateName}),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(newTemplateName),
-				managedcluster.WithCredential(testCredentialName),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(newTemplateName),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				mgmt, cred,
@@ -427,15 +427,15 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 		},
 		{
 			name: "should succeed if spec.template is not changed",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"foo":"bar"}`),
-				managedcluster.WithCredential(testCredentialName),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"foo":"bar"}`),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"a":"b"}`),
-				managedcluster.WithCredential(testCredentialName),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"a":"b"}`),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -456,16 +456,16 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 		},
 		{
 			name: "should succeed if serviceTemplates are added",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"foo":"bar"}`),
-				managedcluster.WithCredential(testCredentialName),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"foo":"bar"}`),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"a":"b"}`),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithServiceTemplate(testSvcTemplate1Name),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"a":"b"}`),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithServiceTemplate(testSvcTemplate1Name),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -490,16 +490,16 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 		},
 		{
 			name: "should succeed if serviceTemplates are removed",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"foo":"bar"}`),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithServiceTemplate(testSvcTemplate1Name),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"foo":"bar"}`),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithServiceTemplate(testSvcTemplate1Name),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"a":"b"}`),
-				managedcluster.WithCredential(testCredentialName),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"a":"b"}`),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -524,16 +524,16 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 		},
 		{
 			name: "should fail if serviceTemplates are not in the same namespace",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"foo":"bar"}`),
-				managedcluster.WithCredential(testCredentialName),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"foo":"bar"}`),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"a":"b"}`),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithServiceTemplate(testSvcTemplate1Name),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"a":"b"}`),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithServiceTemplate(testSvcTemplate1Name),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -556,20 +556,20 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
 				),
 			},
-			err: fmt.Sprintf("the ManagedCluster is invalid: servicetemplates.hmc.mirantis.com \"%s\" not found", testSvcTemplate1Name),
+			err: fmt.Sprintf("the ClusterDeployment is invalid: servicetemplates.hmc.mirantis.com \"%s\" not found", testSvcTemplate1Name),
 		},
 		{
 			name: "should fail if the ServiceTemplates were found but are invalid",
-			oldManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"foo":"bar"}`),
-				managedcluster.WithCredential(testCredentialName),
+			oldClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"foo":"bar"}`),
+				clusterdeployment.WithCredential(testCredentialName),
 			),
-			newManagedCluster: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(`{"a":"b"}`),
-				managedcluster.WithCredential(testCredentialName),
-				managedcluster.WithServiceTemplate(testSvcTemplate1Name),
+			newClusterDeployment: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(`{"a":"b"}`),
+				clusterdeployment.WithCredential(testCredentialName),
+				clusterdeployment.WithServiceTemplate(testSvcTemplate1Name),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
@@ -594,14 +594,14 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 					}),
 				),
 			},
-			err: "the ManagedCluster is invalid: the template is not valid: validation error example",
+			err: "the ClusterDeployment is invalid: the template is not valid: validation error example",
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build()
-			validator := &ManagedClusterValidator{Client: c}
-			warn, err := validator.ValidateUpdate(ctx, tt.oldManagedCluster, tt.newManagedCluster)
+			validator := &ClusterDeploymentValidator{Client: c}
+			warn, err := validator.ValidateUpdate(ctx, tt.oldClusterDeployment, tt.newClusterDeployment)
 			if tt.err != "" {
 				g.Expect(err).To(HaveOccurred())
 				if err.Error() != tt.err {
@@ -616,29 +616,29 @@ func TestManagedClusterValidateUpdate(t *testing.T) {
 	}
 }
 
-func TestManagedClusterDefault(t *testing.T) {
+func TestClusterDeploymentDefault(t *testing.T) {
 	g := NewWithT(t)
 
 	ctx := context.Background()
 
-	managedClusterConfig := `{"foo":"bar"}`
+	clusterDeploymentConfig := `{"foo":"bar"}`
 
 	tests := []struct {
 		name            string
-		input           *v1alpha1.ManagedCluster
-		output          *v1alpha1.ManagedCluster
+		input           *v1alpha1.ClusterDeployment
+		output          *v1alpha1.ClusterDeployment
 		existingObjects []runtime.Object
 		err             string
 	}{
 		{
 			name:   "should not set defaults if the config is provided",
-			input:  managedcluster.NewManagedCluster(managedcluster.WithConfig(managedClusterConfig)),
-			output: managedcluster.NewManagedCluster(managedcluster.WithConfig(managedClusterConfig)),
+			input:  clusterdeployment.NewClusterDeployment(clusterdeployment.WithConfig(clusterDeploymentConfig)),
+			output: clusterdeployment.NewClusterDeployment(clusterdeployment.WithConfig(clusterDeploymentConfig)),
 		},
 		{
 			name:   "should not set defaults: template is invalid",
-			input:  managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)),
-			output: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)),
+			input:  clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(testTemplateName)),
+			output: clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewClusterTemplate(
@@ -653,8 +653,8 @@ func TestManagedClusterDefault(t *testing.T) {
 		},
 		{
 			name:   "should not set defaults: config in template status is unset",
-			input:  managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)),
-			output: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)),
+			input:  clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(testTemplateName)),
+			output: clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(testTemplateName)),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewClusterTemplate(
@@ -665,18 +665,18 @@ func TestManagedClusterDefault(t *testing.T) {
 		},
 		{
 			name:  "should set defaults",
-			input: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)),
-			output: managedcluster.NewManagedCluster(
-				managedcluster.WithClusterTemplate(testTemplateName),
-				managedcluster.WithConfig(managedClusterConfig),
-				managedcluster.WithDryRun(true),
+			input: clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(testTemplateName)),
+			output: clusterdeployment.NewClusterDeployment(
+				clusterdeployment.WithClusterTemplate(testTemplateName),
+				clusterdeployment.WithConfig(clusterDeploymentConfig),
+				clusterdeployment.WithDryRun(true),
 			),
 			existingObjects: []runtime.Object{
 				mgmt,
 				template.NewClusterTemplate(
 					template.WithName(testTemplateName),
 					template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}),
-					template.WithConfigStatus(managedClusterConfig),
+					template.WithConfigStatus(clusterDeploymentConfig),
 				),
 			},
 		},
@@ -685,7 +685,7 @@ func TestManagedClusterDefault(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build()
-			validator := &ManagedClusterValidator{Client: c}
+			validator := &ClusterDeploymentValidator{Client: c}
 			err := validator.Default(ctx, tt.input)
 			if tt.err != "" {
 				g.Expect(err).To(HaveOccurred())
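All of the upgrade cases above reduce to one gate in `ValidateUpdate`: a `spec.template` change is accepted only when the target template is listed in `status.availableUpgrades` of the old object. The rule in isolation, as a sketch with illustrative names:

```go
package example

import "slices"

// upgradeAllowed mirrors the template-change gate: switching templates
// is only permitted along an advertised upgrade path.
func upgradeAllowed(availableUpgrades []string, oldTemplate, newTemplate string) bool {
	if oldTemplate == newTemplate {
		return true // not an upgrade, nothing to gate
	}
	return slices.Contains(availableUpgrades, newTemplate)
}
```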
g.Expect(err).To(HaveOccurred()) diff --git a/internal/webhook/management_webhook.go b/internal/webhook/management_webhook.go index 516ebf93a..51a5764a8 100644 --- a/internal/webhook/management_webhook.go +++ b/internal/webhook/management_webhook.go @@ -159,18 +159,18 @@ func checkComponentsRemoval(ctx context.Context, cl client.Client, oldMgmt, newM } for _, cltpl := range clusterTemplates.Items { - mcls := new(hmcv1alpha1.ManagedClusterList) + mcls := new(hmcv1alpha1.ClusterDeploymentList) if err := cl.List(ctx, mcls, - client.MatchingFields{hmcv1alpha1.ManagedClusterTemplateIndexKey: cltpl.Name}, + client.MatchingFields{hmcv1alpha1.ClusterDeploymentTemplateIndexKey: cltpl.Name}, client.Limit(1)); err != nil { - return fmt.Errorf("failed to list ManagedClusters: %w", err) + return fmt.Errorf("failed to list ClusterDeployments: %w", err) } if len(mcls.Items) == 0 { continue } - return fmt.Errorf("provider %s is required by at least one ManagedCluster (%s) and cannot be removed from the Management %s", providerName, client.ObjectKeyFromObject(&mcls.Items[0]), newMgmt.Name) + return fmt.Errorf("provider %s is required by at least one ClusterDeployment (%s) and cannot be removed from the Management %s", providerName, client.ObjectKeyFromObject(&mcls.Items[0]), newMgmt.Name) } } @@ -242,13 +242,13 @@ func getIncompatibleContracts(ctx context.Context, cl client.Client, mgmt *hmcv1 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (v *ManagementValidator) ValidateDelete(ctx context.Context, _ runtime.Object) (admission.Warnings, error) { - managedClusters := &hmcv1alpha1.ManagedClusterList{} - err := v.Client.List(ctx, managedClusters, client.Limit(1)) + clusterDeployments := &hmcv1alpha1.ClusterDeploymentList{} + err := v.Client.List(ctx, clusterDeployments, client.Limit(1)) if err != nil { return nil, err } - if len(managedClusters.Items) > 0 { - return admission.Warnings{"The Management object can't be removed if ManagedCluster objects still exist"}, errManagementDeletionForbidden + if len(clusterDeployments.Items) > 0 { + return admission.Warnings{"The Management object can't be removed if ClusterDeployment objects still exist"}, errManagementDeletionForbidden } return nil, nil } diff --git a/internal/webhook/management_webhook_test.go b/internal/webhook/management_webhook_test.go index 054bed6ec..6c2085d7d 100644 --- a/internal/webhook/management_webhook_test.go +++ b/internal/webhook/management_webhook_test.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/Mirantis/hmc/api/v1alpha1" - "github.com/Mirantis/hmc/test/objects/managedcluster" + "github.com/Mirantis/hmc/test/objects/clusterdeployment" "github.com/Mirantis/hmc/test/objects/management" "github.com/Mirantis/hmc/test/objects/release" "github.com/Mirantis/hmc/test/objects/template" @@ -197,10 +197,10 @@ func TestManagementValidateUpdate(t *testing.T) { template.NewProviderTemplate(template.WithProvidersStatus(infraAWSProvider)), template.NewProviderTemplate(template.WithName(release.DefaultCAPITemplateName)), template.NewClusterTemplate(template.WithProvidersStatus(infraAWSProvider)), - managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(template.DefaultName)), + clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(template.DefaultName)), }, warnings: admission.Warnings{"Some of the providers cannot be removed"}, - err: fmt.Sprintf(`Management "%s" is invalid: spec.providers: Forbidden: provider %s 
is required by at least one ManagedCluster (%s/%s) and cannot be removed from the Management %s`, management.DefaultName, infraAWSProvider, managedcluster.DefaultNamespace, managedcluster.DefaultName, management.DefaultName), + err: fmt.Sprintf(`Management "%s" is invalid: spec.providers: Forbidden: provider %s is required by at least one ClusterDeployment (%s/%s) and cannot be removed from the Management %s`, management.DefaultName, infraAWSProvider, clusterdeployment.DefaultNamespace, clusterdeployment.DefaultName, management.DefaultName), }, { name: "managed cluster does not use the removed provider, should succeed", @@ -216,7 +216,7 @@ func TestManagementValidateUpdate(t *testing.T) { template.NewProviderTemplate(template.WithProvidersStatus(infraAWSProvider)), template.NewProviderTemplate(template.WithName(release.DefaultCAPITemplateName)), template.NewClusterTemplate(template.WithProvidersStatus(infraOtherProvider)), - managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(template.DefaultName)), + clusterdeployment.NewClusterDeployment(clusterdeployment.WithClusterTemplate(template.DefaultName)), }, }, { @@ -372,7 +372,7 @@ func TestManagementValidateUpdate(t *testing.T) { WithScheme(scheme.Scheme). WithRuntimeObjects(tt.existingObjects...). WithIndex(&v1alpha1.ClusterTemplate{}, v1alpha1.ClusterTemplateProvidersIndexKey, v1alpha1.ExtractProvidersFromClusterTemplate). - WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.ManagedClusterTemplateIndexKey, v1alpha1.ExtractTemplateNameFromManagedCluster). + WithIndex(&v1alpha1.ClusterDeployment{}, v1alpha1.ClusterDeploymentTemplateIndexKey, v1alpha1.ExtractTemplateNameFromClusterDeployment). Build() validator := &ManagementValidator{Client: c} @@ -402,10 +402,10 @@ func TestManagementValidateDelete(t *testing.T) { warnings admission.Warnings }{ { - name: "should fail if ManagedCluster objects exist", + name: "should fail if ClusterDeployment objects exist", management: management.NewManagement(), - existingObjects: []runtime.Object{managedcluster.NewManagedCluster()}, - warnings: admission.Warnings{"The Management object can't be removed if ManagedCluster objects still exist"}, + existingObjects: []runtime.Object{clusterdeployment.NewClusterDeployment()}, + warnings: admission.Warnings{"The Management object can't be removed if ClusterDeployment objects still exist"}, err: "management deletion is forbidden", }, { diff --git a/internal/webhook/template_webhook.go b/internal/webhook/template_webhook.go index 9e73d7791..0b4061bfb 100644 --- a/internal/webhook/template_webhook.go +++ b/internal/webhook/template_webhook.go @@ -82,7 +82,7 @@ func (v *ClusterTemplateValidator) ValidateDelete(ctx context.Context, obj runti return nil, err } if inUseByCluster { - return admission.Warnings{fmt.Sprintf("The %s object can't be removed if ManagedCluster objects referencing it still exist", v.templateKind)}, errTemplateDeletionForbidden + return admission.Warnings{fmt.Sprintf("The %s object can't be removed if ClusterDeployment objects referencing it still exist", v.templateKind)}, errTemplateDeletionForbidden } owners := getOwnersWithKind(template, v.templateChainKind) @@ -141,7 +141,7 @@ func (v *ServiceTemplateValidator) ValidateDelete(ctx context.Context, obj runti return nil, fmt.Errorf("failed to check if the ServiceTemplate %s/%s is in use: %w", tmpl.Namespace, tmpl.Name, err) } if inUseByCluster { - return admission.Warnings{fmt.Sprintf("The %s object can't be removed if ManagedCluster objects referencing it still exist", 
v.templateKind)}, errTemplateDeletionForbidden + return admission.Warnings{fmt.Sprintf("The %s object can't be removed if ClusterDeployment objects referencing it still exist", v.templateKind)}, errTemplateDeletionForbidden } owners := getOwnersWithKind(tmpl, v.templateChainKind) @@ -238,21 +238,21 @@ func (v TemplateValidator) templateIsInUseByCluster(ctx context.Context, templat switch v.templateKind { case v1alpha1.ClusterTemplateKind: - key = v1alpha1.ManagedClusterTemplateIndexKey + key = v1alpha1.ClusterDeploymentTemplateIndexKey case v1alpha1.ServiceTemplateKind: - key = v1alpha1.ManagedClusterServiceTemplatesIndexKey + key = v1alpha1.ClusterDeploymentServiceTemplatesIndexKey default: return false, fmt.Errorf("invalid Template kind %s. Supported values are: %s and %s", v.templateKind, v1alpha1.ClusterTemplateKind, v1alpha1.ServiceTemplateKind) } - managedClusters := &v1alpha1.ManagedClusterList{} - if err := v.Client.List(ctx, managedClusters, + clusterDeployments := &v1alpha1.ClusterDeploymentList{} + if err := v.Client.List(ctx, clusterDeployments, client.InNamespace(template.GetNamespace()), client.MatchingFields{key: template.GetName()}, client.Limit(1)); err != nil { return false, err } - if len(managedClusters.Items) > 0 { + if len(clusterDeployments.Items) > 0 { return true, nil } return false, nil diff --git a/internal/webhook/template_webhook_test.go b/internal/webhook/template_webhook_test.go index 0c8982e61..d64bf7f6c 100644 --- a/internal/webhook/template_webhook_test.go +++ b/internal/webhook/template_webhook_test.go @@ -26,7 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/Mirantis/hmc/api/v1alpha1" - "github.com/Mirantis/hmc/test/objects/managedcluster" + "github.com/Mirantis/hmc/test/objects/clusterdeployment" "github.com/Mirantis/hmc/test/objects/management" "github.com/Mirantis/hmc/test/objects/multiclusterservice" "github.com/Mirantis/hmc/test/objects/release" @@ -116,7 +116,7 @@ func TestProviderTemplateValidateDelete(t *testing.T) { NewClientBuilder(). WithScheme(scheme.Scheme). WithRuntimeObjects(tt.existingObjects...). - WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.ManagedClusterServiceTemplatesIndexKey, v1alpha1.ExtractServiceTemplateNamesFromManagedCluster). + WithIndex(&v1alpha1.ClusterDeployment{}, v1alpha1.ClusterDeploymentServiceTemplatesIndexKey, v1alpha1.ExtractServiceTemplateNamesFromClusterDeployment). 
Build() validator := &ProviderTemplateValidator{ @@ -161,13 +161,13 @@ func TestClusterTemplateValidateDelete(t *testing.T) { warnings admission.Warnings }{ { - title: "should fail if ManagedCluster object referencing the template exists in the same namespace", + title: "should fail if ClusterDeployment object referencing the template exists in the same namespace", template: tpl, - existingObjects: []runtime.Object{managedcluster.NewManagedCluster( - managedcluster.WithNamespace(templateNamespace), - managedcluster.WithClusterTemplate(templateName), + existingObjects: []runtime.Object{clusterdeployment.NewClusterDeployment( + clusterdeployment.WithNamespace(templateNamespace), + clusterdeployment.WithClusterTemplate(tpl.Name), )}, - warnings: admission.Warnings{"The ClusterTemplate object can't be removed if ManagedCluster objects referencing it still exist"}, + warnings: admission.Warnings{"The ClusterTemplate object can't be removed if ClusterDeployment objects referencing it still exist"}, err: "template deletion is forbidden", }, { @@ -198,17 +198,17 @@ func TestClusterTemplateValidateDelete(t *testing.T) { err: "template deletion is forbidden", }, { - title: "should succeed if some ManagedCluster from another namespace references the template with the same name", + title: "should succeed if some ClusterDeployment from another namespace references the template with the same name", template: tpl, - existingObjects: []runtime.Object{managedcluster.NewManagedCluster( - managedcluster.WithNamespace("new"), - managedcluster.WithClusterTemplate(templateName), + existingObjects: []runtime.Object{clusterdeployment.NewClusterDeployment( + clusterdeployment.WithNamespace("new"), + clusterdeployment.WithClusterTemplate(templateName), )}, }, { - title: "should succeed because no ManagedCluster or ClusterTemplateChain references the template", + title: "should succeed because no ClusterDeployment or ClusterTemplateChain references the template", template: tpl, - existingObjects: []runtime.Object{managedcluster.NewManagedCluster()}, + existingObjects: []runtime.Object{clusterdeployment.NewClusterDeployment()}, }, } @@ -219,7 +219,7 @@ func TestClusterTemplateValidateDelete(t *testing.T) { c := fake.NewClientBuilder(). WithScheme(scheme.Scheme). WithRuntimeObjects(tt.existingObjects...). - WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.ManagedClusterTemplateIndexKey, v1alpha1.ExtractTemplateNameFromManagedCluster). + WithIndex(&v1alpha1.ClusterDeployment{}, v1alpha1.ClusterDeploymentTemplateIndexKey, v1alpha1.ExtractTemplateNameFromClusterDeployment). 
Build() validator := &ClusterTemplateValidator{ TemplateValidator: TemplateValidator{ @@ -263,15 +263,15 @@ func TestServiceTemplateValidateDelete(t *testing.T) { err string }{ { - title: "should fail if ManagedCluster exists in same namespace", + title: "should fail if ClusterDeployment exists in same namespace", template: tmpl, existingObjects: []runtime.Object{ - managedcluster.NewManagedCluster( - managedcluster.WithNamespace(templateNamespace), - managedcluster.WithServiceTemplate(templateName), + clusterdeployment.NewClusterDeployment( + clusterdeployment.WithNamespace(templateNamespace), + clusterdeployment.WithServiceTemplate(templateName), ), }, - warnings: admission.Warnings{"The ServiceTemplate object can't be removed if ManagedCluster objects referencing it still exist"}, + warnings: admission.Warnings{"The ServiceTemplate object can't be removed if ClusterDeployment objects referencing it still exist"}, err: errTemplateDeletionForbidden.Error(), }, { @@ -302,19 +302,19 @@ func TestServiceTemplateValidateDelete(t *testing.T) { err: "template deletion is forbidden", }, { - title: "should succeed if managedCluster referencing ServiceTemplate is another namespace", + title: "should succeed if ClusterDeployment referencing ServiceTemplate is another namespace", template: tmpl, existingObjects: []runtime.Object{ - managedcluster.NewManagedCluster( - managedcluster.WithNamespace("someothernamespace"), - managedcluster.WithServiceTemplate(templateName), + clusterdeployment.NewClusterDeployment( + clusterdeployment.WithNamespace("someothernamespace"), + clusterdeployment.WithServiceTemplate(tmpl.Name), ), }, }, { title: "should succeed because no cluster references the template", template: tmpl, - existingObjects: []runtime.Object{managedcluster.NewManagedCluster()}, + existingObjects: []runtime.Object{clusterdeployment.NewClusterDeployment()}, }, { title: "should fail if a MultiClusterService is referencing serviceTemplate in system namespace", @@ -338,7 +338,7 @@ func TestServiceTemplateValidateDelete(t *testing.T) { NewClientBuilder(). WithScheme(scheme.Scheme). WithRuntimeObjects(tt.existingObjects...). - WithIndex(&v1alpha1.ManagedCluster{}, v1alpha1.ManagedClusterServiceTemplatesIndexKey, v1alpha1.ExtractServiceTemplateNamesFromManagedCluster). + WithIndex(&v1alpha1.ClusterDeployment{}, v1alpha1.ClusterDeploymentServiceTemplatesIndexKey, v1alpha1.ExtractServiceTemplateNamesFromClusterDeployment). WithIndex(&v1alpha1.MultiClusterService{}, v1alpha1.MultiClusterServiceTemplatesIndexKey, v1alpha1.ExtractServiceTemplateNamesFromMultiClusterService). Build() diff --git a/scripts/airgap-push.sh b/scripts/airgap-push.sh index 0f894c364..3e9fc5ae7 100755 --- a/scripts/airgap-push.sh +++ b/scripts/airgap-push.sh @@ -14,7 +14,7 @@ # limitations under the License. # # This script can be used to help users re-tag and push images and Helm charts -# into a private registry for use when deploying HMC ManagedClusters into an +# into a private registry for use when deploying HMC ClusterDeployments into an # air-gapped environment. This script is packaged as part of the airgap bundle # for convenience. diff --git a/templates/cluster/adopted-cluster/.helmignore b/templates/cluster/adopted-cluster/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/templates/cluster/adopted-cluster/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). 
Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/templates/cluster/adopted-cluster/Chart.yaml b/templates/cluster/adopted-cluster/Chart.yaml new file mode 100644 index 000000000..53fa99c13 --- /dev/null +++ b/templates/cluster/adopted-cluster/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: adopted-cluster +description: | + An HMC template to adopt an already existing kubernetes cluster +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.4 +annotations: + cluster.x-k8s.io/provider: infrastructure-internal diff --git a/templates/cluster/adopted-cluster/templates/_helpers.tpl b/templates/cluster/adopted-cluster/templates/_helpers.tpl new file mode 100644 index 000000000..50922cc27 --- /dev/null +++ b/templates/cluster/adopted-cluster/templates/_helpers.tpl @@ -0,0 +1,4 @@ +{{- define "cluster.name" -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- end }} + diff --git a/templates/cluster/adopted-cluster/templates/sveltoscluster.yaml b/templates/cluster/adopted-cluster/templates/sveltoscluster.yaml new file mode 100644 index 000000000..dd189255a --- /dev/null +++ b/templates/cluster/adopted-cluster/templates/sveltoscluster.yaml @@ -0,0 +1,10 @@ +apiVersion: lib.projectsveltos.io/v1beta1 +kind: SveltosCluster +metadata: + labels: + projectsveltos.io/k8s-version: v1.31.1 + name: {{ include "cluster.name" . }} + namespace: {{ .Release.Namespace }} +spec: + consecutiveFailureThreshold: {{ .Values.consecutiveFailureThreshold }} + kubeconfigName: {{ .Values.clusterIdentity.name }} diff --git a/templates/cluster/adopted-cluster/values.schema.json b/templates/cluster/adopted-cluster/values.schema.json new file mode 100644 index 000000000..d54c0bd57 --- /dev/null +++ b/templates/cluster/adopted-cluster/values.schema.json @@ -0,0 +1,33 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "An HMC template to adopt an existing k8s cluster.", + "type": "object", + "required": [ + "clusterIdentity" + ], + "properties": { + "consecutiveFailureThreshold": { + "description": "The number of the failures prior to setting the status condition", + "type": "integer", + "minimum": 1 + }, + "clusterIdentity": { + "type": "object", + "description": "Adopted Cluster Identity object reference", + "required": [ + "name", + "kind" + ], + "properties": { + "name": { + "description": "Adopted ClusterIdentity object name", + "type": "string" + }, + "kind": { + "description": "Adopted ClusterIdentity object kind", + "type": "string" + } + } + } + } +} diff --git a/templates/cluster/adopted-cluster/values.yaml b/templates/cluster/adopted-cluster/values.yaml new file mode 100644 index 000000000..bb189cedf --- /dev/null +++ b/templates/cluster/adopted-cluster/values.yaml @@ -0,0 +1,7 @@ +# Cluster parameters + +clusterIdentity: + name: "adopted-cluster-identity" + kind: "Secret" + +consecutiveFailureThreshold: 3 diff --git a/templates/cluster/aws-eks/values.schema.json b/templates/cluster/aws-eks/values.schema.json index acabd799c..756857410 100644 --- a/templates/cluster/aws-eks/values.schema.json +++ b/templates/cluster/aws-eks/values.schema.json @@ -1,6 +1,6 @@ { "$schema": 
"https://json-schema.org/draft/2020-12/schema", - "description": "An HMC template to deploy a ManagedCluster on EKS.", + "description": "An HMC template to deploy a ClusterDeployment on EKS.", "type": "object", "required": [ "workersNumber", diff --git a/templates/provider/hmc-templates/files/templates/adopted-cluster-0-0-4.yaml b/templates/provider/hmc-templates/files/templates/adopted-cluster-0-0-4.yaml new file mode 100644 index 000000000..f0e66bf28 --- /dev/null +++ b/templates/provider/hmc-templates/files/templates/adopted-cluster-0-0-4.yaml @@ -0,0 +1,10 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ClusterTemplate +metadata: + name: adopted-cluster-0-0-4 + annotations: + helm.sh/resource-policy: keep +spec: + helm: + chartName: adopted-cluster + chartVersion: 0.0.4 diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clusterdeployments.yaml similarity index 96% rename from templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml rename to templates/provider/hmc/templates/crds/hmc.mirantis.com_clusterdeployments.yaml index 8e0ec18a3..1c76d3062 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clusterdeployments.yaml @@ -4,17 +4,17 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.3 - name: managedclusters.hmc.mirantis.com + name: clusterdeployments.hmc.mirantis.com spec: group: hmc.mirantis.com names: - kind: ManagedCluster - listKind: ManagedClusterList - plural: managedclusters + kind: ClusterDeployment + listKind: ClusterDeploymentList + plural: clusterdeployments shortNames: - mcluster - mcl - singular: managedcluster + singular: clusterdeployment scope: Namespaced versions: - additionalPrinterColumns: @@ -34,7 +34,7 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: ManagedCluster is the Schema for the managedclusters API + description: ClusterDeployment is the Schema for the ClusterDeployments API properties: apiVersion: description: |- @@ -54,7 +54,7 @@ spec: metadata: type: object spec: - description: ManagedClusterSpec defines the desired state of ManagedCluster + description: ClusterDeploymentSpec defines the desired state of ClusterDeployment properties: config: description: |- @@ -135,7 +135,7 @@ spec: - template type: object status: - description: ManagedClusterStatus defines the observed state of ManagedCluster + description: ClusterDeploymentStatus defines the observed state of ClusterDeployment properties: availableUpgrades: description: |- @@ -147,7 +147,7 @@ spec: type: array conditions: description: Conditions contains details for the current state of - the ManagedCluster. + the ClusterDeployment. items: description: Condition contains details for one aspect of the current state of this API Resource. diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml index 94bfcbb20..5e18840f6 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml @@ -167,7 +167,7 @@ spec: config: description: |- Config demonstrates available parameters for template customization, - that can be used when creating ManagedCluster objects. 
+ that can be used when creating ClusterDeployment objects. x-kubernetes-preserve-unknown-fields: true description: description: Description contains information about the template. diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml index 3157fb79c..df81fb45a 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml @@ -172,7 +172,7 @@ spec: config: description: |- Config demonstrates available parameters for template customization, - that can be used when creating ManagedCluster objects. + that can be used when creating ClusterDeployment objects. x-kubernetes-preserve-unknown-fields: true description: description: Description contains information about the template. diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml index 84c17f57f..21e701f09 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml @@ -156,7 +156,7 @@ spec: config: description: |- Config demonstrates available parameters for template customization, - that can be used when creating ManagedCluster objects. + that can be used when creating ClusterDeployment objects. x-kubernetes-preserve-unknown-fields: true description: description: Description contains information about the template. diff --git a/templates/provider/hmc/templates/deployment.yaml b/templates/provider/hmc/templates/deployment.yaml index 31ae1004a..e95b1cd95 100644 --- a/templates/provider/hmc/templates/deployment.yaml +++ b/templates/provider/hmc/templates/deployment.yaml @@ -54,6 +54,7 @@ spec: port: 8081 initialDelaySeconds: 15 periodSeconds: 20 + timeoutSeconds: 6000 name: manager readinessProbe: httpGet: @@ -61,6 +62,7 @@ spec: port: 8081 initialDelaySeconds: 5 periodSeconds: 10 + timeoutSeconds: 6000 resources: {{- toYaml .Values.resources | nindent 10 }} securityContext: {{- toYaml .Values.containerSecurityContext diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml index 4d8be9556..2ba80fb59 100644 --- a/templates/provider/hmc/templates/rbac/controller/roles.yaml +++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml @@ -33,18 +33,18 @@ rules: - apiGroups: - hmc.mirantis.com resources: - - managedclusters + - clusterdeployments verbs: {{ include "rbac.editorVerbs" . | nindent 4 }} - apiGroups: - hmc.mirantis.com resources: - - managedclusters/finalizers + - clusterdeployments/finalizers verbs: - update - apiGroups: - hmc.mirantis.com resources: - - managedclusters/status + - clusterdeployments/status verbs: - get - patch diff --git a/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml index 9beeeab3e..b09f4f9f3 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml @@ -8,5 +8,5 @@ rules: - apiGroups: - hmc.mirantis.com resources: - - managedclusters + - clusterdeployments verbs: {{ include "rbac.editorVerbs" . 
| nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml index f3a971c5b..a6226dba4 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml @@ -8,5 +8,5 @@ rules: - apiGroups: - hmc.mirantis.com resources: - - managedclusters + - clusterdeployments verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/webhooks.yaml b/templates/provider/hmc/templates/webhooks.yaml index 804307334..97e8443af 100644 --- a/templates/provider/hmc/templates/webhooks.yaml +++ b/templates/provider/hmc/templates/webhooks.yaml @@ -13,10 +13,10 @@ webhooks: service: name: {{ include "hmc.webhook.serviceName" . }} namespace: {{ include "hmc.webhook.serviceNamespace" . }} - path: /mutate-hmc-mirantis-com-v1alpha1-managedcluster + path: /mutate-hmc-mirantis-com-v1alpha1-clusterdeployment failurePolicy: Fail matchPolicy: Equivalent - name: mutation.managedcluster.hmc.mirantis.com + name: mutation.clusterdeployment.hmc.mirantis.com rules: - apiGroups: - hmc.mirantis.com @@ -26,7 +26,7 @@ webhooks: - CREATE - UPDATE resources: - - managedclusters + - clusterdeployments sideEffects: None - admissionReviewVersions: - v1 @@ -65,10 +65,10 @@ webhooks: service: name: {{ include "hmc.webhook.serviceName" . }} namespace: {{ include "hmc.webhook.serviceNamespace" . }} - path: /validate-hmc-mirantis-com-v1alpha1-managedcluster + path: /validate-hmc-mirantis-com-v1alpha1-clusterdeployment failurePolicy: Fail matchPolicy: Equivalent - name: validation.managedcluster.hmc.mirantis.com + name: validation.clusterdeployment.hmc.mirantis.com rules: - apiGroups: - hmc.mirantis.com @@ -79,7 +79,7 @@ webhooks: - UPDATE - DELETE resources: - - managedclusters + - clusterdeployments sideEffects: None - admissionReviewVersions: - v1 diff --git a/test/e2e/managedcluster/aws/aws.go b/test/e2e/clusterdeployment/aws/aws.go similarity index 88% rename from test/e2e/managedcluster/aws/aws.go rename to test/e2e/clusterdeployment/aws/aws.go index 441cfc499..33b6f6830 100644 --- a/test/e2e/managedcluster/aws/aws.go +++ b/test/e2e/clusterdeployment/aws/aws.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment" "github.com/Mirantis/hmc/test/e2e/kubeclient" - "github.com/Mirantis/hmc/test/e2e/managedcluster" ) // PopulateHostedTemplateVars populates the environment variables required for @@ -66,8 +66,8 @@ func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient, Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster security group ID") Expect(found).To(BeTrue(), "AWS cluster has no security group ID") - GinkgoT().Setenv(managedcluster.EnvVarAWSVPCID, vpcID) - GinkgoT().Setenv(managedcluster.EnvVarAWSSubnetID, subnetID) - GinkgoT().Setenv(managedcluster.EnvVarAWSSubnetAvailabilityZone, subnetAZ) - GinkgoT().Setenv(managedcluster.EnvVarAWSSecurityGroupID, securityGroupID) + GinkgoT().Setenv(clusterdeployment.EnvVarAWSVPCID, vpcID) + GinkgoT().Setenv(clusterdeployment.EnvVarAWSSubnetID, subnetID) + GinkgoT().Setenv(clusterdeployment.EnvVarAWSSubnetAvailabilityZone, subnetAZ) + GinkgoT().Setenv(clusterdeployment.EnvVarAWSSecurityGroupID, securityGroupID) } diff --git a/test/e2e/managedcluster/azure/azure.go b/test/e2e/clusterdeployment/azure/azure.go 
similarity index 100% rename from test/e2e/managedcluster/azure/azure.go rename to test/e2e/clusterdeployment/azure/azure.go diff --git a/test/e2e/managedcluster/managedcluster.go b/test/e2e/clusterdeployment/clusterdeployment.go similarity index 72% rename from test/e2e/managedcluster/managedcluster.go rename to test/e2e/clusterdeployment/clusterdeployment.go index 72efa96f8..4492f4dc0 100644 --- a/test/e2e/managedcluster/managedcluster.go +++ b/test/e2e/clusterdeployment/clusterdeployment.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package managedcluster +package clusterdeployment import ( _ "embed" @@ -53,22 +53,22 @@ const ( ) //go:embed resources/aws-standalone-cp.yaml.tpl -var awsStandaloneCPManagedClusterTemplateBytes []byte +var awsStandaloneCPClusterDeploymentTemplateBytes []byte //go:embed resources/aws-hosted-cp.yaml.tpl -var awsHostedCPManagedClusterTemplateBytes []byte +var awsHostedCPClusterDeploymentTemplateBytes []byte //go:embed resources/azure-standalone-cp.yaml.tpl -var azureStandaloneCPManagedClusterTemplateBytes []byte +var azureStandaloneCPClusterDeploymentTemplateBytes []byte //go:embed resources/azure-hosted-cp.yaml.tpl -var azureHostedCPManagedClusterTemplateBytes []byte +var azureHostedCPClusterDeploymentTemplateBytes []byte //go:embed resources/vsphere-standalone-cp.yaml.tpl -var vsphereStandaloneCPManagedClusterTemplateBytes []byte +var vsphereStandaloneCPClusterDeploymentTemplateBytes []byte //go:embed resources/vsphere-hosted-cp.yaml.tpl -var vsphereHostedCPManagedClusterTemplateBytes []byte +var vsphereHostedCPClusterDeploymentTemplateBytes []byte func FilterAllProviders() []string { return []string{ @@ -87,7 +87,7 @@ func GetProviderLabel(provider ProviderType) string { func setClusterName(templateName Template) { var generatedName string - mcName := os.Getenv(EnvVarManagedClusterName) + mcName := os.Getenv(EnvVarClusterDeploymentName) if mcName == "" { mcName = "e2e-test-" + uuid.New().String()[:8] } @@ -95,26 +95,26 @@ func setClusterName(templateName Template) { providerName := strings.Split(string(templateName), "-")[0] // Append the provider name to the cluster name to ensure uniqueness between - // different deployed ManagedClusters. + // different deployed ClusterDeployments. generatedName = fmt.Sprintf("%s-%s", mcName, providerName) if strings.Contains(string(templateName), "hosted") { generatedName = fmt.Sprintf("%s-%s", mcName, "hosted") } - GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) + GinkgoT().Setenv(EnvVarClusterDeploymentName, generatedName) } -// GetUnstructured returns an unstructured ManagedCluster object based on the +// GetUnstructured returns an unstructured ClusterDeployment object based on the // provider and template. func GetUnstructured(templateName Template) *unstructured.Unstructured { GinkgoHelper() setClusterName(templateName) - var managedClusterTemplateBytes []byte + var clusterDeploymentTemplateBytes []byte switch templateName { case TemplateAWSStandaloneCP: - managedClusterTemplateBytes = awsStandaloneCPManagedClusterTemplateBytes + clusterDeploymentTemplateBytes = awsStandaloneCPClusterDeploymentTemplateBytes case TemplateAWSHostedCP: // Validate environment vars that do not have defaults are populated. 
// We perform this validation here instead of within a Before block @@ -125,28 +125,28 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { EnvVarAWSSubnetAvailabilityZone, EnvVarAWSSecurityGroupID, }) - managedClusterTemplateBytes = awsHostedCPManagedClusterTemplateBytes + clusterDeploymentTemplateBytes = awsHostedCPClusterDeploymentTemplateBytes case TemplateVSphereStandaloneCP: - managedClusterTemplateBytes = vsphereStandaloneCPManagedClusterTemplateBytes + clusterDeploymentTemplateBytes = vsphereStandaloneCPClusterDeploymentTemplateBytes case TemplateVSphereHostedCP: - managedClusterTemplateBytes = vsphereHostedCPManagedClusterTemplateBytes + clusterDeploymentTemplateBytes = vsphereHostedCPClusterDeploymentTemplateBytes case TemplateAzureHostedCP: - managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes + clusterDeploymentTemplateBytes = azureHostedCPClusterDeploymentTemplateBytes case TemplateAzureStandaloneCP: - managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes + clusterDeploymentTemplateBytes = azureStandaloneCPClusterDeploymentTemplateBytes default: Fail(fmt.Sprintf("Unsupported template: %s", templateName)) } - managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) + clusterDeploymentConfigBytes, err := envsubst.Bytes(clusterDeploymentTemplateBytes) Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables") - var managedClusterConfig map[string]any + var clusterDeploymentConfig map[string]any - err = yaml.Unmarshal(managedClusterConfigBytes, &managedClusterConfig) + err = yaml.Unmarshal(clusterDeploymentConfigBytes, &clusterDeploymentConfig) Expect(err).NotTo(HaveOccurred(), "failed to unmarshal deployment config") - return &unstructured.Unstructured{Object: managedClusterConfig} + return &unstructured.Unstructured{Object: clusterDeploymentConfig} } func ValidateDeploymentVars(v []string) { diff --git a/test/e2e/managedcluster/clusteridentity/clusteridentity.go b/test/e2e/clusterdeployment/clusteridentity/clusteridentity.go similarity index 90% rename from test/e2e/managedcluster/clusteridentity/clusteridentity.go rename to test/e2e/clusterdeployment/clusteridentity/clusteridentity.go index 41e01c77e..461e629cb 100644 --- a/test/e2e/managedcluster/clusteridentity/clusteridentity.go +++ b/test/e2e/clusterdeployment/clusteridentity/clusteridentity.go @@ -28,8 +28,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment" "github.com/Mirantis/hmc/test/e2e/kubeclient" - "github.com/Mirantis/hmc/test/e2e/managedcluster" ) type ClusterIdentity struct { @@ -45,7 +45,7 @@ type ClusterIdentity struct { // New creates a ClusterIdentity resource, credential and associated secret for // the given provider using the provided KubeClient and returns details about // the created ClusterIdentity. 
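As a reading aid for the provider switch inside New below: for AWS, the helper assembles (via unstructured) approximately the following identity object, plus a Secret carrying the access keys. This is a sketch; the API group is assumed to be the usual CAPA group, and the object names follow the fmt.Sprintf patterns in the code rather than being taken from this change.

```yaml
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2  # assumed group; kind and version per the AWS case below
kind: AWSClusterStaticIdentity
metadata:
  name: aws-cluster-identity              # identityName: "<provider>-cluster-identity"
spec:
  secretRef: aws-cluster-identity-secret  # assumed secret name pattern
  allowedNamespaces:
    selector: {}                          # empty selector, as built in the AWS branch
```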
-func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType) *ClusterIdentity { +func New(kc *kubeclient.KubeClient, provider clusterdeployment.ProviderType) *ClusterIdentity { GinkgoHelper() var ( @@ -61,13 +61,13 @@ func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType) *Clust identityName := fmt.Sprintf("%s-cluster-identity", provider) switch provider { - case managedcluster.ProviderAWS: + case clusterdeployment.ProviderAWS: resource = "awsclusterstaticidentities" kind = "AWSClusterStaticIdentity" version = "v1beta2" secretStringData = map[string]string{ - "AccessKeyID": os.Getenv(managedcluster.EnvVarAWSAccessKeyID), - "SecretAccessKey": os.Getenv(managedcluster.EnvVarAWSSecretAccessKey), + "AccessKeyID": os.Getenv(clusterdeployment.EnvVarAWSAccessKeyID), + "SecretAccessKey": os.Getenv(clusterdeployment.EnvVarAWSSecretAccessKey), } spec = map[string]any{ "secretRef": secretName, @@ -77,31 +77,31 @@ func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType) *Clust }, }, } - case managedcluster.ProviderAzure: + case clusterdeployment.ProviderAzure: resource = "azureclusteridentities" kind = "AzureClusterIdentity" version = "v1beta1" secretStringData = map[string]string{ - "clientSecret": os.Getenv(managedcluster.EnvVarAzureClientSecret), + "clientSecret": os.Getenv(clusterdeployment.EnvVarAzureClientSecret), } spec = map[string]any{ "allowedNamespaces": make(map[string]any), - "clientID": os.Getenv(managedcluster.EnvVarAzureClientID), + "clientID": os.Getenv(clusterdeployment.EnvVarAzureClientID), "clientSecret": map[string]any{ "name": secretName, "namespace": kc.Namespace, }, - "tenantID": os.Getenv(managedcluster.EnvVarAzureTenantID), + "tenantID": os.Getenv(clusterdeployment.EnvVarAzureTenantID), "type": "ServicePrincipal", } namespaced = true - case managedcluster.ProviderVSphere: + case clusterdeployment.ProviderVSphere: resource = "vsphereclusteridentities" kind = "VSphereClusterIdentity" version = "v1beta1" secretStringData = map[string]string{ - "username": os.Getenv(managedcluster.EnvVarVSphereUser), - "password": os.Getenv(managedcluster.EnvVarVSpherePassword), + "username": os.Getenv(clusterdeployment.EnvVarVSphereUser), + "password": os.Getenv(clusterdeployment.EnvVarVSpherePassword), } spec = map[string]any{ "secretName": secretName, diff --git a/test/e2e/managedcluster/common.go b/test/e2e/clusterdeployment/common.go similarity index 97% rename from test/e2e/managedcluster/common.go rename to test/e2e/clusterdeployment/common.go index 5fabf7fde..9eb59211c 100644 --- a/test/e2e/managedcluster/common.go +++ b/test/e2e/clusterdeployment/common.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package managedcluster +package clusterdeployment import ( "context" @@ -33,7 +33,7 @@ import ( // as Ready depending on the given provider. // See: https://docs.k0smotron.io/stable/capi-aws/#prepare-the-aws-infra-provider // Use Eventually as the resource might not be available immediately following -// a ManagedCluster creation. +// a ClusterDeployment creation. 
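PatchHostedClusterReady, whose signature follows, short-circuits the infrastructure readiness check for hosted control planes. Per the k0smotron guide referenced in the comment above, this amounts to a merge patch on the status subresource of the provider's cluster object, along these lines (a sketch; the exact resource depends on the provider):

```yaml
# Applied to e.g. the hosted AWSCluster's status subresource; the kubectl
# equivalent (assumed) would be:
#   kubectl patch awscluster <name> --subresource=status --type=merge -p '{"status":{"ready":true}}'
status:
  ready: true
```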
func PatchHostedClusterReady(kc *kubeclient.KubeClient, provider ProviderType, clusterName string) { GinkgoHelper() diff --git a/test/e2e/managedcluster/constants.go b/test/e2e/clusterdeployment/constants.go similarity index 88% rename from test/e2e/managedcluster/constants.go rename to test/e2e/clusterdeployment/constants.go index 4f18a7832..f5a1e2037 100644 --- a/test/e2e/managedcluster/constants.go +++ b/test/e2e/clusterdeployment/constants.go @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -package managedcluster +package clusterdeployment const ( // Common - EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" - EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" - EnvVarWorkerNumber = "WORKER_NUMBER" - EnvVarNamespace = "NAMESPACE" + EnvVarClusterDeploymentName = "CLUSTER_DEPLOYMENT_NAME" + EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" + EnvVarWorkerNumber = "WORKER_NUMBER" + EnvVarNamespace = "NAMESPACE" // EnvVarNoCleanup disables After* cleanup in provider specs to allow for // debugging of test failures. EnvVarNoCleanup = "NO_CLEANUP" diff --git a/test/e2e/managedcluster/providervalidator.go b/test/e2e/clusterdeployment/providervalidator.go similarity index 99% rename from test/e2e/managedcluster/providervalidator.go rename to test/e2e/clusterdeployment/providervalidator.go index 4df0fa84b..7ca81d1d2 100644 --- a/test/e2e/managedcluster/providervalidator.go +++ b/test/e2e/clusterdeployment/providervalidator.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package managedcluster +package clusterdeployment import ( "context" diff --git a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/e2e/clusterdeployment/resources/aws-hosted-cp.yaml.tpl similarity index 88% rename from test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl rename to test/e2e/clusterdeployment/resources/aws-hosted-cp.yaml.tpl index 8a2700c63..62fd8685e 100644 --- a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/e2e/clusterdeployment/resources/aws-hosted-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster +kind: ClusterDeployment metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${CLUSTER_DEPLOYMENT_NAME} spec: template: aws-hosted-cp-0-0-3 credential: ${AWS_CLUSTER_IDENTITY}-cred diff --git a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/e2e/clusterdeployment/resources/aws-standalone-cp.yaml.tpl similarity index 89% rename from test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl rename to test/e2e/clusterdeployment/resources/aws-standalone-cp.yaml.tpl index 24c449bc0..bc5390722 100644 --- a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/e2e/clusterdeployment/resources/aws-standalone-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster +kind: ClusterDeployment metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${CLUSTER_DEPLOYMENT_NAME} spec: template: aws-standalone-cp-0-0-3 credential: ${AWS_CLUSTER_IDENTITY}-cred diff --git a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/e2e/clusterdeployment/resources/azure-hosted-cp.yaml.tpl similarity index 92% rename from test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl rename to test/e2e/clusterdeployment/resources/azure-hosted-cp.yaml.tpl index 76da17cbb..d650b5426 100644 --- 
a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl +++ b/test/e2e/clusterdeployment/resources/azure-hosted-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster +kind: ClusterDeployment metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${CLUSTER_DEPLOYMENT_NAME} namespace: ${NAMESPACE} spec: template: azure-hosted-cp-0-0-3 diff --git a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/e2e/clusterdeployment/resources/azure-standalone-cp.yaml.tpl similarity index 91% rename from test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl rename to test/e2e/clusterdeployment/resources/azure-standalone-cp.yaml.tpl index 3894b7b39..d16321106 100644 --- a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl +++ b/test/e2e/clusterdeployment/resources/azure-standalone-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster +kind: ClusterDeployment metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${CLUSTER_DEPLOYMENT_NAME} namespace: ${NAMESPACE} spec: template: azure-standalone-cp-0-0-3 diff --git a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/e2e/clusterdeployment/resources/vsphere-hosted-cp.yaml.tpl similarity index 94% rename from test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl rename to test/e2e/clusterdeployment/resources/vsphere-hosted-cp.yaml.tpl index c0475f3f4..62a7f3e93 100644 --- a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/e2e/clusterdeployment/resources/vsphere-hosted-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster +kind: ClusterDeployment metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${CLUSTER_DEPLOYMENT_NAME} spec: template: vsphere-hosted-cp-0-0-3 credential: ${VSPHERE_CLUSTER_IDENTITY}-cred diff --git a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/e2e/clusterdeployment/resources/vsphere-standalone-cp.yaml.tpl similarity index 95% rename from test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl rename to test/e2e/clusterdeployment/resources/vsphere-standalone-cp.yaml.tpl index cc5fa87b3..a94f78c4b 100644 --- a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/e2e/clusterdeployment/resources/vsphere-standalone-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 -kind: ManagedCluster +kind: ClusterDeployment metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${CLUSTER_DEPLOYMENT_NAME} spec: template: vsphere-standalone-cp-0-0-3 credential: ${VSPHERE_CLUSTER_IDENTITY}-cred diff --git a/test/e2e/managedcluster/validate_deleted.go b/test/e2e/clusterdeployment/validate_deleted.go similarity index 99% rename from test/e2e/managedcluster/validate_deleted.go rename to test/e2e/clusterdeployment/validate_deleted.go index e09d4c254..1789ec879 100644 --- a/test/e2e/managedcluster/validate_deleted.go +++ b/test/e2e/clusterdeployment/validate_deleted.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
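Tying the renamed environment variables to the renamed .tpl resources above: after setClusterName and envsubst run, the aws-standalone template resolves to a manifest like the sketch below. The UUID fragment and the credential value are illustrative; the credential follows the ${AWS_CLUSTER_IDENTITY}-cred pattern from the template, assuming the identity created by clusteridentity.New.

```yaml
apiVersion: hmc.mirantis.com/v1alpha1
kind: ClusterDeployment
metadata:
  name: e2e-test-1a2b3c4d-aws  # CLUSTER_DEPLOYMENT_NAME with the provider suffix appended
spec:
  template: aws-standalone-cp-0-0-3
  credential: aws-cluster-identity-cred  # illustrative; ${AWS_CLUSTER_IDENTITY}-cred
```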
-package managedcluster +package clusterdeployment import ( "context" diff --git a/test/e2e/managedcluster/validate_deployed.go b/test/e2e/clusterdeployment/validate_deployed.go similarity index 99% rename from test/e2e/managedcluster/validate_deployed.go rename to test/e2e/clusterdeployment/validate_deployed.go index bae823f75..7f750ecc1 100644 --- a/test/e2e/managedcluster/validate_deployed.go +++ b/test/e2e/clusterdeployment/validate_deployed.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package managedcluster +package clusterdeployment import ( "context" diff --git a/test/e2e/managedcluster/vsphere/vsphere.go b/test/e2e/clusterdeployment/vsphere/vsphere.go similarity index 89% rename from test/e2e/managedcluster/vsphere/vsphere.go rename to test/e2e/clusterdeployment/vsphere/vsphere.go index 0d5db9ca0..a708ef0e4 100644 --- a/test/e2e/managedcluster/vsphere/vsphere.go +++ b/test/e2e/clusterdeployment/vsphere/vsphere.go @@ -15,11 +15,11 @@ package vsphere import ( - "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment" ) func CheckEnv() { - managedcluster.ValidateDeploymentVars([]string{ + clusterdeployment.ValidateDeploymentVars([]string{ "VSPHERE_USER", "VSPHERE_PASSWORD", "VSPHERE_SERVER", diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index e76a4c245..79466577d 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -33,8 +33,8 @@ import ( "k8s.io/utils/ptr" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment" "github.com/Mirantis/hmc/test/e2e/kubeclient" - "github.com/Mirantis/hmc/test/e2e/managedcluster" "github.com/Mirantis/hmc/test/utils" ) @@ -46,7 +46,7 @@ func TestE2E(t *testing.T) { } var _ = BeforeSuite(func() { - GinkgoT().Setenv(managedcluster.EnvVarNamespace, internalutils.DefaultSystemNamespace) + GinkgoT().Setenv(clusterdeployment.EnvVarNamespace, internalutils.DefaultSystemNamespace) By("building and deploying the controller-manager") cmd := exec.Command("make", "kind-deploy") @@ -88,16 +88,16 @@ func verifyControllersUp(kc *kubeclient.KubeClient) error { return err } - providers := []managedcluster.ProviderType{ - managedcluster.ProviderCAPI, - managedcluster.ProviderAWS, - managedcluster.ProviderAzure, - managedcluster.ProviderVSphere, + providers := []clusterdeployment.ProviderType{ + clusterdeployment.ProviderCAPI, + clusterdeployment.ProviderAWS, + clusterdeployment.ProviderAzure, + clusterdeployment.ProviderVSphere, } for _, provider := range providers { // Ensure only one controller pod is running. - if err := validateController(kc, managedcluster.GetProviderLabel(provider), string(provider)); err != nil { + if err := validateController(kc, clusterdeployment.GetProviderLabel(provider), string(provider)); err != nil { return err } } @@ -107,7 +107,7 @@ func verifyControllersUp(kc *kubeclient.KubeClient) error { func validateController(kc *kubeclient.KubeClient, labelSelector, name string) error { controllerItems := 1 - if strings.Contains(labelSelector, managedcluster.GetProviderLabel(managedcluster.ProviderAzure)) { + if strings.Contains(labelSelector, clusterdeployment.GetProviderLabel(clusterdeployment.ProviderAzure)) { // Azure provider has two controllers. 
controllerItems = 2 } @@ -144,7 +144,7 @@ func validateController(kc *kubeclient.KubeClient, labelSelector, name string) e // templateBy wraps a Ginkgo By with a block describing the template being // tested. -func templateBy(t managedcluster.Template, description string) { +func templateBy(t clusterdeployment.Template, description string) { GinkgoHelper() By(fmt.Sprintf("[%s] %s", t, description)) } @@ -155,7 +155,7 @@ func templateBy(t managedcluster.Template, description string) { // optionally provided, passing an empty string will prevent clusterctl output // from being fetched. If collectLogArtifacts fails it produces a warning // message to the GinkgoWriter, but does not fail the test. -func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, providerTypes ...managedcluster.ProviderType) { +func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, providerTypes ...clusterdeployment.ProviderType) { GinkgoHelper() filterLabels := []string{utils.HMCControllerLabel} @@ -169,10 +169,10 @@ func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, provider } if providerTypes == nil { - filterLabels = managedcluster.FilterAllProviders() + filterLabels = clusterdeployment.FilterAllProviders() } else { for _, providerType := range providerTypes { - filterLabels = append(filterLabels, managedcluster.GetProviderLabel(providerType)) + filterLabels = append(filterLabels, clusterdeployment.GetProviderLabel(providerType)) } } @@ -228,9 +228,9 @@ func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, provider } func noCleanup() bool { - noCleanup := os.Getenv(managedcluster.EnvVarNoCleanup) + noCleanup := os.Getenv(clusterdeployment.EnvVarNoCleanup) if noCleanup != "" { - By(fmt.Sprintf("skipping After node as %s is set", managedcluster.EnvVarNoCleanup)) + By(fmt.Sprintf("skipping After node as %s is set", clusterdeployment.EnvVarNoCleanup)) } return noCleanup != "" diff --git a/test/e2e/kubeclient/kubeclient.go b/test/e2e/kubeclient/kubeclient.go index e3801e4e0..45e8fdb97 100644 --- a/test/e2e/kubeclient/kubeclient.go +++ b/test/e2e/kubeclient/kubeclient.go @@ -177,30 +177,30 @@ func (kc *KubeClient) CreateOrUpdateUnstructuredObject(gvr schema.GroupVersionRe } } -// CreateManagedCluster creates a managedcluster.hmc.mirantis.com in the given +// CreateClusterDeployment creates a clusterdeployment.hmc.mirantis.com in the given // namespace and returns a DeleteFunc to clean up the deployment. // The DeleteFunc is a no-op if the deployment has already been deleted. 
-func (kc *KubeClient) CreateManagedCluster( - ctx context.Context, managedcluster *unstructured.Unstructured, +func (kc *KubeClient) CreateClusterDeployment( + ctx context.Context, clusterDeployment *unstructured.Unstructured, ) func() error { GinkgoHelper() - kind := managedcluster.GetKind() - Expect(kind).To(Equal("ManagedCluster")) + kind := clusterDeployment.GetKind() + Expect(kind).To(Equal("ClusterDeployment")) client := kc.GetDynamicClient(schema.GroupVersionResource{ Group: "hmc.mirantis.com", Version: "v1alpha1", - Resource: "managedclusters", + Resource: "clusterdeployments", }, true) - _, err := client.Create(ctx, managedcluster, metav1.CreateOptions{}) + _, err := client.Create(ctx, clusterDeployment, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { Expect(err).NotTo(HaveOccurred(), "failed to create %s", kind) } return func() error { - err := client.Delete(ctx, managedcluster.GetName(), metav1.DeleteOptions{}) + err := client.Delete(ctx, clusterDeployment.GetName(), metav1.DeleteOptions{}) if apierrors.IsNotFound(err) { return nil } diff --git a/test/e2e/provider_aws_test.go b/test/e2e/provider_aws_test.go index 6614698b4..276689032 100644 --- a/test/e2e/provider_aws_test.go +++ b/test/e2e/provider_aws_test.go @@ -25,10 +25,10 @@ import ( . "github.com/onsi/gomega" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment/aws" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment/clusteridentity" "github.com/Mirantis/hmc/test/e2e/kubeclient" - "github.com/Mirantis/hmc/test/e2e/managedcluster" - "github.com/Mirantis/hmc/test/e2e/managedcluster/aws" - "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" "github.com/Mirantis/hmc/test/utils" ) @@ -45,8 +45,8 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order BeforeAll(func() { By("providing cluster identity") kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) - ci := clusteridentity.New(kc, managedcluster.ProviderAWS) - Expect(os.Setenv(managedcluster.EnvVarAWSClusterIdentity, ci.IdentityName)).Should(Succeed()) + ci := clusteridentity.New(kc, clusterdeployment.ProviderAWS) + Expect(os.Setenv(clusterdeployment.EnvVarAWSClusterIdentity, ci.IdentityName)).Should(Succeed()) }) AfterAll(func() { @@ -55,7 +55,7 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order if CurrentSpecReport().Failed() && !noCleanup() { if standaloneClient != nil { By("collecting failure logs from hosted controllers") - collectLogArtifacts(standaloneClient, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) + collectLogArtifacts(standaloneClient, clusterName, clusterdeployment.ProviderAWS, clusterdeployment.ProviderCAPI) } } @@ -76,26 +76,26 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order // Deploy a standalone cluster and verify it is running/ready. // Deploy standalone with an xlarge instance since it will also be // hosting the hosted cluster. 
- GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") + GinkgoT().Setenv(clusterdeployment.EnvVarAWSInstanceType, "t3.xlarge") - templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) + templateBy(clusterdeployment.TemplateAWSStandaloneCP, "creating a ClusterDeployment") + sd := clusterdeployment.GetUnstructured(clusterdeployment.TemplateAWSStandaloneCP) clusterName = sd.GetName() - standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + standaloneDeleteFunc = kc.CreateClusterDeployment(context.Background(), sd) - templateBy(managedcluster.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, + templateBy(clusterdeployment.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") + deploymentValidator := clusterdeployment.NewProviderValidator( + clusterdeployment.TemplateAWSStandaloneCP, clusterName, - managedcluster.ValidationActionDeploy, + clusterdeployment.ValidationActionDeploy, ) Eventually(func() error { return deploymentValidator.Validate(context.Background(), kc) }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - templateBy(managedcluster.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") + templateBy(clusterdeployment.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") // Download the KUBECONFIG for the standalone cluster and load it // so we can call Make targets against this cluster. @@ -111,74 +111,74 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order Expect(err).NotTo(HaveOccurred()) Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) - templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") + templateBy(clusterdeployment.TemplateAWSHostedCP, "validating that the controller is ready") standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, clusterName) Eventually(func() error { err := verifyControllersUp(standaloneClient) if err != nil { _, _ = fmt.Fprintf( GinkgoWriter, "[%s] controller validation failed: %v\n", - string(managedcluster.TemplateAWSHostedCP), err) + string(clusterdeployment.TemplateAWSHostedCP), err) return err } return nil }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) // Ensure AWS credentials are set in the standalone cluster. - clusteridentity.New(standaloneClient, managedcluster.ProviderAWS) + clusteridentity.New(standaloneClient, clusterdeployment.ProviderAWS) // Populate the environment variables required for the hosted // cluster. aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) - templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") - hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) + templateBy(clusterdeployment.TemplateAWSHostedCP, "creating a clusterdeployment") + hd := clusterdeployment.GetUnstructured(clusterdeployment.TemplateAWSHostedCP) hdName := hd.GetName() // Deploy the hosted cluster on top of the standalone cluster. 
-        hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd)
+        hostedDeleteFunc = standaloneClient.CreateClusterDeployment(context.Background(), hd)
-        templateBy(managedcluster.TemplateAWSHostedCP, "Patching AWSCluster to ready")
-        managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAWS, hdName)
+        templateBy(clusterdeployment.TemplateAWSHostedCP, "Patching AWSCluster to ready")
+        clusterdeployment.PatchHostedClusterReady(standaloneClient, clusterdeployment.ProviderAWS, hdName)
         // Verify the hosted cluster is running/ready.
-        templateBy(managedcluster.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully")
-        deploymentValidator = managedcluster.NewProviderValidator(
-            managedcluster.TemplateAWSHostedCP,
+        templateBy(clusterdeployment.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully")
+        deploymentValidator = clusterdeployment.NewProviderValidator(
+            clusterdeployment.TemplateAWSHostedCP,
             hdName,
-            managedcluster.ValidationActionDeploy,
+            clusterdeployment.ValidationActionDeploy,
         )
         Eventually(func() error {
             return deploymentValidator.Validate(context.Background(), standaloneClient)
         }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
-        // Delete the hosted ManagedCluster and verify it is removed.
-        templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster")
+        // Delete the hosted ClusterDeployment and verify it is removed.
+        templateBy(clusterdeployment.TemplateAWSHostedCP, "deleting the ClusterDeployment")
         err = hostedDeleteFunc()
         Expect(err).NotTo(HaveOccurred())
-        deletionValidator := managedcluster.NewProviderValidator(
-            managedcluster.TemplateAWSHostedCP,
+        deletionValidator := clusterdeployment.NewProviderValidator(
+            clusterdeployment.TemplateAWSHostedCP,
             hdName,
-            managedcluster.ValidationActionDelete,
+            clusterdeployment.ValidationActionDelete,
         )
         Eventually(func() error {
             return deletionValidator.Validate(context.Background(), standaloneClient)
         }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
-        // Now delete the standalone ManagedCluster and verify it is
+        // Now delete the standalone ClusterDeployment and verify it is
         // removed, it is deleted last since it is the basis for the hosted
         // cluster.
         /* FIXME(#339): This is currently disabled as the deletion of the
            standalone cluster is failing due to outstanding issues.
-        templateBy(managedcluster.TemplateAWSStandaloneCP, "deleting the ManagedCluster")
+        templateBy(clusterdeployment.TemplateAWSStandaloneCP, "deleting the ClusterDeployment")
         err = standaloneDeleteFunc()
         Expect(err).NotTo(HaveOccurred())
-        deletionValidator = managedcluster.NewProviderValidator(
-            managedcluster.TemplateAWSStandaloneCP,
+        deletionValidator = clusterdeployment.NewProviderValidator(
+            clusterdeployment.TemplateAWSStandaloneCP,
             clusterName,
-            managedcluster.ValidationActionDelete,
+            clusterdeployment.ValidationActionDelete,
         )
         Eventually(func() error {
             return deletionValidator.Validate(context.Background(), kc)
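All provider specs in this patch share the same convergence idiom: bind a ProviderValidator to a template, a ClusterDeployment name, and an action, then poll Validate until it stops returning an error. A hedged sketch of that pattern (identifiers are the ones renamed in this patch; the timeout and polling values are illustrative):

    // Validate that the resources backing a ClusterDeployment converge.
    validator := clusterdeployment.NewProviderValidator(
        clusterdeployment.TemplateAWSHostedCP,    // template whose resources to check
        hdName,                                   // ClusterDeployment under test
        clusterdeployment.ValidationActionDeploy, // or ValidationActionDelete after cleanup
    )
    Eventually(func() error {
        return validator.Validate(context.Background(), standaloneClient)
    }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())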
"github.com/onsi/gomega" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment/azure" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment/clusteridentity" "github.com/Mirantis/hmc/test/e2e/kubeclient" - "github.com/Mirantis/hmc/test/e2e/managedcluster" - "github.com/Mirantis/hmc/test/e2e/managedcluster/azure" - "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" "github.com/Mirantis/hmc/test/utils" ) @@ -46,8 +46,8 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or BeforeAll(func() { By("ensuring Azure credentials are set") kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) - ci := clusteridentity.New(kc, managedcluster.ProviderAzure) - Expect(os.Setenv(managedcluster.EnvVarAzureClusterIdentity, ci.IdentityName)).Should(Succeed()) + ci := clusteridentity.New(kc, clusterdeployment.ProviderAzure) + Expect(os.Setenv(clusterdeployment.EnvVarAzureClusterIdentity, ci.IdentityName)).Should(Succeed()) }) AfterEach(func() { @@ -56,10 +56,10 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or if CurrentSpecReport().Failed() && !noCleanup() { By("collecting failure logs from controllers") if kc != nil { - collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + collectLogArtifacts(kc, sdName, clusterdeployment.ProviderAzure, clusterdeployment.ProviderCAPI) } if standaloneClient != nil { - collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + collectLogArtifacts(standaloneClient, sdName, clusterdeployment.ProviderAzure, clusterdeployment.ProviderCAPI) } } @@ -78,20 +78,20 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or }) It("should work with an Azure provider", func() { - templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP) + templateBy(clusterdeployment.TemplateAzureStandaloneCP, "creating a clusterdeployment") + sd := clusterdeployment.GetUnstructured(clusterdeployment.TemplateAzureStandaloneCP) sdName = sd.GetName() - standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd) + standaloneDeleteFunc := kc.CreateClusterDeployment(context.Background(), sd) // verify the standalone cluster is deployed correctly - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, + deploymentValidator := clusterdeployment.NewProviderValidator( + clusterdeployment.TemplateAzureStandaloneCP, sdName, - managedcluster.ValidationActionDeploy, + clusterdeployment.ValidationActionDeploy, ) - templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") + templateBy(clusterdeployment.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") Eventually(func() error { return deploymentValidator.Validate(context.Background(), kc) }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) @@ -99,7 +99,7 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or // setup environment variables for deploying the hosted template (subnet name, etc) azure.SetAzureEnvironmentVariables(sdName, kc) - hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP) + hd := 
         hdName := hd.GetName()
         var kubeCfgPath string
@@ -124,22 +124,22 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
         }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
         By("Create azure credential secret")
-        clusteridentity.New(standaloneClient, managedcluster.ProviderAzure)
+        clusteridentity.New(standaloneClient, clusterdeployment.ProviderAzure)
         By("Create default storage class for azure-disk CSI driver")
         azure.CreateDefaultStorageClass(standaloneClient)
-        templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster")
-        hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd)
+        templateBy(clusterdeployment.TemplateAzureHostedCP, "creating a ClusterDeployment")
+        hostedDeleteFunc = standaloneClient.CreateClusterDeployment(context.Background(), hd)
-        templateBy(managedcluster.TemplateAzureHostedCP, "Patching AzureCluster to ready")
-        managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAzure, hdName)
+        templateBy(clusterdeployment.TemplateAzureHostedCP, "Patching AzureCluster to ready")
+        clusterdeployment.PatchHostedClusterReady(standaloneClient, clusterdeployment.ProviderAzure, hdName)
-        templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully")
-        deploymentValidator = managedcluster.NewProviderValidator(
-            managedcluster.TemplateAzureHostedCP,
+        templateBy(clusterdeployment.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully")
+        deploymentValidator = clusterdeployment.NewProviderValidator(
+            clusterdeployment.TemplateAzureHostedCP,
             hdName,
-            managedcluster.ValidationActionDeploy,
+            clusterdeployment.ValidationActionDeploy,
         )
         Eventually(func() error {
@@ -153,20 +153,20 @@ var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Or
         err = standaloneDeleteFunc()
         Expect(err).NotTo(HaveOccurred())
-        deploymentValidator = managedcluster.NewProviderValidator(
-            managedcluster.TemplateAzureHostedCP,
+        deploymentValidator = clusterdeployment.NewProviderValidator(
+            clusterdeployment.TemplateAzureHostedCP,
             hdName,
-            managedcluster.ValidationActionDelete,
+            clusterdeployment.ValidationActionDelete,
         )
         Eventually(func() error {
             return deploymentValidator.Validate(context.Background(), standaloneClient)
         }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
-        deploymentValidator = managedcluster.NewProviderValidator(
-            managedcluster.TemplateAzureStandaloneCP,
+        deploymentValidator = clusterdeployment.NewProviderValidator(
+            clusterdeployment.TemplateAzureStandaloneCP,
             hdName,
-            managedcluster.ValidationActionDelete,
+            clusterdeployment.ValidationActionDelete,
         )
         Eventually(func() error {
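Credential wiring is identical across the AWS, Azure, and vSphere specs: clusteridentity.New provisions the provider credential objects (the Azure spec invokes it under "Create azure credential secret"), and the returned identity name is exported through the matching EnvVar* constant so the template under test can reference it. A short sketch, assuming the constants renamed in this patch:

    // Create the provider's cluster identity and expose its name to the templates.
    ci := clusteridentity.New(kc, clusterdeployment.ProviderAzure)
    Expect(os.Setenv(clusterdeployment.EnvVarAzureClusterIdentity, ci.IdentityName)).Should(Succeed())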
"github.com/onsi/gomega" internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment/clusteridentity" + "github.com/Mirantis/hmc/test/e2e/clusterdeployment/vsphere" "github.com/Mirantis/hmc/test/e2e/kubeclient" - "github.com/Mirantis/hmc/test/e2e/managedcluster" - "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" - "github.com/Mirantis/hmc/test/e2e/managedcluster/vsphere" ) var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() { @@ -43,9 +43,9 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere" By("creating kube client") kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) By("providing cluster identity") - ci := clusteridentity.New(kc, managedcluster.ProviderVSphere) + ci := clusteridentity.New(kc, clusterdeployment.ProviderVSphere) By("setting VSPHERE_CLUSTER_IDENTITY env variable") - Expect(os.Setenv(managedcluster.EnvVarVSphereClusterIdentity, ci.IdentityName)).Should(Succeed()) + Expect(os.Setenv(clusterdeployment.EnvVarVSphereClusterIdentity, ci.IdentityName)).Should(Succeed()) }) AfterEach(func() { @@ -53,7 +53,7 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere" // as well as the output of clusterctl to store as artifacts. if CurrentSpecReport().Failed() { By("collecting failure logs from controllers") - collectLogArtifacts(kc, clusterName, managedcluster.ProviderVSphere, managedcluster.ProviderCAPI) + collectLogArtifacts(kc, clusterName, clusterdeployment.ProviderVSphere, clusterdeployment.ProviderCAPI) } // Run the deletion as part of the cleanup and validate it here. @@ -64,10 +64,10 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere" // 'dev-aws-nuke' to clean up resources in the event that the test // fails to do so. 
         if deleteFunc != nil && !noCleanup() {
-            deletionValidator := managedcluster.NewProviderValidator(
-                managedcluster.TemplateVSphereStandaloneCP,
+            deletionValidator := clusterdeployment.NewProviderValidator(
+                clusterdeployment.TemplateVSphereStandaloneCP,
                 clusterName,
-                managedcluster.ValidationActionDelete,
+                clusterdeployment.ValidationActionDelete,
             )
             err = deleteFunc()
@@ -80,16 +80,16 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"
     It("should deploy standalone managed cluster", func() {
         By("creating a managed cluster")
-        d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP)
+        d := clusterdeployment.GetUnstructured(clusterdeployment.TemplateVSphereStandaloneCP)
         clusterName = d.GetName()
-        deleteFunc = kc.CreateManagedCluster(context.Background(), d)
+        deleteFunc = kc.CreateClusterDeployment(context.Background(), d)
         By("waiting for infrastructure providers to deploy successfully")
-        deploymentValidator := managedcluster.NewProviderValidator(
-            managedcluster.TemplateVSphereStandaloneCP,
+        deploymentValidator := clusterdeployment.NewProviderValidator(
+            clusterdeployment.TemplateVSphereStandaloneCP,
             clusterName,
-            managedcluster.ValidationActionDeploy,
+            clusterdeployment.ValidationActionDeploy,
         )
         Eventually(func() error {
             return deploymentValidator.Validate(context.Background(), kc)
diff --git a/test/objects/managedcluster/managedcluster.go b/test/objects/clusterdeployment/clusterdeployment.go
similarity index 74%
rename from test/objects/managedcluster/managedcluster.go
rename to test/objects/clusterdeployment/clusterdeployment.go
index 7310cb49e..2fcfb134f 100644
--- a/test/objects/managedcluster/managedcluster.go
+++ b/test/objects/clusterdeployment/clusterdeployment.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package managedcluster
+package clusterdeployment
 import (
     apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -22,14 +22,14 @@ import (
 )
 const (
-    DefaultName      = "managedcluster"
+    DefaultName      = "clusterdeployment"
     DefaultNamespace = metav1.NamespaceDefault
 )
-type Opt func(managedCluster *v1alpha1.ManagedCluster)
+type Opt func(clusterDeployment *v1alpha1.ClusterDeployment)
-func NewManagedCluster(opts ...Opt) *v1alpha1.ManagedCluster {
-    p := &v1alpha1.ManagedCluster{
+func NewClusterDeployment(opts ...Opt) *v1alpha1.ClusterDeployment {
+    p := &v1alpha1.ClusterDeployment{
         ObjectMeta: metav1.ObjectMeta{
             Name:      DefaultName,
             Namespace: DefaultNamespace,
@@ -43,31 +43,31 @@ func NewManagedCluster(opts ...Opt) *v1alpha1.ManagedCluster {
 }
 func WithName(name string) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Name = name
     }
 }
 func WithNamespace(namespace string) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Namespace = namespace
     }
 }
 func WithDryRun(dryRun bool) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Spec.DryRun = dryRun
     }
 }
 func WithClusterTemplate(templateName string) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Spec.Template = templateName
     }
 }
 func WithConfig(config string) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Spec.Config = &apiextensionsv1.JSON{
             Raw: []byte(config),
         }
@@ -75,7 +75,7 @@ func WithConfig(config string) Opt {
     }
 }
 func WithServiceTemplate(templateName string) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Spec.Services = append(p.Spec.Services, v1alpha1.ServiceSpec{
             Template: templateName,
         })
@@ -83,13 +83,13 @@ func WithServiceTemplate(templateName string) Opt {
     }
 }
 func WithCredential(credName string) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Spec.Credential = credName
     }
 }
 func WithAvailableUpgrades(availableUpgrades []string) Opt {
-    return func(p *v1alpha1.ManagedCluster) {
+    return func(p *v1alpha1.ClusterDeployment) {
         p.Status.AvailableUpgrades = availableUpgrades
     }
 }
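For reference, a minimal sketch of how the renamed functional-options builder reads in a unit test; the option names are the ones defined above, while the literal values are illustrative only:

    // Build a ClusterDeployment test object, overriding the package defaults.
    cd := clusterdeployment.NewClusterDeployment(
        clusterdeployment.WithName("test-deployment"),
        clusterdeployment.WithNamespace("hmc-system"),
        clusterdeployment.WithClusterTemplate("aws-standalone-cp"),
        clusterdeployment.WithDryRun(true),
    )
    // cd is a *v1alpha1.ClusterDeployment with metadata, Spec.Template,
    // and Spec.DryRun populated in place of DefaultName/DefaultNamespace.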