From 359b099953fb4464451ceea7a407e67fa5767eaa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:26:10 +0000 Subject: [PATCH 01/29] Bump helm.sh/helm/v3 from 3.16.1 to 3.16.2 Bumps [helm.sh/helm/v3](https://github.com/helm/helm) from 3.16.1 to 3.16.2. - [Release notes](https://github.com/helm/helm/releases) - [Commits](https://github.com/helm/helm/compare/v3.16.1...v3.16.2) --- updated-dependencies: - dependency-name: helm.sh/helm/v3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 02500fa44..53fd2330f 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/segmentio/analytics-go v3.1.0+incompatible github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v3 v3.0.1 - helm.sh/helm/v3 v3.16.1 + helm.sh/helm/v3 v3.16.2 k8s.io/api v0.31.1 k8s.io/apiextensions-apiserver v0.31.1 k8s.io/apimachinery v0.31.1 diff --git a/go.sum b/go.sum index 345e1c75a..d25c40733 100644 --- a/go.sum +++ b/go.sum @@ -530,8 +530,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.16.1 h1:cER6tI/8PgUAsaJaQCVBUg3VI9KN4oVaZJgY60RIc0c= -helm.sh/helm/v3 v3.16.1/go.mod h1:r+xBHHP20qJeEqtvBXMf7W35QDJnzY/eiEBzt+TfHps= +helm.sh/helm/v3 v3.16.2 h1:Y9v7ry+ubQmi+cb5zw1Llx8OKHU9Hk9NQ/+P+LGBe2o= +helm.sh/helm/v3 v3.16.2/go.mod h1:SyTXgKBjNqi2NPsHCW5dDAsHqvGIu0kdNYNH9gQaw70= k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= From aba826a73ce27cd78b600633ab19a131322ba968 Mon Sep 17 00:00:00 2001 From: slysunkin <72824793+slysunkin@users.noreply.github.com> Date: Thu, 10 Oct 2024 20:42:49 -0500 Subject: [PATCH 02/29] Added EKS template (#430) --- Makefile | 2 + config/dev/eks-managedcluster.yaml | 11 ++ docs/dev.md | 8 + templates/cluster/aws-eks/.helmignore | 23 +++ templates/cluster/aws-eks/Chart.yaml | 13 ++ .../cluster/aws-eks/templates/_helpers.tpl | 19 +++ .../templates/awsmachinetemplate-worker.yaml | 22 +++ .../aws-eks/templates/awsmanagedcluster.yaml | 7 + .../templates/awsmanagedcontrolplane.yaml | 14 ++ .../cluster/aws-eks/templates/cluster.yaml | 17 ++ .../aws-eks/templates/eksconfigtemplate.yaml | 6 + .../aws-eks/templates/machinedeployment.yaml | 26 ++++ templates/cluster/aws-eks/values.schema.json | 146 ++++++++++++++++++ templates/cluster/aws-eks/values.yaml | 34 ++++ .../cluster-api-provider-aws/Chart.yaml | 2 + .../files/templates/aws-eks-0-0-1.yaml | 10 ++ 16 files changed, 360 insertions(+) create mode 100644 config/dev/eks-managedcluster.yaml create mode 100644 templates/cluster/aws-eks/.helmignore create mode 100644 templates/cluster/aws-eks/Chart.yaml create mode 100644 templates/cluster/aws-eks/templates/_helpers.tpl create mode 100644 templates/cluster/aws-eks/templates/awsmachinetemplate-worker.yaml create mode 100644 templates/cluster/aws-eks/templates/awsmanagedcluster.yaml create mode 100644 templates/cluster/aws-eks/templates/awsmanagedcontrolplane.yaml create mode 100644 
templates/cluster/aws-eks/templates/cluster.yaml create mode 100644 templates/cluster/aws-eks/templates/eksconfigtemplate.yaml create mode 100644 templates/cluster/aws-eks/templates/machinedeployment.yaml create mode 100644 templates/cluster/aws-eks/values.schema.json create mode 100644 templates/cluster/aws-eks/values.yaml create mode 100644 templates/provider/hmc-templates/files/templates/aws-eks-0-0-1.yaml diff --git a/Makefile b/Makefile index 41da926f6..51247bdd0 100644 --- a/Makefile +++ b/Makefile @@ -319,6 +319,8 @@ dev-azure-creds: envsubst dev-vsphere-creds: envsubst @NAMESPACE=$(NAMESPACE) $(ENVSUBST) -no-unset -i config/dev/vsphere-credentials.yaml | $(KUBECTL) apply -f - +dev-eks-creds: dev-aws-creds + .PHONY: dev-apply ## Apply the development environment by deploying the kind cluster, local registry and the HMC helm chart. dev-apply: kind-deploy registry-deploy dev-push dev-deploy dev-templates dev-release diff --git a/config/dev/eks-managedcluster.yaml b/config/dev/eks-managedcluster.yaml new file mode 100644 index 000000000..a6731163a --- /dev/null +++ b/config/dev/eks-managedcluster.yaml @@ -0,0 +1,11 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: eks-dev + namespace: ${NAMESPACE} +spec: + config: + region: us-east-2 + workersNumber: 1 + template: aws-eks-0-0-1 + credential: "aws-cluster-identity-cred" diff --git a/docs/dev.md b/docs/dev.md index 19b40cec4..395fc2e6e 100644 --- a/docs/dev.md +++ b/docs/dev.md @@ -67,6 +67,14 @@ full explanation for each parameter visit [vSphere cluster parameters](cluster-parameters.md) and [vSphere machine parameters](machine-parameters.md). +### EKS Provider Setup + +To properly deploy a dev cluster, you need to have the following variable set: + +- `DEV_PROVIDER` - should be "eks" + +The rest of the deployment procedure is the same as for other providers. + ## Deploy HMC Default provider which will be used to deploy cluster is AWS, if you want to use diff --git a/templates/cluster/aws-eks/.helmignore b/templates/cluster/aws-eks/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/templates/cluster/aws-eks/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/templates/cluster/aws-eks/Chart.yaml b/templates/cluster/aws-eks/Chart.yaml new file mode 100644 index 000000000..56513523a --- /dev/null +++ b/templates/cluster/aws-eks/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: aws-eks +description: | + An HMC template to deploy a cluster on EKS with bootstrapped control plane nodes. +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 +annotations: + hmc.mirantis.com/infrastructure-providers: aws + hmc.mirantis.com/controlplane-providers: eks + hmc.mirantis.com/bootstrap-providers: eks diff --git a/templates/cluster/aws-eks/templates/_helpers.tpl b/templates/cluster/aws-eks/templates/_helpers.tpl new file mode 100644 index 000000000..84d7d35f3 --- /dev/null +++ b/templates/cluster/aws-eks/templates/_helpers.tpl @@ -0,0 +1,19 @@ +{{- define "cluster.name" -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{- define "awsmachinetemplate.worker.name" -}} + {{- include "cluster.name" . }}-worker-mt +{{- end }} + +{{- define "machinedeployment.name" -}} + {{- include "cluster.name" . }}-md +{{- end }} + +{{- define "awsmanagedcontrolplane.name" -}} + {{- include "cluster.name" . }}-cp +{{- end }} + +{{- define "eksconfigtemplate.name" -}} + {{- include "cluster.name" . }}-machine-config +{{- end }} diff --git a/templates/cluster/aws-eks/templates/awsmachinetemplate-worker.yaml b/templates/cluster/aws-eks/templates/awsmachinetemplate-worker.yaml new file mode 100644 index 000000000..4cf5d7b13 --- /dev/null +++ b/templates/cluster/aws-eks/templates/awsmachinetemplate-worker.yaml @@ -0,0 +1,22 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSMachineTemplate +metadata: + name: {{ include "awsmachinetemplate.worker.name" . }} +spec: + template: + spec: + {{- if not (quote .Values.worker.amiID | empty) }} + ami: + id: {{ .Values.worker.amiID }} + {{- end }} + imageLookupFormat: {{ .Values.worker.imageLookup.format }} + imageLookupOrg: {{ .Values.worker.imageLookup.org }} + imageLookupBaseOS: {{ .Values.worker.imageLookup.baseOS }} + instanceType: {{ .Values.worker.instanceType }} + iamInstanceProfile: {{ .Values.worker.iamInstanceProfile }} + publicIP: {{ .Values.publicIP }} + rootVolume: + size: {{ .Values.worker.rootVolumeSize }} + {{- if not (quote .Values.sshKeyName | empty) }} + sshKeyName: {{ .Values.sshKeyName | quote }} + {{- end }} diff --git a/templates/cluster/aws-eks/templates/awsmanagedcluster.yaml b/templates/cluster/aws-eks/templates/awsmanagedcluster.yaml new file mode 100644 index 000000000..4eabcd383 --- /dev/null +++ b/templates/cluster/aws-eks/templates/awsmanagedcluster.yaml @@ -0,0 +1,7 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSManagedCluster +metadata: + name: {{ include "cluster.name" . }} + annotations: + aws.cluster.x-k8s.io/external-resource-gc: "true" +spec: {} diff --git a/templates/cluster/aws-eks/templates/awsmanagedcontrolplane.yaml b/templates/cluster/aws-eks/templates/awsmanagedcontrolplane.yaml new file mode 100644 index 000000000..6807e5033 --- /dev/null +++ b/templates/cluster/aws-eks/templates/awsmanagedcontrolplane.yaml @@ -0,0 +1,14 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +kind: AWSManagedControlPlane +metadata: + name: {{ include "awsmanagedcontrolplane.name" . 
}} + namespace: hmc-system +spec: + region: {{ .Values.region }} + {{- if not (quote .Values.sshKeyName | empty) }} + sshKeyName: {{ .Values.sshKeyName | quote }} + {{- end }} + version: {{ .Values.kubernetes.version }} + identityRef: + kind: {{ .Values.clusterIdentity.kind }} + name: {{ .Values.clusterIdentity.name }} diff --git a/templates/cluster/aws-eks/templates/cluster.yaml b/templates/cluster/aws-eks/templates/cluster.yaml new file mode 100644 index 000000000..dca896de1 --- /dev/null +++ b/templates/cluster/aws-eks/templates/cluster.yaml @@ -0,0 +1,17 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: {{ include "cluster.name" . }} +spec: + {{- with .Values.clusterNetwork }} + clusterNetwork: + {{- toYaml . | nindent 4 }} + {{- end }} + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: AWSManagedControlPlane + name: {{ include "awsmanagedcontrolplane.name" . }} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: AWSManagedCluster + name: {{ include "cluster.name" . }} diff --git a/templates/cluster/aws-eks/templates/eksconfigtemplate.yaml b/templates/cluster/aws-eks/templates/eksconfigtemplate.yaml new file mode 100644 index 000000000..cabd6204c --- /dev/null +++ b/templates/cluster/aws-eks/templates/eksconfigtemplate.yaml @@ -0,0 +1,6 @@ +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 +kind: EKSConfigTemplate +metadata: + name: {{ include "eksconfigtemplate.name" . }} +spec: + template: {} diff --git a/templates/cluster/aws-eks/templates/machinedeployment.yaml b/templates/cluster/aws-eks/templates/machinedeployment.yaml new file mode 100644 index 000000000..d177494eb --- /dev/null +++ b/templates/cluster/aws-eks/templates/machinedeployment.yaml @@ -0,0 +1,26 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: {{ include "machinedeployment.name" . }} +spec: + clusterName: {{ include "cluster.name" . }} + replicas: {{ .Values.workersNumber }} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: {{ include "cluster.name" . }} + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: {{ include "cluster.name" . }} + spec: + version: {{ .Values.kubernetes.version }} + clusterName: {{ include "cluster.name" . }} + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: EKSConfigTemplate + name: {{ include "eksconfigtemplate.name" . }} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: AWSMachineTemplate + name: {{ include "awsmachinetemplate.worker.name" . 
}} diff --git a/templates/cluster/aws-eks/values.schema.json b/templates/cluster/aws-eks/values.schema.json new file mode 100644 index 000000000..acabd799c --- /dev/null +++ b/templates/cluster/aws-eks/values.schema.json @@ -0,0 +1,146 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "An HMC template to deploy a ManagedCluster on EKS.", + "type": "object", + "required": [ + "workersNumber", + "region", + "clusterIdentity" + ], + "properties": { + "workersNumber": { + "description": "The number of the worker machines", + "type": "number", + "minimum": 1 + }, + "clusterNetwork": { + "type": "object", + "properties": { + "pods": { + "type": "object", + "properties": { + "cidrBlocks": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true + } + } + }, + "services": { + "type": "object", + "properties": { + "cidrBlocks": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "uniqueItems": true + } + } + } + } + }, + "region": { + "description": "AWS region to deploy the cluster in", + "type": "string" + }, + "sshKeyName": { + "description": "The name of the key pair to securely connect to your instances. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)", + "type": [ + "string", + "null" + ] + }, + "publicIP": { + "description": "Specifies whether the instance should get a public IP", + "type": "boolean" + }, + "clusterIdentity": { + "type": "object", + "description": "AWS Cluster Identity object reference", + "required": [ + "name", + "kind" + ], + "properties": { + "name": { + "description": "AWS ClusterIdentity object name", + "type": "string" + }, + "kind": { + "description": "AWS ClusterIdentity object kind", + "type": "string" + } + } + }, + "worker": { + "description": "The configuration of the worker machines", + "type": "object", + "required": [ + "iamInstanceProfile", + "instanceType" + ], + "properties": { + "amiID": { + "description": "The ID of Amazon Machine Image", + "type": "string" + }, + "iamInstanceProfile": { + "description": "The name of an IAM instance profile to assign to the instance", + "type": "string" + }, + "instanceType": { + "description": "The type of instance to create", + "type": "string" + }, + "additionalSecurityGroupIDs": { + "description": "An array of references to security groups that should be applied to the instance", + "type": "array" + }, + "rootVolumeSize": { + "description": "The size of the root volume of the instance (GB)", + "type": "integer" + }, + "imageLookup": { + "description": "AMI lookup parameters", + "type": "object", + "required": [ + "format", + "org" + ], + "properties": { + "format": { + "description": "Format string which will be used for image lookup", + "type": "string" + }, + "org": { + "description": "AWS org ID which owns the AMI", + "type": "string" + }, + "baseOS": { + "description": "OS name which can be used in format string", + "type": "string" + } + } + } + } + }, + "kubernetes": { + "description": "Kubernetes parameters", + "type": "object", + "required": [ + "version" + ], + "properties": { + "version": { + "description": "Kubernetes version to use", + "type": "string" + } + } + } + } +} diff --git a/templates/cluster/aws-eks/values.yaml b/templates/cluster/aws-eks/values.yaml new file mode 100644 index 000000000..188b7818b --- /dev/null +++ b/templates/cluster/aws-eks/values.yaml @@ -0,0 +1,34 @@ +# Cluster parameters +workersNumber: 1 + +clusterNetwork: + 
pods: + cidrBlocks: + - "10.244.0.0/16" + services: + cidrBlocks: + - "10.96.0.0/12" + +# EKS cluster parameters +region: "" +sshKeyName: "" +publicIP: false + +clusterIdentity: + name: "aws-cluster-identity" + kind: "AWSClusterStaticIdentity" + +# EKS machines parameters +worker: + amiID: "" + iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io + instanceType: "t3.small" + rootVolumeSize: 30 + imageLookup: + format: "" + org: "" + baseOS: "" + +# Kubernetes version +kubernetes: + version: v1.30.4 diff --git a/templates/provider/cluster-api-provider-aws/Chart.yaml b/templates/provider/cluster-api-provider-aws/Chart.yaml index 1b5fb9bdd..1645247aa 100644 --- a/templates/provider/cluster-api-provider-aws/Chart.yaml +++ b/templates/provider/cluster-api-provider-aws/Chart.yaml @@ -21,3 +21,5 @@ version: 0.0.1 appVersion: "2.6.1" annotations: hmc.mirantis.com/infrastructure-providers: aws + hmc.mirantis.com/controlplane-providers: eks + hmc.mirantis.com/bootstrap-providers: eks diff --git a/templates/provider/hmc-templates/files/templates/aws-eks-0-0-1.yaml b/templates/provider/hmc-templates/files/templates/aws-eks-0-0-1.yaml new file mode 100644 index 000000000..453af94ae --- /dev/null +++ b/templates/provider/hmc-templates/files/templates/aws-eks-0-0-1.yaml @@ -0,0 +1,10 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ClusterTemplate +metadata: + name: aws-eks-0-0-1 + annotations: + helm.sh/resource-policy: keep +spec: + helm: + chartName: aws-eks + chartVersion: 0.0.1 From 41b36beece0ac4843e69394f26bb8721258ffb87 Mon Sep 17 00:00:00 2001 From: Andrey Pavlov Date: Fri, 11 Oct 2024 21:36:23 +0700 Subject: [PATCH 03/29] Add rbac roles for new resources (#469) --- .../user-facing/capi-identities-editor.yaml | 15 ++++++++++++++ .../user-facing/capi-identities-viewer.yaml | 15 ++++++++++++++ .../rbac/user-facing/clusters-editor.yaml | 7 ++----- .../rbac/user-facing/clusters-viewer.yaml | 7 ++----- .../clustertemplatechains-creator.yaml | 19 ++++++++++++++++++ .../clustertemplatechains-viewer.yaml | 14 +++++++++++++ ...tor.yaml => clustertemplates-creator.yaml} | 6 ++++-- ...ewer.yaml => clustertemplates-viewer.yaml} | 6 ++++-- .../rbac/user-facing/credentials-editor.yaml | 12 +++++++++++ .../rbac/user-facing/credentials-viewer.yaml | 13 ++++++++++++ .../rbac/user-facing/hmc-global-admin.yaml | 12 +++++++++++ .../rbac/user-facing/hmc-global-viewer.yaml | 10 ++++++++++ .../rbac/user-facing/hmc-namespace-admin.yaml | 10 ++++++++++ .../user-facing/hmc-namespace-editor.yaml | 8 ++++++++ .../user-facing/hmc-namespace-viewer.yaml | 8 ++++++++ .../rbac/user-facing/management-editor.yaml | 3 +++ .../rbac/user-facing/management-viewer.yaml | 2 ++ .../multiclusterservices-editor.yaml | 17 ++++++++++++++++ .../multiclusterservices-viewer.yaml | 13 ++++++++++++ .../rbac/user-facing/namespaces-editor.yaml | 12 +++++++++++ .../rbac/user-facing/namespaces-viewer.yaml | 12 +++++++++++ .../rbac/user-facing/secrets-editor.yaml | 12 +++++++++++ .../servicetemplatechains-creator.yaml | 19 ++++++++++++++++++ .../servicetemplatechains-viewer.yaml | 14 +++++++++++++ .../user-facing/servicetemplates-creator.yaml | 20 +++++++++++++++++++ .../user-facing/servicetemplates-viewer.yaml | 19 ++++++++++++++++++ .../templatemanagement-editor.yaml | 9 ++------- .../templatemanagement-viewer.yaml | 4 ++-- 28 files changed, 295 insertions(+), 23 deletions(-) create mode 100644 templates/provider/hmc/templates/rbac/user-facing/capi-identities-editor.yaml create mode 100644 
templates/provider/hmc/templates/rbac/user-facing/capi-identities-viewer.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-creator.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-viewer.yaml rename templates/provider/hmc/templates/rbac/user-facing/{templates-creator.yaml => clustertemplates-creator.yaml} (72%) rename templates/provider/hmc/templates/rbac/user-facing/{templates-viewer.yaml => clustertemplates-viewer.yaml} (65%) create mode 100644 templates/provider/hmc/templates/rbac/user-facing/credentials-editor.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/credentials-viewer.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/hmc-global-admin.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/hmc-global-viewer.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-admin.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-editor.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-viewer.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-editor.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-viewer.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/namespaces-editor.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/namespaces-viewer.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/secrets-editor.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-creator.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-viewer.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/servicetemplates-creator.yaml create mode 100644 templates/provider/hmc/templates/rbac/user-facing/servicetemplates-viewer.yaml diff --git a/templates/provider/hmc/templates/rbac/user-facing/capi-identities-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/capi-identities-editor.yaml new file mode 100644 index 000000000..bc1749db6 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/capi-identities-editor.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-capi-identities-editor-role + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" +rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsclusterroleidentities + - awsclusterstaticidentities + - azureclusteridentities + - vsphereclusteridentities + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/capi-identities-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/capi-identities-viewer.yaml new file mode 100644 index 000000000..0cd348179 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/capi-identities-viewer.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . 
}}-capi-identities-viewer-role + labels: + hmc.mirantis.com/aggregate-to-global-viewer: "true" +rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsclusterroleidentities + - awsclusterstaticidentities + - azureclusteridentities + - vsphereclusteridentities + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml index 73ffcf190..9beeeab3e 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/clusters-editor.yaml @@ -2,14 +2,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "hmc.fullname" . }}-clusters-editor-role + labels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" rules: - apiGroups: - hmc.mirantis.com resources: - managedclusters verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} - - apiGroups: - - hmc.mirantis.com - resources: - - multiclusterservices - verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml index d55724fa7..f3a971c5b 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/clusters-viewer.yaml @@ -2,14 +2,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "hmc.fullname" . }}-clusters-viewer-role + labels: + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" rules: - apiGroups: - hmc.mirantis.com resources: - managedclusters verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} - - apiGroups: - - hmc.mirantis.com - resources: - - multiclusterservices - verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-creator.yaml b/templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-creator.yaml new file mode 100644 index 000000000..a3a186286 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-creator.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-clustertemplatechains-creator-role + labels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - clustertemplatechains + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} + - create + - delete + - apiGroups: + - hmc.mirantis.com + resources: + - clustertemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-viewer.yaml new file mode 100644 index 000000000..c795d4876 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/clustertemplatechains-viewer.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . 
}}-clustertemplatechains-viewer-role + labels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - clustertemplatechains + - clustertemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/templates-creator.yaml b/templates/provider/hmc/templates/rbac/user-facing/clustertemplates-creator.yaml similarity index 72% rename from templates/provider/hmc/templates/rbac/user-facing/templates-creator.yaml rename to templates/provider/hmc/templates/rbac/user-facing/clustertemplates-creator.yaml index a0f68e913..3824a4df7 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/templates-creator.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/clustertemplates-creator.yaml @@ -1,15 +1,17 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "hmc.fullname" . }}-templates-creator-role + name: {{ include "hmc.fullname" . }}-clustertemplates-creator-role + labels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" rules: - apiGroups: - hmc.mirantis.com resources: - clustertemplates - - servicetemplates verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} - create + - delete - apiGroups: - helm.toolkit.fluxcd.io resources: diff --git a/templates/provider/hmc/templates/rbac/user-facing/templates-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/clustertemplates-viewer.yaml similarity index 65% rename from templates/provider/hmc/templates/rbac/user-facing/templates-viewer.yaml rename to templates/provider/hmc/templates/rbac/user-facing/clustertemplates-viewer.yaml index 53e4c245e..f427f76ca 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/templates-viewer.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/clustertemplates-viewer.yaml @@ -1,13 +1,15 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "hmc.fullname" . }}-templates-viewer-role + name: {{ include "hmc.fullname" . }}-clustertemplates-viewer-role + labels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" rules: - apiGroups: - hmc.mirantis.com resources: - clustertemplates - - servicetemplates verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} - apiGroups: - helm.toolkit.fluxcd.io diff --git a/templates/provider/hmc/templates/rbac/user-facing/credentials-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/credentials-editor.yaml new file mode 100644 index 000000000..5f8b3b780 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/credentials-editor.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-credentials-editor-role + labels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - credentials + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/credentials-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/credentials-viewer.yaml new file mode 100644 index 000000000..e20688ce8 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/credentials-viewer.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . 
}}-credentials-viewer-role + labels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - credentials + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/hmc-global-admin.yaml b/templates/provider/hmc/templates/rbac/user-facing/hmc-global-admin.yaml new file mode 100644 index 000000000..8585779da --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/hmc-global-admin.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-global-admin-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + hmc.mirantis.com/aggregate-to-global-admin: "true" + - matchLabels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" + - matchLabels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" diff --git a/templates/provider/hmc/templates/rbac/user-facing/hmc-global-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/hmc-global-viewer.yaml new file mode 100644 index 000000000..477ffc772 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/hmc-global-viewer.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-global-viewer-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + hmc.mirantis.com/aggregate-to-global-viewer: "true" + - matchLabels: + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" diff --git a/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-admin.yaml b/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-admin.yaml new file mode 100644 index 000000000..f777031aa --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-admin.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-namespace-admin-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" + - matchLabels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" diff --git a/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-editor.yaml new file mode 100644 index 000000000..317e8e6cd --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-editor.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-namespace-editor-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" diff --git a/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-viewer.yaml new file mode 100644 index 000000000..197d6b05e --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/hmc-namespace-viewer.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . 
}}-namespace-viewer-role +aggregationRule: + clusterRoleSelectors: + - matchLabels: + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" diff --git a/templates/provider/hmc/templates/rbac/user-facing/management-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/management-editor.yaml index 4ff8ea863..7a91e7874 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/management-editor.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/management-editor.yaml @@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "hmc.fullname" . }}-management-editor-role + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" rules: - apiGroups: - hmc.mirantis.com @@ -14,3 +16,4 @@ rules: - providertemplates verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} - create + - delete diff --git a/templates/provider/hmc/templates/rbac/user-facing/management-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/management-viewer.yaml index eb8a6308d..0cc05827f 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/management-viewer.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/management-viewer.yaml @@ -2,6 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "hmc.fullname" . }}-management-viewer-role + labels: + hmc.mirantis.com/aggregate-to-global-viewer: "true" rules: - apiGroups: - hmc.mirantis.com diff --git a/templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-editor.yaml new file mode 100644 index 000000000..da489e758 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-editor.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-multiclusterservices-editor-role + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - multiclusterservices + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} + - apiGroups: + - hmc.mirantis.com + resources: + - servicetemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-viewer.yaml new file mode 100644 index 000000000..565b0e294 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/multiclusterservices-viewer.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-multiclusterservices-viewer-role + labels: + hmc.mirantis.com/aggregate-to-global-viewer: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - multiclusterservices + - servicetemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/namespaces-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/namespaces-editor.yaml new file mode 100644 index 000000000..1eeeb02ec --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/namespaces-editor.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . 
}}-namespaces-editor-role + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/namespaces-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/namespaces-viewer.yaml new file mode 100644 index 000000000..37f6afbce --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/namespaces-viewer.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-namespaces-viewer-role + labels: + hmc.mirantis.com/aggregate-to-global-viewer: "true" +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/secrets-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/secrets-editor.yaml new file mode 100644 index 000000000..9295372dc --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/secrets-editor.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-secrets-editor-role + labels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-creator.yaml b/templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-creator.yaml new file mode 100644 index 000000000..3a7412126 --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-creator.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-servicetemplatechains-creator-role + labels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - servicetemplatechains + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} + - create + - delete + - apiGroups: + - hmc.mirantis.com + resources: + - servicetemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-viewer.yaml new file mode 100644 index 000000000..5796088de --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/servicetemplatechains-viewer.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-servicetemplatechains-viewer-role + labels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - servicetemplatechains + - servicetemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/servicetemplates-creator.yaml b/templates/provider/hmc/templates/rbac/user-facing/servicetemplates-creator.yaml new file mode 100644 index 000000000..8aa8420dd --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/servicetemplates-creator.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . 
}}-servicetemplates-creator-role + labels: + hmc.mirantis.com/aggregate-to-namespace-admin: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - servicetemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} + - create + - delete + - apiGroups: + - helm.toolkit.fluxcd.io + resources: + - helmcharts + - helmrepositories + verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/servicetemplates-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/servicetemplates-viewer.yaml new file mode 100644 index 000000000..8b1d2927e --- /dev/null +++ b/templates/provider/hmc/templates/rbac/user-facing/servicetemplates-viewer.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "hmc.fullname" . }}-servicetemplates-viewer-role + labels: + hmc.mirantis.com/aggregate-to-namespace-editor: "true" + hmc.mirantis.com/aggregate-to-namespace-viewer: "true" +rules: + - apiGroups: + - hmc.mirantis.com + resources: + - servicetemplates + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} + - apiGroups: + - helm.toolkit.fluxcd.io + resources: + - helmcharts + - helmrepositories + verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} diff --git a/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-editor.yaml b/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-editor.yaml index 81a19d634..441ae882b 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-editor.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-editor.yaml @@ -2,16 +2,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "hmc.fullname" . }}-templatemanagement-editor-role + labels: + hmc.mirantis.com/aggregate-to-global-admin: "true" rules: - apiGroups: - hmc.mirantis.com resources: - templatemanagements verbs: {{ include "rbac.editorVerbs" . | nindent 6 }} - - apiGroups: - - hmc.mirantis.com - resources: - - servicetemplatechains - - clustertemplatechains - verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} - - create diff --git a/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-viewer.yaml b/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-viewer.yaml index 35d795f22..67d4713f4 100644 --- a/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-viewer.yaml +++ b/templates/provider/hmc/templates/rbac/user-facing/templatemanagement-viewer.yaml @@ -2,11 +2,11 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "hmc.fullname" . }}-templatemanagement-viewer-role + labels: + hmc.mirantis.com/aggregate-to-global-viewer: "true" rules: - apiGroups: - hmc.mirantis.com resources: - templatemanagements - - clustertemplatechains - - servicetemplatechains verbs: {{ include "rbac.viewerVerbs" . | nindent 6 }} From 50b5e8886324077601af37f7e0ccd78434b05c6c Mon Sep 17 00:00:00 2001 From: Ekaterina Kazakova <41469478+eromanova@users.noreply.github.com> Date: Mon, 14 Oct 2024 21:49:07 +0400 Subject: [PATCH 04/29] Fix chartRef for delivered Templates (#478) Now the chart name also contains the chart version. 
Also fix error handling and adapt testing --- .../controller/templatechain_controller.go | 15 ++++++------- .../templatechain_controller_test.go | 21 +++++++++++-------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/internal/controller/templatechain_controller.go b/internal/controller/templatechain_controller.go index ca2406e7b..da788732a 100644 --- a/internal/controller/templatechain_controller.go +++ b/internal/controller/templatechain_controller.go @@ -19,8 +19,6 @@ import ( "errors" "fmt" - helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2" - sourcev1 "github.com/fluxcd/source-controller/api/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -115,14 +113,14 @@ func (r *TemplateChainReconciler) ReconcileTemplateChain(ctx context.Context, te errs = errors.Join(errs, fmt.Errorf("source %s %s/%s is not found", templateChain.TemplateKind(), r.SystemNamespace, supportedTemplate.Name)) continue } + if source.GetStatus().ChartRef == nil { + errs = errors.Join(errs, fmt.Errorf("source %s %s/%s does not have chart reference yet", templateChain.TemplateKind(), r.SystemNamespace, supportedTemplate.Name)) + continue + } templateSpec := hmc.TemplateSpecCommon{ Helm: hmc.HelmSpec{ - ChartRef: &helmcontrollerv2.CrossNamespaceSourceReference{ - Kind: sourcev1.HelmChartKind, - Name: source.GetSpec().Helm.ChartName, - Namespace: r.SystemNamespace, - }, + ChartRef: source.GetStatus().ChartRef, }, } @@ -161,6 +159,9 @@ func (r *TemplateChainReconciler) ReconcileTemplateChain(ctx context.Context, te } } } + if errs != nil { + return ctrl.Result{}, errs + } return ctrl.Result{}, nil } diff --git a/internal/controller/templatechain_controller_test.go b/internal/controller/templatechain_controller_test.go index 7b3048e9f..a4813fd4c 100644 --- a/internal/controller/templatechain_controller_test.go +++ b/internal/controller/templatechain_controller_test.go @@ -18,6 +18,7 @@ import ( "context" "fmt" + helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -49,7 +50,13 @@ var _ = Describe("Template Chain Controller", func() { }, } - templateHelmSpec := hmcmirantiscomv1alpha1.HelmSpec{ChartName: "test"} + chartName := "test" + templateHelmSpec := hmcmirantiscomv1alpha1.HelmSpec{ChartName: chartName} + chartRef := &helmcontrollerv2.CrossNamespaceSourceReference{ + Kind: "HelmChart", + Namespace: utils.DefaultSystemNamespace, + Name: chartName, + } ctTemplates := map[string]*hmcmirantiscomv1alpha1.ClusterTemplate{ // Should be created in target namespace @@ -125,10 +132,6 @@ var _ = Describe("Template Chain Controller", func() { { Name: "ct0", }, - // Does not exist in the system namespace - { - Name: "ct3", - }, }, ctChain2Name: {}, } @@ -140,10 +143,6 @@ var _ = Describe("Template Chain Controller", func() { { Name: "st0", }, - // Does not exist in the system namespace - { - Name: "st3", - }, }, stChain2Name: {}, } @@ -199,6 +198,8 @@ var _ = Describe("Template Chain Controller", func() { if err != nil && errors.IsNotFound(err) { Expect(k8sClient.Create(ctx, template)).To(Succeed()) } + template.Status.ChartRef = chartRef + Expect(k8sClient.Status().Update(ctx, template)).To(Succeed()) } for name, template := range stTemplates { st := &hmcmirantiscomv1alpha1.ServiceTemplate{} @@ -206,6 +207,8 @@ var _ = Describe("Template Chain Controller", func() { if err != nil && errors.IsNotFound(err) { Expect(k8sClient.Create(ctx, template)).To(Succeed()) } + template.Status.ChartRef = chartRef + Expect(k8sClient.Status().Update(ctx, template)).To(Succeed()) } }) From a6f07003da5eac4634a82d791eca73672e73470f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 17:39:54 +0000 Subject: [PATCH 05/29] Bump github.com/projectsveltos/libsveltos from 0.39.0 to 0.40.0 Bumps [github.com/projectsveltos/libsveltos](https://github.com/projectsveltos/libsveltos) from 0.39.0 to 0.40.0. - [Release notes](https://github.com/projectsveltos/libsveltos/releases) - [Commits](https://github.com/projectsveltos/libsveltos/compare/v0.39.0...v0.40.0) --- updated-dependencies: - dependency-name: github.com/projectsveltos/libsveltos dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 53fd2330f..a7aaff0fe 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/onsi/gomega v1.34.2 github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98 github.com/projectsveltos/addon-controller v0.39.0 - github.com/projectsveltos/libsveltos v0.39.0 + github.com/projectsveltos/libsveltos v0.40.0 github.com/segmentio/analytics-go v3.1.0+incompatible github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v3 v3.0.1 diff --git a/go.sum b/go.sum index d25c40733..4d022ffcf 100644 --- a/go.sum +++ b/go.sum @@ -315,8 +315,8 @@ github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/projectsveltos/addon-controller v0.39.0 h1:hHSqrNICthK/8ZJA+GwDEgRyxg2MthMqwrxclc4tHOs= github.com/projectsveltos/addon-controller v0.39.0/go.mod h1:/ZL/TaEWhqMk6rr3946wPsOdQIJGRdPcAgPcD75m4jM= -github.com/projectsveltos/libsveltos v0.39.0 h1:wSxFKHx1L9gA9g7auKdpCdyCug5vDd4pdkKt/bDZBWQ= -github.com/projectsveltos/libsveltos v0.39.0/go.mod h1:e/E3vkU4Ph1ZcyJ+FaIwTJQOVn2XH0Y/7pLDHhMnWPY= +github.com/projectsveltos/libsveltos v0.40.0 h1:tPu+dWGBi42mTfujXGwXhMvwyfU6FYd8A+olSapJKwI= +github.com/projectsveltos/libsveltos v0.40.0/go.mod h1:e/E3vkU4Ph1ZcyJ+FaIwTJQOVn2XH0Y/7pLDHhMnWPY= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= From 33c5919d96716ea58f76f2f0ba0d62d609780d77 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 01:12:19 +0000 Subject: [PATCH 06/29] Bump github.com/projectsveltos/addon-controller from 0.39.0 to 0.40.0 Bumps [github.com/projectsveltos/addon-controller](https://github.com/projectsveltos/addon-controller) from 0.39.0 to 0.40.0. - [Release notes](https://github.com/projectsveltos/addon-controller/releases) - [Commits](https://github.com/projectsveltos/addon-controller/compare/v0.39.0...v0.40.0) --- updated-dependencies: - dependency-name: github.com/projectsveltos/addon-controller dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 15 +++++++-------- go.sum | 30 ++++++++++++++---------------- 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index a7aaff0fe..477018054 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 github.com/opencontainers/go-digest v1.0.1-0.20231025023718-d50d2fec9c98 - github.com/projectsveltos/addon-controller v0.39.0 + github.com/projectsveltos/addon-controller v0.40.0 github.com/projectsveltos/libsveltos v0.40.0 github.com/segmentio/analytics-go v3.1.0+incompatible github.com/stretchr/testify v1.9.0 @@ -87,7 +87,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241001023024-f4c0cfd0cf1d // indirect + github.com/google/pprof v0.0.0-20241008150032-332c0e1a4a34 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.3 // indirect @@ -150,7 +150,6 @@ require ( go.opentelemetry.io/otel v1.30.0 // indirect go.opentelemetry.io/otel/metric v1.30.0 // indirect go.opentelemetry.io/otel/trace v1.30.0 // indirect - go.starlark.net v0.0.0-20240725214946-42030a7cedce // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.28.0 // indirect @@ -161,12 +160,12 @@ require ( golang.org/x/sys v0.26.0 // indirect golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.6.0 // indirect + golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -174,12 +173,12 @@ require ( k8s.io/cli-runtime v0.31.1 // indirect k8s.io/component-base v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect + k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094 // indirect k8s.io/kubectl v0.31.1 // indirect oras.land/oras-go v1.2.6 // indirect sigs.k8s.io/gateway-api v1.1.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.3 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect + sigs.k8s.io/kustomize/api v0.18.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 4d022ffcf..cc338049b 100644 --- a/go.sum +++ b/go.sum @@ -176,8 +176,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241001023024-f4c0cfd0cf1d h1:Jaz2JzpQaQXyET0AjLBXShrthbpqMkhGiEfkcQAiAUs= -github.com/google/pprof v0.0.0-20241001023024-f4c0cfd0cf1d/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= 
+github.com/google/pprof v0.0.0-20241008150032-332c0e1a4a34 h1:4iExbL0TFWhkSCZx6nfKwjM+CbnBySx18KssYmdL1fc= +github.com/google/pprof v0.0.0-20241008150032-332c0e1a4a34/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -313,8 +313,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/projectsveltos/addon-controller v0.39.0 h1:hHSqrNICthK/8ZJA+GwDEgRyxg2MthMqwrxclc4tHOs= -github.com/projectsveltos/addon-controller v0.39.0/go.mod h1:/ZL/TaEWhqMk6rr3946wPsOdQIJGRdPcAgPcD75m4jM= +github.com/projectsveltos/addon-controller v0.40.0 h1:nbE2Yfjg8XVzZOzq84xWYKadX5Syc8NcES1vEmPqbbk= +github.com/projectsveltos/addon-controller v0.40.0/go.mod h1:xV7/SOBvRIX58vlKblRzJ+TQPeXywhvTnQairvoHIEU= github.com/projectsveltos/libsveltos v0.40.0 h1:tPu+dWGBi42mTfujXGwXhMvwyfU6FYd8A+olSapJKwI= github.com/projectsveltos/libsveltos v0.40.0/go.mod h1:e/E3vkU4Ph1ZcyJ+FaIwTJQOVn2XH0Y/7pLDHhMnWPY= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -433,8 +433,6 @@ go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8d go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.starlark.net v0.0.0-20240725214946-42030a7cedce h1:YyGqCjZtGZJ+mRPaenEiB87afEO2MFRzLiJNZ0Z0bPw= -go.starlark.net v0.0.0-20240725214946-42030a7cedce/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -488,8 +486,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -509,8 +507,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1: google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -548,8 +546,8 @@ k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= -k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= +k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094 h1:MErs8YA0abvOqJ8gIupA1Tz6PKXYUw34XsGlA7uSL1k= +k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094/go.mod h1:7ioBJr1A6igWjsR2fxq2EZ0mlMwYLejazSIc2bzMp2U= k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24= k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM= k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= @@ -564,10 +562,10 @@ sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= -sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc= -sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0= -sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= +sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= +sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= From 835a64046edeea5754ce1ff48937d1bd85e382ec Mon Sep 17 00:00:00 2001 From: zerospiel Date: Fri, 27 Sep 2024 17:25:01 +0200 Subject: [PATCH 07/29] Add support for compatibility attributes * Cluster/Service/Provider Templates now have support for compatibility attributes * 
Managed/Management Clusters can now report the compatibility
  attributes
* amendments to the templates controller to reflect the API changes
* amendments to other parts of the code

Closes #354
---
 .golangci.yml                                 |   2 +-
 api/v1alpha1/clustertemplate_types.go         |  54 ++++++-
 api/v1alpha1/common.go                        |  73 ++++++++--
 api/v1alpha1/managedcluster_types.go          |   3 +
 api/v1alpha1/management_types.go              |  18 +--
 api/v1alpha1/providertemplate_types.go        |  54 ++++++-
 api/v1alpha1/servicetemplate_types.go         |  62 +++++++-
 api/v1alpha1/templates_common.go              | 103 +++++++++++--
 api/v1alpha1/zz_generated.deepcopy.go         |  79 +++++++---
 cmd/main.go                                   |   1 -
 go.mod                                        |   2 +-
 .../controller/managedcluster_controller.go   |   8 +-
 .../managedcluster_controller_test.go         |  14 +-
 internal/controller/management_controller.go  |   8 +-
 .../controller/management_controller_test.go  |   6 +-
 internal/controller/release_controller.go     |   5 +-
 internal/controller/template_controller.go    | 119 ++++++---------
 .../controller/template_controller_test.go    |  24 ++-
 .../controller/templatechain_controller.go    |  83 ++++++-----
 .../templatemanagement_controller.go          |  36 +++--
 internal/telemetry/tracker.go                 |   6 +-
 internal/utils/util.go                        |  38 -----
 internal/webhook/managedcluster_webhook.go    |  19 ++-
 .../webhook/managedcluster_webhook_test.go    |  30 ++--
 .../hmc.mirantis.com_clustertemplates.yaml    | 118 +++++++++++----
 .../hmc.mirantis.com_managedclusters.yaml     |   5 +
 .../crds/hmc.mirantis.com_managements.yaml    |  53 +++++--
 .../hmc.mirantis.com_providertemplates.yaml   | 115 +++++++++++----
 .../hmc.mirantis.com_servicetemplates.yaml    |  20 ++-
 test/objects/management/management.go         |   2 +-
 test/objects/template/template.go             | 137 +++++++++++-------
 31 files changed, 878 insertions(+), 419 deletions(-)
 delete mode 100644 internal/utils/util.go

diff --git a/.golangci.yml b/.golangci.yml
index a2a5e7ed6..f47fe05ba 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -68,7 +68,7 @@ linters-settings:
       - pattern: "interface{}"
         replacement: "any"
   stylecheck:
-    checks: ["all", "-ST1000", "-ST1001"]
+    checks: ["all", "-ST1000", "-ST1001", "-ST1021"]
   revive:
     enable-all-rules: true
     rules:
diff --git a/api/v1alpha1/clustertemplate_types.go b/api/v1alpha1/clustertemplate_types.go
index 36db285e9..0d4127ffa 100644
--- a/api/v1alpha1/clustertemplate_types.go
+++ b/api/v1alpha1/clustertemplate_types.go
@@ -15,6 +15,9 @@
 package v1alpha1
 
 import (
+	"fmt"
+
+	"github.com/Masterminds/semver/v3"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -22,19 +25,62 @@ const ClusterTemplateKind = "ClusterTemplate"
 
 // ClusterTemplateSpec defines the desired state of ClusterTemplate
 type ClusterTemplateSpec struct {
-	TemplateSpecCommon `json:",inline"`
+	Helm HelmSpec `json:"helm"`
+	// Compatible K8S version of the cluster set in the SemVer format.
+	KubertenesVersion string `json:"k8sVersion,omitempty"`
+	// Providers represent required CAPI providers with constrained compatibility versions set. Should be set if not present in the Helm chart metadata.
+	Providers ProvidersTupled `json:"providers,omitempty"`
 }
 
 // ClusterTemplateStatus defines the observed state of ClusterTemplate
 type ClusterTemplateStatus struct {
+	// Compatible K8S version of the cluster set in the SemVer format.
+	KubertenesVersion string `json:"k8sVersion,omitempty"`
+	// Providers represent exposed CAPI providers with constrained compatibility versions set.
+	Providers ProvidersTupled `json:"providers,omitempty"`
+
 	TemplateStatusCommon `json:",inline"`
 }
 
-func (t *ClusterTemplate) GetSpec() *TemplateSpecCommon {
-	return &t.Spec.TemplateSpecCommon
+// FillStatusWithProviders sets the status of the template with providers
+// either from the spec or from the given annotations.
+func (t *ClusterTemplate) FillStatusWithProviders(annotations map[string]string) error {
+	var err error
+	t.Status.Providers.BootstrapProviders, err = parseProviders(t, bootstrapProvidersType, annotations, semver.NewConstraint)
+	if err != nil {
+		return fmt.Errorf("failed to parse ClusterTemplate bootstrap providers: %v", err)
+	}
+
+	t.Status.Providers.ControlPlaneProviders, err = parseProviders(t, controlPlaneProvidersType, annotations, semver.NewConstraint)
+	if err != nil {
+		return fmt.Errorf("failed to parse ClusterTemplate controlPlane providers: %v", err)
+	}
+
+	t.Status.Providers.InfrastructureProviders, err = parseProviders(t, infrastructureProvidersType, annotations, semver.NewConstraint)
+	if err != nil {
+		return fmt.Errorf("failed to parse ClusterTemplate infrastructure providers: %v", err)
+	}
+
+	return nil
+}
+
+// GetSpecProviders returns .spec.providers of the Template.
+func (t *ClusterTemplate) GetSpecProviders() ProvidersTupled {
+	return t.Spec.Providers
+}
+
+// GetStatusProviders returns .status.providers of the Template.
+func (t *ClusterTemplate) GetStatusProviders() ProvidersTupled {
+	return t.Status.Providers
+}
+
+// GetHelmSpec returns .spec.helm of the Template.
+func (t *ClusterTemplate) GetHelmSpec() *HelmSpec {
+	return &t.Spec.Helm
 }
 
-func (t *ClusterTemplate) GetStatus() *TemplateStatusCommon {
+// GetCommonStatus returns common status of the Template.
+func (t *ClusterTemplate) GetCommonStatus() *TemplateStatusCommon {
 	return &t.Status.TemplateStatusCommon
 }
 
diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go
index 7ec364f62..936b442b7 100644
--- a/api/v1alpha1/common.go
+++ b/api/v1alpha1/common.go
@@ -21,15 +21,37 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-// Providers is a structure holding different types of CAPI providers
-type Providers struct {
-	// InfrastructureProviders is the list of CAPI infrastructure providers
-	InfrastructureProviders []string `json:"infrastructure,omitempty"`
-	// BootstrapProviders is the list of CAPI bootstrap providers
-	BootstrapProviders []string `json:"bootstrap,omitempty"`
-	// ControlPlaneProviders is the list of CAPI control plane providers
-	ControlPlaneProviders []string `json:"controlPlane,omitempty"`
-}
+type (
+	// Providers hold different types of CAPI providers.
+	Providers struct {
+		// InfrastructureProviders is the list of CAPI infrastructure providers
+		InfrastructureProviders []string `json:"infrastructure,omitempty"`
+		// BootstrapProviders is the list of CAPI bootstrap providers
+		BootstrapProviders []string `json:"bootstrap,omitempty"`
+		// ControlPlaneProviders is the list of CAPI control plane providers
+		ControlPlaneProviders []string `json:"controlPlane,omitempty"`
+	}
+
+	// Holds different types of CAPI providers with either
+	// an exact or constrained version in the SemVer format. The requirement
+	// is determined by a consumer of this type.
+	ProvidersTupled struct {
+		// List of CAPI infrastructure providers with either an exact or constrained version in the SemVer format.
+		InfrastructureProviders []ProviderTuple `json:"infrastructure,omitempty"`
+		// List of CAPI bootstrap providers with either an exact or constrained version in the SemVer format.
+		BootstrapProviders []ProviderTuple `json:"bootstrap,omitempty"`
+		// List of CAPI control plane providers with either an exact or constrained version in the SemVer format.
+		ControlPlaneProviders []ProviderTuple `json:"controlPlane,omitempty"`
+	}
+
+	// Represents the name of the provider with either an exact or constrained version in the SemVer format.
+	ProviderTuple struct {
+		// Name of the provider.
+		Name string `json:"name,omitempty"`
+		// Compatibility restriction in the SemVer format (exact or constrained version).
+		VersionOrContraint string `json:"versionOrContraint,omitempty"`
+	}
+)
 
 const (
 	// Provider CAPA
@@ -104,3 +126,36 @@ func ExtractServiceTemplateName(rawObj client.Object) []string {
 
 	return templates
 }
+
+func (c ProvidersTupled) BootstrapProvidersNames() []string {
+	return c.names(bootstrapProvidersType)
+}
+
+func (c ProvidersTupled) ControlPlaneProvidersNames() []string {
+	return c.names(controlPlaneProvidersType)
+}
+
+func (c ProvidersTupled) InfrastructureProvidersNames() []string {
+	return c.names(infrastructureProvidersType)
+}
+
+func (c ProvidersTupled) names(typ providersType) []string {
+	f := func(nn []ProviderTuple) []string {
+		res := make([]string, len(nn))
+		for i, v := range nn {
+			res[i] = v.Name
+		}
+		return res
+	}
+
+	switch typ {
+	case bootstrapProvidersType:
+		return f(c.BootstrapProviders)
+	case controlPlaneProvidersType:
+		return f(c.ControlPlaneProviders)
+	case infrastructureProvidersType:
+		return f(c.InfrastructureProviders)
+	default:
+		return []string{}
+	}
+}
diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go
index 4acbbf6f6..7a4915488 100644
--- a/api/v1alpha1/managedcluster_types.go
+++ b/api/v1alpha1/managedcluster_types.go
@@ -100,6 +100,9 @@ type ManagedClusterSpec struct {
 
 // ManagedClusterStatus defines the observed state of ManagedCluster
 type ManagedClusterStatus struct {
+	// Currently compatible K8S version of the cluster. Set only if
+	// the corresponding ClusterTemplate provided it in the spec.
+	KubertenesVersion string `json:"k8sVersion,omitempty"`
 	// Conditions contains details for the current state of the ManagedCluster
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
 	// ObservedGeneration is the last observed generation.
diff --git a/api/v1alpha1/management_types.go b/api/v1alpha1/management_types.go
index 0763ac01a..7c036ab3b 100644
--- a/api/v1alpha1/management_types.go
+++ b/api/v1alpha1/management_types.go
@@ -75,37 +75,37 @@ func (in *Component) HelmValues() (values map[string]any, err error) {
 	return values, err
 }
 
-func (m *ManagementSpec) SetProvidersDefaults() error {
-	m.Providers = []Provider{
+func GetDefaultProviders() []Provider {
+	return []Provider{
 		{Name: ProviderK0smotronName},
 		{Name: ProviderCAPAName},
 		{Name: ProviderAzureName},
 		{Name: ProviderVSphereName},
 		{Name: ProviderSveltosName},
 	}
-	return nil
 }
 
 // ManagementStatus defines the observed state of Management
 type ManagementStatus struct {
-	// ObservedGeneration is the last observed generation.
-	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-	// AvailableProviders holds all CAPI providers available on the Management cluster.
-	AvailableProviders Providers `json:"availableProviders,omitempty"`
 	// Components indicates the status of installed HMC components and CAPI providers.
Components map[string]ComponentStatus `json:"components,omitempty"` // Release indicates the current Release object. Release string `json:"release,omitempty"` + // AvailableProviders holds all CAPI providers available along with + // their exact compatibility versions if specified in ProviderTemplates on the Management cluster. + AvailableProviders ProvidersTupled `json:"availableProviders,omitempty"` + // ObservedGeneration is the last observed generation. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` } // ComponentStatus is the status of Management component installation type ComponentStatus struct { // Template is the name of the Template associated with this component. Template string `json:"template,omitempty"` - // Success represents if a component installation was successful - Success bool `json:"success,omitempty"` // Error stores as error message in case of failed installation Error string `json:"error,omitempty"` + // Success represents if a component installation was successful + Success bool `json:"success,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1alpha1/providertemplate_types.go b/api/v1alpha1/providertemplate_types.go index b0e30a97d..e35cbd7ff 100644 --- a/api/v1alpha1/providertemplate_types.go +++ b/api/v1alpha1/providertemplate_types.go @@ -15,24 +15,70 @@ package v1alpha1 import ( + "fmt" + + "github.com/Masterminds/semver/v3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ProviderTemplateSpec defines the desired state of ProviderTemplate type ProviderTemplateSpec struct { - TemplateSpecCommon `json:",inline"` + Helm HelmSpec `json:"helm"` + // Compatible CAPI provider version set in the SemVer format. + CAPIVersion string `json:"capiVersion,omitempty"` + // Represents required CAPI providers with exact compatibility versions set. Should be set if not present in the Helm chart metadata. + Providers ProvidersTupled `json:"providers,omitempty"` } // ProviderTemplateStatus defines the observed state of ProviderTemplate type ProviderTemplateStatus struct { + // Compatible CAPI provider version in the SemVer format. + CAPIVersion string `json:"capiVersion,omitempty"` + // Providers represent exposed CAPI providers with exact compatibility versions set. + Providers ProvidersTupled `json:"providers,omitempty"` + TemplateStatusCommon `json:",inline"` } -func (t *ProviderTemplate) GetSpec() *TemplateSpecCommon { - return &t.Spec.TemplateSpecCommon +// FillStatusWithProviders sets the status of the template with providers +// either from the spec or from the given annotations. +func (t *ProviderTemplate) FillStatusWithProviders(annotations map[string]string) error { + var err error + t.Status.Providers.BootstrapProviders, err = parseProviders(t, bootstrapProvidersType, annotations, semver.NewVersion) + if err != nil { + return fmt.Errorf("failed to parse ProviderTemplate bootstrap providers: %v", err) + } + + t.Status.Providers.ControlPlaneProviders, err = parseProviders(t, controlPlaneProvidersType, annotations, semver.NewVersion) + if err != nil { + return fmt.Errorf("failed to parse ProviderTemplate controlPlane providers: %v", err) + } + + t.Status.Providers.InfrastructureProviders, err = parseProviders(t, infrastructureProvidersType, annotations, semver.NewVersion) + if err != nil { + return fmt.Errorf("failed to parse ProviderTemplate infrastructure providers: %v", err) + } + + return nil +} + +// GetSpecProviders returns .spec.providers of the Template. 
+func (t *ProviderTemplate) GetSpecProviders() ProvidersTupled {
+	return t.Spec.Providers
+}
+
+// GetStatusProviders returns .status.providers of the Template.
+func (t *ProviderTemplate) GetStatusProviders() ProvidersTupled {
+	return t.Status.Providers
+}
+
+// GetHelmSpec returns .spec.helm of the Template.
+func (t *ProviderTemplate) GetHelmSpec() *HelmSpec {
+	return &t.Spec.Helm
 }
 
-func (t *ProviderTemplate) GetStatus() *TemplateStatusCommon {
+// GetCommonStatus returns common status of the Template.
+func (t *ProviderTemplate) GetCommonStatus() *TemplateStatusCommon {
 	return &t.Status.TemplateStatusCommon
 }
 
diff --git a/api/v1alpha1/servicetemplate_types.go b/api/v1alpha1/servicetemplate_types.go
index 731486192..10c3152fc 100644
--- a/api/v1alpha1/servicetemplate_types.go
+++ b/api/v1alpha1/servicetemplate_types.go
@@ -15,6 +15,8 @@
 package v1alpha1
 
 import (
+	"strings"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -22,19 +24,71 @@ const ServiceTemplateKind = "ServiceTemplate"
 
 // ServiceTemplateSpec defines the desired state of ServiceTemplate
 type ServiceTemplateSpec struct {
-	TemplateSpecCommon `json:",inline"`
+	Helm HelmSpec `json:"helm"`
+	// Constraint describing compatible K8S versions of the cluster set in the SemVer format.
+	KubertenesConstraint string `json:"k8sConstraint,omitempty"`
+	// Represents required CAPI providers. Should be set if not present in the Helm chart metadata.
+	Providers Providers `json:"providers,omitempty"`
 }
 
// ServiceTemplateStatus defines the observed state of ServiceTemplate
 type ServiceTemplateStatus struct {
+	// Constraint describing compatible K8S versions of the cluster set in the SemVer format.
+	KubertenesConstraint string `json:"k8sConstraint,omitempty"`
+	// Represents exposed CAPI providers.
+	Providers Providers `json:"providers,omitempty"`
+
 	TemplateStatusCommon `json:",inline"`
 }
 
-func (t *ServiceTemplate) GetSpec() *TemplateSpecCommon {
-	return &t.Spec.TemplateSpecCommon
+// FillStatusWithProviders sets the status of the template with providers
+// either from the spec or from the given annotations.
+//
+// The returned error is always nil.
+func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) error {
+	parseProviders := func(typ providersType) []string {
+		var (
+			pspec, pstatus []string
+			anno           string
+		)
+		switch typ {
+		case bootstrapProvidersType:
+			pspec, pstatus = t.Spec.Providers.BootstrapProviders, t.Status.Providers.BootstrapProviders
+			anno = ChartAnnotationBootstrapProviders
+		case controlPlaneProvidersType:
+			pspec, pstatus = t.Spec.Providers.ControlPlaneProviders, t.Status.Providers.ControlPlaneProviders
+			anno = ChartAnnotationControlPlaneProviders
+		case infrastructureProvidersType:
+			pspec, pstatus = t.Spec.Providers.InfrastructureProviders, t.Status.Providers.InfrastructureProviders
+			anno = ChartAnnotationInfraProviders
+		}
+
+		if len(pspec) > 0 {
+			return pspec
+		}
+		_ = pstatus
+
+		providers := annotations[anno]
+		if len(providers) == 0 {
+			return []string{}
+		}
+
+		return strings.Split(providers, ",")
+	}
+
+	t.Status.Providers.BootstrapProviders = parseProviders(bootstrapProvidersType)
+	t.Status.Providers.ControlPlaneProviders = parseProviders(controlPlaneProvidersType)
+	t.Status.Providers.InfrastructureProviders = parseProviders(infrastructureProvidersType)
+
+	return nil
+}
+
+// GetHelmSpec returns .spec.helm of the Template.
+func (t *ServiceTemplate) GetHelmSpec() *HelmSpec { + return &t.Spec.Helm } -func (t *ServiceTemplate) GetStatus() *TemplateStatusCommon { +// GetCommonStatus returns common status of the Template. +func (t *ServiceTemplate) GetCommonStatus() *TemplateStatusCommon { return &t.Status.TemplateStatusCommon } diff --git a/api/v1alpha1/templates_common.go b/api/v1alpha1/templates_common.go index 98bd9ccf0..5ea92ccaa 100644 --- a/api/v1alpha1/templates_common.go +++ b/api/v1alpha1/templates_common.go @@ -15,6 +15,10 @@ package v1alpha1 import ( + "errors" + "fmt" + "strings" + helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) @@ -28,15 +32,6 @@ const ( ChartAnnotationControlPlaneProviders = "hmc.mirantis.com/control-plane-providers" ) -// TemplateSpecCommon is a Template configuration common for all Template types -type TemplateSpecCommon struct { - // Helm holds a reference to a Helm chart representing the HMC template - Helm HelmSpec `json:"helm"` - // Providers represent required/exposed CAPI providers depending on the template type. - // Should be set if not present in the Helm chart metadata. - Providers Providers `json:"providers,omitempty"` -} - // +kubebuilder:validation:XValidation:rule="(has(self.chartName) && !has(self.chartRef)) || (!has(self.chartName) && has(self.chartRef))", message="either chartName or chartRef must be set" // HelmSpec references a Helm chart representing the HMC template @@ -50,19 +45,27 @@ type HelmSpec struct { ChartVersion string `json:"chartVersion,omitempty"` } +func (s *HelmSpec) String() string { + if s.ChartRef != nil { + return s.ChartRef.Namespace + "/" + s.ChartRef.Name + ", Kind=" + s.ChartRef.Kind + } + + return s.ChartName + ": " + s.ChartVersion +} + // TemplateStatusCommon defines the observed state of Template common for all Template types type TemplateStatusCommon struct { - TemplateValidationStatus `json:",inline"` - // Description contains information about the template. - Description string `json:"description,omitempty"` // Config demonstrates available parameters for template customization, // that can be used when creating ManagedCluster objects. Config *apiextensionsv1.JSON `json:"config,omitempty"` // ChartRef is a reference to a source controller resource containing the // Helm chart representing the template. ChartRef *helmcontrollerv2.CrossNamespaceSourceReference `json:"chartRef,omitempty"` - // Providers represent required/exposed CAPI providers depending on the template type. - Providers Providers `json:"providers,omitempty"` + // Description contains information about the template. + Description string `json:"description,omitempty"` + + TemplateValidationStatus `json:",inline"` + // ObservedGeneration is the last observed generation. ObservedGeneration int64 `json:"observedGeneration,omitempty"` } @@ -73,3 +76,75 @@ type TemplateValidationStatus struct { // Valid indicates whether the template passed validation or not. 
 	Valid bool `json:"valid"`
 }
+
+type providersType int
+
+const (
+	bootstrapProvidersType providersType = iota
+	controlPlaneProvidersType
+	infrastructureProvidersType
+)
+
+func parseProviders[T any](providersGetter interface {
+	GetSpecProviders() ProvidersTupled
+	GetStatusProviders() ProvidersTupled
+}, typ providersType, annotations map[string]string, validationFn func(string) (T, error),
+) ([]ProviderTuple, error) {
+	pspec, pstatus, anno := getProvidersSpecStatusAnno(providersGetter, typ)
+	if len(pspec) > 0 { // the value in the spec has higher priority
+		return pspec, nil
+	}
+
+	providers := annotations[anno]
+	if len(providers) == 0 {
+		return []ProviderTuple{}, nil
+	}
+
+	var (
+		splitted = strings.Split(providers, ",")
+		merr     error
+	)
+
+	pstatus = make([]ProviderTuple, 0, len(splitted))
+
+	for _, v := range splitted {
+		nVerOrC := strings.SplitN(v, " ", 2)
+		if len(nVerOrC) == 0 { // BCE (bound check elimination)
+			continue
+		}
+
+		n := ProviderTuple{Name: nVerOrC[0]}
+		if len(nVerOrC) < 2 {
+			pstatus = append(pstatus, n)
+			continue
+		}
+
+		ver := strings.TrimSpace(nVerOrC[1])
+		if _, err := validationFn(ver); err != nil { // validation
+			merr = errors.Join(merr, fmt.Errorf("failed to parse version %s in %s: %v", ver, v, err))
+			continue
+		}
+
+		n.VersionOrContraint = ver
+		pstatus = append(pstatus, n)
+	}
+
+	return pstatus, merr
+}
+
+func getProvidersSpecStatusAnno(providersGetter interface {
+	GetSpecProviders() ProvidersTupled
+	GetStatusProviders() ProvidersTupled
+}, typ providersType,
+) (spec, status []ProviderTuple, annotation string) {
+	switch typ {
+	case bootstrapProvidersType:
+		return providersGetter.GetSpecProviders().BootstrapProviders, providersGetter.GetStatusProviders().BootstrapProviders, ChartAnnotationBootstrapProviders
+	case controlPlaneProvidersType:
+		return providersGetter.GetSpecProviders().ControlPlaneProviders, providersGetter.GetStatusProviders().ControlPlaneProviders, ChartAnnotationControlPlaneProviders
+	case infrastructureProvidersType:
+		return providersGetter.GetSpecProviders().InfrastructureProviders, providersGetter.GetStatusProviders().InfrastructureProviders, ChartAnnotationInfraProviders
+	default:
+		return []ProviderTuple{}, []ProviderTuple{}, ""
+	}
+}
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 5447bd7af..a0d3c518a 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -187,7 +187,8 @@ func (in *ClusterTemplateList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterTemplateSpec) DeepCopyInto(out *ClusterTemplateSpec) {
 	*out = *in
-	in.TemplateSpecCommon.DeepCopyInto(&out.TemplateSpecCommon)
+	in.Helm.DeepCopyInto(&out.Helm)
+	in.Providers.DeepCopyInto(&out.Providers)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTemplateSpec.
@@ -203,6 +204,7 @@ func (in *ClusterTemplateSpec) DeepCopy() *ClusterTemplateSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterTemplateStatus) DeepCopyInto(out *ClusterTemplateStatus) {
 	*out = *in
+	in.Providers.DeepCopyInto(&out.Providers)
 	in.TemplateStatusCommon.DeepCopyInto(&out.TemplateStatusCommon)
 }
 
@@ -594,7 +596,6 @@ func (in *ManagementSpec) DeepCopy() *ManagementSpec {
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManagementStatus) DeepCopyInto(out *ManagementStatus) { *out = *in - in.AvailableProviders.DeepCopyInto(&out.AvailableProviders) if in.Components != nil { in, out := &in.Components, &out.Components *out = make(map[string]ComponentStatus, len(*in)) @@ -602,6 +603,7 @@ func (in *ManagementStatus) DeepCopyInto(out *ManagementStatus) { (*out)[key] = val } } + in.AvailableProviders.DeepCopyInto(&out.AvailableProviders) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementStatus. @@ -805,7 +807,8 @@ func (in *ProviderTemplateList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProviderTemplateSpec) DeepCopyInto(out *ProviderTemplateSpec) { *out = *in - in.TemplateSpecCommon.DeepCopyInto(&out.TemplateSpecCommon) + in.Helm.DeepCopyInto(&out.Helm) + in.Providers.DeepCopyInto(&out.Providers) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderTemplateSpec. @@ -821,6 +824,7 @@ func (in *ProviderTemplateSpec) DeepCopy() *ProviderTemplateSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProviderTemplateStatus) DeepCopyInto(out *ProviderTemplateStatus) { *out = *in + in.Providers.DeepCopyInto(&out.Providers) in.TemplateStatusCommon.DeepCopyInto(&out.TemplateStatusCommon) } @@ -834,6 +838,21 @@ func (in *ProviderTemplateStatus) DeepCopy() *ProviderTemplateStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderTuple) DeepCopyInto(out *ProviderTuple) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderTuple. +func (in *ProviderTuple) DeepCopy() *ProviderTuple { + if in == nil { + return nil + } + out := new(ProviderTuple) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Providers) DeepCopyInto(out *Providers) { *out = *in @@ -864,6 +883,36 @@ func (in *Providers) DeepCopy() *Providers { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProvidersTupled) DeepCopyInto(out *ProvidersTupled) { + *out = *in + if in.InfrastructureProviders != nil { + in, out := &in.InfrastructureProviders, &out.InfrastructureProviders + *out = make([]ProviderTuple, len(*in)) + copy(*out, *in) + } + if in.BootstrapProviders != nil { + in, out := &in.BootstrapProviders, &out.BootstrapProviders + *out = make([]ProviderTuple, len(*in)) + copy(*out, *in) + } + if in.ControlPlaneProviders != nil { + in, out := &in.ControlPlaneProviders, &out.ControlPlaneProviders + *out = make([]ProviderTuple, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvidersTupled. +func (in *ProvidersTupled) DeepCopy() *ProvidersTupled { + if in == nil { + return nil + } + out := new(ProvidersTupled) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Release) DeepCopyInto(out *Release) { *out = *in @@ -1108,7 +1157,8 @@ func (in *ServiceTemplateList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceTemplateSpec) DeepCopyInto(out *ServiceTemplateSpec) { *out = *in - in.TemplateSpecCommon.DeepCopyInto(&out.TemplateSpecCommon) + in.Helm.DeepCopyInto(&out.Helm) + in.Providers.DeepCopyInto(&out.Providers) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceTemplateSpec. @@ -1124,6 +1174,7 @@ func (in *ServiceTemplateSpec) DeepCopy() *ServiceTemplateSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceTemplateStatus) DeepCopyInto(out *ServiceTemplateStatus) { *out = *in + in.Providers.DeepCopyInto(&out.Providers) in.TemplateStatusCommon.DeepCopyInto(&out.TemplateStatusCommon) } @@ -1307,27 +1358,9 @@ func (in *TemplateManagementStatus) DeepCopy() *TemplateManagementStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TemplateSpecCommon) DeepCopyInto(out *TemplateSpecCommon) { - *out = *in - in.Helm.DeepCopyInto(&out.Helm) - in.Providers.DeepCopyInto(&out.Providers) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateSpecCommon. -func (in *TemplateSpecCommon) DeepCopy() *TemplateSpecCommon { - if in == nil { - return nil - } - out := new(TemplateSpecCommon) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateStatusCommon) DeepCopyInto(out *TemplateStatusCommon) { *out = *in - out.TemplateValidationStatus = in.TemplateValidationStatus if in.Config != nil { in, out := &in.Config, &out.Config *out = new(apiextensionsv1.JSON) @@ -1338,7 +1371,7 @@ func (in *TemplateStatusCommon) DeepCopyInto(out *TemplateStatusCommon) { *out = new(v2.CrossNamespaceSourceReference) **out = **in } - in.Providers.DeepCopyInto(&out.Providers) + out.TemplateValidationStatus = in.TemplateValidationStatus } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateStatusCommon. 
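The compatibility attributes introduced by the API changes above are read from the Helm chart
annotations hmc.mirantis.com/bootstrap-providers, hmc.mirantis.com/control-plane-providers and
hmc.mirantis.com/infrastructure-providers, whose values are comma-separated "name[ version-or-constraint]"
pairs. The following self-contained sketch (illustrative only, not part of the patch; the annotation
value in main is hypothetical, and the SemVer validation performed by parseProviders via
github.com/Masterminds/semver is elided) shows how such a value maps onto ProviderTuple-like records:

// Illustrative sketch of the annotation parsing convention used by
// parseProviders in api/v1alpha1/templates_common.go.
package main

import (
	"fmt"
	"strings"
)

// tuple mirrors the shape of api/v1alpha1.ProviderTuple
// (whose version field is spelled VersionOrContraint in the patch).
type tuple struct {
	Name                string
	VersionOrConstraint string
}

func parseAnnotation(value string) []tuple {
	items := strings.Split(value, ",")
	out := make([]tuple, 0, len(items))
	for _, item := range items {
		// name first, then an optional space-separated version or constraint
		parts := strings.SplitN(strings.TrimSpace(item), " ", 2)
		t := tuple{Name: parts[0]}
		if len(parts) == 2 {
			// the real code validates this with semver.NewVersion (exact,
			// ProviderTemplate) or semver.NewConstraint (ClusterTemplate)
			t.VersionOrConstraint = strings.TrimSpace(parts[1])
		}
		out = append(out, t)
	}
	return out
}

func main() {
	// Hypothetical annotation value, e.g.
	//   hmc.mirantis.com/infrastructure-providers: "aws >=1.0.0,azure"
	fmt.Printf("%+v\n", parseAnnotation("aws >=1.0.0,azure"))
	// Prints: [{Name:aws VersionOrConstraint:>=1.0.0} {Name:azure VersionOrConstraint:}]
}

The split between exact versions (ProviderTemplate) and constraints (ClusterTemplate) follows from the
validation functions each FillStatusWithProviders implementation passes to parseProviders above.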
diff --git a/cmd/main.go b/cmd/main.go index 695d4b400..21af5d9e0 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -187,7 +187,6 @@ func main() { templateReconciler := controller.TemplateReconciler{ Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), SystemNamespace: currentNamespace, DefaultRegistryConfig: helm.DefaultRegistryConfig{ URL: defaultRegistryURL, diff --git a/go.mod b/go.mod index 477018054..7e6c43301 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/Mirantis/hmc go 1.22.7 require ( + github.com/Masterminds/semver/v3 v3.3.0 github.com/a8m/envsubst v1.4.2 github.com/cert-manager/cert-manager v1.15.3 github.com/fluxcd/helm-controller/api v1.1.0 @@ -38,7 +39,6 @@ require ( github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index 16af664ec..24476c1ff 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -45,7 +45,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" hmc "github.com/Mirantis/hmc/api/v1alpha1" @@ -374,7 +373,7 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma // updateServices reconciles services provided in ManagedCluster.Spec.Services. // TODO(https://github.com/Mirantis/hmc/issues/361): Set status to ManagedCluster object at appropriate places. 
func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.ManagedCluster) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ManagedClusterController", fmt.Sprintf("%s/%s", mc.Namespace, mc.Name)) + l := ctrl.LoggerFrom(ctx) opts := []sveltos.HelmChartOpts{} // NOTE: The Profile object will be updated with no helm @@ -606,7 +605,7 @@ func (r *ManagedClusterReconciler) releaseCluster(ctx context.Context, namespace // Associate the provider with it's GVK for _, provider := range providers { - gvk, ok := providerGVKs[provider] + gvk, ok := providerGVKs[provider.Name] if !ok { continue } @@ -629,13 +628,14 @@ func (r *ManagedClusterReconciler) releaseCluster(ctx context.Context, namespace return nil } -func (r *ManagedClusterReconciler) getProviders(ctx context.Context, templateNamespace, templateName string) ([]string, error) { +func (r *ManagedClusterReconciler) getProviders(ctx context.Context, templateNamespace, templateName string) ([]hmc.ProviderTuple, error) { template := &hmc.ClusterTemplate{} templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} if err := r.Get(ctx, templateRef, template); err != nil { ctrl.LoggerFrom(ctx).Error(err, "Failed to get ClusterTemplate", "namespace", templateNamespace, "name", templateName) return nil, err } + return template.Status.Providers.InfrastructureProviders, nil } diff --git a/internal/controller/managedcluster_controller_test.go b/internal/controller/managedcluster_controller_test.go index 72f2eddec..96f77e23c 100644 --- a/internal/controller/managedcluster_controller_test.go +++ b/internal/controller/managedcluster_controller_test.go @@ -73,13 +73,11 @@ var _ = Describe("ManagedCluster Controller", func() { Namespace: managedClusterNamespace, }, Spec: hmc.ClusterTemplateSpec{ - TemplateSpecCommon: hmc.TemplateSpecCommon{ - Helm: hmc.HelmSpec{ - ChartRef: &hcv2.CrossNamespaceSourceReference{ - Kind: "HelmChart", - Name: "ref-test", - Namespace: "default", - }, + Helm: hmc.HelmSpec{ + ChartRef: &hcv2.CrossNamespaceSourceReference{ + Kind: "HelmChart", + Name: "ref-test", + Namespace: "default", }, }, }, @@ -154,8 +152,6 @@ var _ = Describe("ManagedCluster Controller", func() { NamespacedName: typeNamespacedName, }) Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
}) }) }) diff --git a/internal/controller/management_controller.go b/internal/controller/management_controller.go index 5d332820d..41a1f6723 100644 --- a/internal/controller/management_controller.go +++ b/internal/controller/management_controller.go @@ -105,7 +105,7 @@ func (r *ManagementReconciler) Update(ctx context.Context, management *hmc.Manag } var errs error - detectedProviders := hmc.Providers{} + detectedProviders := hmc.ProvidersTupled{} detectedComponents := make(map[string]hmc.ComponentStatus) err := r.enableAdditionalComponents(ctx, management) @@ -313,6 +313,7 @@ func wrappedComponents(mgmt *hmc.Management, release *hmc.Release) ([]component, } hmcComp.Config = hmcConfig components = append(components, hmcComp) + capiComp := component{ Component: mgmt.Spec.Core.CAPI, helmReleaseName: hmc.CoreCAPIName, dependsOn: []meta.NamespacedObjectReference{{Name: hmc.CoreHMCName}}, @@ -336,6 +337,7 @@ func wrappedComponents(mgmt *hmc.Management, release *hmc.Release) ([]component, c.targetNamespace = hmc.ProviderSveltosTargetNamespace c.createNamespace = hmc.ProviderSveltosCreateNamespace } + components = append(components, c) } @@ -410,10 +412,10 @@ func (r *ManagementReconciler) enableAdditionalComponents(ctx context.Context, m func updateComponentsStatus( components map[string]hmc.ComponentStatus, - providers *hmc.Providers, + providers *hmc.ProvidersTupled, componentName string, templateName string, - templateProviders hmc.Providers, + templateProviders hmc.ProvidersTupled, err string, ) { components[componentName] = hmc.ComponentStatus{ diff --git a/internal/controller/management_controller_test.go b/internal/controller/management_controller_test.go index 57dadd683..23c8c405d 100644 --- a/internal/controller/management_controller_test.go +++ b/internal/controller/management_controller_test.go @@ -36,7 +36,7 @@ var _ = Describe("Management Controller", func() { typeNamespacedName := types.NamespacedName{ Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed + Namespace: "default", } management := &hmcmirantiscomv1alpha1.Management{} @@ -49,14 +49,12 @@ var _ = Describe("Management Controller", func() { Name: resourceName, Namespace: "default", }, - // TODO(user): Specify other spec details if needed. } Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } }) AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. resource := &hmcmirantiscomv1alpha1.Management{} err := k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) @@ -75,8 +73,6 @@ var _ = Describe("Management Controller", func() { NamespacedName: typeNamespacedName, }) Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
}) }) }) diff --git a/internal/controller/release_controller.go b/internal/controller/release_controller.go index 8736e148d..b341d84fc 100644 --- a/internal/controller/release_controller.go +++ b/internal/controller/release_controller.go @@ -110,9 +110,8 @@ func (r *ReleaseReconciler) ensureManagement(ctx context.Context) error { if err != nil { return err } - if err := mgmtObj.Spec.SetProvidersDefaults(); err != nil { - return err - } + mgmtObj.Spec.Providers = hmc.GetDefaultProviders() + getter := helm.NewMemoryRESTClientGetter(r.Config, r.RESTMapper()) actionConfig := new(action.Configuration) err = actionConfig.Init(getter, r.SystemNamespace, "secret", l.Info) diff --git a/internal/controller/template_controller.go b/internal/controller/template_controller.go index da312028f..8b1cf9799 100644 --- a/internal/controller/template_controller.go +++ b/internal/controller/template_controller.go @@ -18,7 +18,6 @@ import ( "context" "encoding/json" "fmt" - "strings" helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2" sourcev1 "github.com/fluxcd/source-controller/api/v1" @@ -26,7 +25,6 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -38,10 +36,9 @@ const ( defaultRepoName = "hmc-templates" ) -// TemplateReconciler reconciles a Template object +// TemplateReconciler reconciles a *Template object type TemplateReconciler struct { client.Client - Scheme *runtime.Scheme SystemNamespace string DefaultRegistryConfig helm.DefaultRegistryConfig @@ -65,26 +62,26 @@ func (r *ClusterTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Requ l := ctrl.LoggerFrom(ctx) l.Info("Reconciling ClusterTemplate") - clusterTemplate := &hmc.ClusterTemplate{} - err := r.Get(ctx, req.NamespacedName, clusterTemplate) - if err != nil { + clusterTemplate := new(hmc.ClusterTemplate) + if err := r.Get(ctx, req.NamespacedName, clusterTemplate); err != nil { if apierrors.IsNotFound(err) { l.Info("ClusterTemplate not found, ignoring since object must be deleted") return ctrl.Result{}, nil } + l.Error(err, "Failed to get ClusterTemplate") return ctrl.Result{}, err } + return r.ReconcileTemplate(ctx, clusterTemplate) } func (r *ServiceTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := ctrl.LoggerFrom(ctx).WithValues("ServiceTemplateReconciler", req.NamespacedName) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling ServiceTemplate") - serviceTemplate := &hmc.ServiceTemplate{} - err := r.Get(ctx, req.NamespacedName, serviceTemplate) - if err != nil { + serviceTemplate := new(hmc.ServiceTemplate) + if err := r.Get(ctx, req.NamespacedName, serviceTemplate); err != nil { if apierrors.IsNotFound(err) { l.Info("ServiceTemplate not found, ignoring since object must be deleted") return ctrl.Result{}, nil @@ -96,44 +93,45 @@ func (r *ServiceTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Requ } func (r *ProviderTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := ctrl.LoggerFrom(ctx).WithValues("ProviderTemplateReconciler", req.NamespacedName) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling ProviderTemplate") - providerTemplate := &hmc.ProviderTemplate{} - err := r.Get(ctx, req.NamespacedName, providerTemplate) - if err != nil { + providerTemplate := new(hmc.ProviderTemplate) 
+ if err := r.Get(ctx, req.NamespacedName, providerTemplate); err != nil { if apierrors.IsNotFound(err) { l.Info("ProviderTemplate not found, ignoring since object must be deleted") return ctrl.Result{}, nil } + l.Error(err, "Failed to get ProviderTemplate") return ctrl.Result{}, err } + return r.ReconcileTemplate(ctx, providerTemplate) } -// Template is the interface defining a list of methods to interact with templates -type Template interface { +type templateCommon interface { client.Object - GetSpec() *hmc.TemplateSpecCommon - GetStatus() *hmc.TemplateStatusCommon + GetHelmSpec() *hmc.HelmSpec + GetCommonStatus() *hmc.TemplateStatusCommon + FillStatusWithProviders(map[string]string) error } -func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template Template) (ctrl.Result, error) { +func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template templateCommon) (ctrl.Result, error) { l := ctrl.LoggerFrom(ctx) - spec := template.GetSpec() - status := template.GetStatus() + helmSpec := template.GetHelmSpec() + status := template.GetCommonStatus() var err error var hcChart *sourcev1.HelmChart - if spec.Helm.ChartRef != nil { - hcChart, err = r.getHelmChartFromChartRef(ctx, spec.Helm.ChartRef) + if helmSpec.ChartRef != nil { + hcChart, err = r.getHelmChartFromChartRef(ctx, helmSpec.ChartRef) if err != nil { - l.Error(err, "failed to get artifact from chartRef", "kind", spec.Helm.ChartRef.Kind, "namespace", spec.Helm.ChartRef.Namespace, "name", spec.Helm.ChartRef.Name) + l.Error(err, "failed to get artifact from chartRef", "chartRef", helmSpec.String()) return ctrl.Result{}, err } } else { - if spec.Helm.ChartName == "" { + if helmSpec.ChartName == "" { err := fmt.Errorf("neither chartName nor chartRef is set") l.Error(err, "invalid helm chart reference") return ctrl.Result{}, err @@ -189,19 +187,23 @@ func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template Tem _ = r.updateStatus(ctx, template, err.Error()) return ctrl.Result{}, err } + l.Info("Validating Helm chart") - if err := parseChartMetadata(template, helmChart); err != nil { - l.Error(err, "Failed to parse Helm chart metadata") + if err = helmChart.Validate(); err != nil { + l.Error(err, "Helm chart validation failed") _ = r.updateStatus(ctx, template, err.Error()) return ctrl.Result{}, err } - if err = helmChart.Validate(); err != nil { - l.Error(err, "Helm chart validation failed") + + l.Info("Parsing Helm chart metadata") + if err := fillStatusWithProviders(template, helmChart); err != nil { + l.Error(err, "Failed to fill status with providers") _ = r.updateStatus(ctx, template, err.Error()) return ctrl.Result{}, err } status.Description = helmChart.Metadata.Description + rawValues, err := json.Marshal(helmChart.Values) if err != nil { l.Error(err, "Failed to parse Helm chart values") @@ -210,52 +212,26 @@ func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template Tem return ctrl.Result{}, err } status.Config = &apiextensionsv1.JSON{Raw: rawValues} + l.Info("Chart validation completed successfully") return ctrl.Result{}, r.updateStatus(ctx, template, "") } -func templateManagedByHMC(template Template) bool { +func templateManagedByHMC(template templateCommon) bool { return template.GetLabels()[hmc.HMCManagedLabelKey] == hmc.HMCManagedLabelValue } -func parseChartMetadata(template Template, inChart *chart.Chart) error { - if inChart.Metadata == nil { +func fillStatusWithProviders(template templateCommon, helmChart *chart.Chart) error { + if helmChart.Metadata == 
nil { return fmt.Errorf("chart metadata is empty") } - spec := template.GetSpec() - status := template.GetStatus() - // the value in spec has higher priority - if len(spec.Providers.InfrastructureProviders) > 0 { - status.Providers.InfrastructureProviders = spec.Providers.InfrastructureProviders - } else { - infraProviders := inChart.Metadata.Annotations[hmc.ChartAnnotationInfraProviders] - if infraProviders != "" { - status.Providers.InfrastructureProviders = strings.Split(infraProviders, ",") - } - } - if len(spec.Providers.BootstrapProviders) > 0 { - status.Providers.BootstrapProviders = spec.Providers.BootstrapProviders - } else { - bootstrapProviders := inChart.Metadata.Annotations[hmc.ChartAnnotationBootstrapProviders] - if bootstrapProviders != "" { - status.Providers.BootstrapProviders = strings.Split(bootstrapProviders, ",") - } - } - if len(spec.Providers.ControlPlaneProviders) > 0 { - status.Providers.ControlPlaneProviders = spec.Providers.ControlPlaneProviders - } else { - cpProviders := inChart.Metadata.Annotations[hmc.ChartAnnotationControlPlaneProviders] - if cpProviders != "" { - status.Providers.ControlPlaneProviders = strings.Split(cpProviders, ",") - } - } - return nil + return template.FillStatusWithProviders(helmChart.Metadata.Annotations) } -func (r *TemplateReconciler) updateStatus(ctx context.Context, template Template, validationError string) error { - status := template.GetStatus() +func (r *TemplateReconciler) updateStatus(ctx context.Context, template templateCommon, validationError string) error { + status := template.GetCommonStatus() status.ObservedGeneration = template.GetGeneration() status.ValidationError = validationError status.Valid = validationError == "" @@ -266,8 +242,7 @@ func (r *TemplateReconciler) updateStatus(ctx context.Context, template Template return nil } -func (r *TemplateReconciler) reconcileHelmChart(ctx context.Context, template Template) (*sourcev1.HelmChart, error) { - spec := template.GetSpec() +func (r *TemplateReconciler) reconcileHelmChart(ctx context.Context, template templateCommon) (*sourcev1.HelmChart, error) { namespace := template.GetNamespace() if namespace == "" { namespace = r.SystemNamespace @@ -279,10 +254,12 @@ func (r *TemplateReconciler) reconcileHelmChart(ctx context.Context, template Te }, } + helmSpec := template.GetHelmSpec() _, err := ctrl.CreateOrUpdate(ctx, r.Client, helmChart, func() error { if helmChart.Labels == nil { helmChart.Labels = make(map[string]string) } + helmChart.Labels[hmc.HMCManagedLabelKey] = hmc.HMCManagedLabelValue helmChart.OwnerReferences = []metav1.OwnerReference{ { @@ -292,21 +269,21 @@ func (r *TemplateReconciler) reconcileHelmChart(ctx context.Context, template Te UID: template.GetUID(), }, } + helmChart.Spec = sourcev1.HelmChartSpec{ - Chart: spec.Helm.ChartName, - Version: spec.Helm.ChartVersion, + Chart: helmSpec.ChartName, + Version: helmSpec.ChartVersion, SourceRef: sourcev1.LocalHelmChartSourceReference{ Kind: sourcev1.HelmRepositoryKind, Name: defaultRepoName, }, Interval: metav1.Duration{Duration: helm.DefaultReconcileInterval}, } + return nil }) - if err != nil { - return nil, err - } - return helmChart, nil + + return helmChart, err } func (r *TemplateReconciler) getHelmChartFromChartRef(ctx context.Context, chartRef *helmcontrollerv2.CrossNamespaceSourceReference) (*sourcev1.HelmChart, error) { diff --git a/internal/controller/template_controller_test.go b/internal/controller/template_controller_test.go index 8a95ed2e8..e206ea993 100644 --- 
a/internal/controller/template_controller_test.go +++ b/internal/controller/template_controller_test.go @@ -52,7 +52,7 @@ var _ = Describe("Template Controller", func() { typeNamespacedName := types.NamespacedName{ Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed + Namespace: "default", } clusterTemplate := &hmcmirantiscomv1alpha1.ClusterTemplate{} serviceTemplate := &hmcmirantiscomv1alpha1.ServiceTemplate{} @@ -60,13 +60,11 @@ var _ = Describe("Template Controller", func() { helmRepo := &sourcev1.HelmRepository{} helmChart := &sourcev1.HelmChart{} - templateSpec := hmcmirantiscomv1alpha1.TemplateSpecCommon{ - Helm: hmcmirantiscomv1alpha1.HelmSpec{ - ChartRef: &helmcontrollerv2.CrossNamespaceSourceReference{ - Kind: "HelmChart", - Name: helmChartName, - Namespace: helmRepoNamespace, - }, + helmSpec := hmcmirantiscomv1alpha1.HelmSpec{ + ChartRef: &helmcontrollerv2.CrossNamespaceSourceReference{ + Kind: "HelmChart", + Name: helmChartName, + Namespace: helmRepoNamespace, }, } @@ -120,7 +118,7 @@ var _ = Describe("Template Controller", func() { Name: resourceName, Namespace: "default", }, - Spec: hmcmirantiscomv1alpha1.ClusterTemplateSpec{TemplateSpecCommon: templateSpec}, + Spec: hmcmirantiscomv1alpha1.ClusterTemplateSpec{Helm: helmSpec}, } Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } @@ -132,7 +130,7 @@ var _ = Describe("Template Controller", func() { Name: resourceName, Namespace: "default", }, - Spec: hmcmirantiscomv1alpha1.ServiceTemplateSpec{TemplateSpecCommon: templateSpec}, + Spec: hmcmirantiscomv1alpha1.ServiceTemplateSpec{Helm: helmSpec}, } Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } @@ -144,14 +142,13 @@ var _ = Describe("Template Controller", func() { Name: resourceName, Namespace: "default", }, - Spec: hmcmirantiscomv1alpha1.ProviderTemplateSpec{TemplateSpecCommon: templateSpec}, + Spec: hmcmirantiscomv1alpha1.ProviderTemplateSpec{Helm: helmSpec}, } Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } }) AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. clusterTemplateResource := &hmcmirantiscomv1alpha1.ClusterTemplate{} err := k8sClient.Get(ctx, typeNamespacedName, clusterTemplateResource) Expect(err).NotTo(HaveOccurred()) @@ -176,7 +173,6 @@ var _ = Describe("Template Controller", func() { It("should successfully reconcile the resource", func() { templateReconciler := TemplateReconciler{ Client: k8sClient, - Scheme: k8sClient.Scheme(), downloadHelmChartFunc: fakeDownloadHelmChartFunc, } By("Reconciling the ClusterTemplate resource") @@ -193,8 +189,6 @@ var _ = Describe("Template Controller", func() { providerTemplateReconciler := &ProviderTemplateReconciler{TemplateReconciler: templateReconciler} _, err = providerTemplateReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typeNamespacedName}) Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. 
}) }) }) diff --git a/internal/controller/templatechain_controller.go b/internal/controller/templatechain_controller.go index da788732a..42e3035f7 100644 --- a/internal/controller/templatechain_controller.go +++ b/internal/controller/templatechain_controller.go @@ -23,7 +23,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" hmc "github.com/Mirantis/hmc/api/v1alpha1" ) @@ -44,8 +43,8 @@ type ServiceTemplateChainReconciler struct { TemplateChainReconciler } -// TemplateChain is the interface defining a list of methods to interact with templatechains -type TemplateChain interface { +// templateChain is the interface defining a list of methods to interact with *templatechains +type templateChain interface { client.Object Kind() string TemplateKind() string @@ -53,7 +52,7 @@ type TemplateChain interface { } func (r *ClusterTemplateChainReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ClusterTemplateChainController", req.NamespacedName) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling ClusterTemplateChain") clusterTemplateChain := &hmc.ClusterTemplateChain{} @@ -70,7 +69,7 @@ func (r *ClusterTemplateChainReconciler) Reconcile(ctx context.Context, req ctrl } func (r *ServiceTemplateChainReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - l := log.FromContext(ctx).WithValues("ServiceTemplateChainReconciler", req.NamespacedName) + l := ctrl.LoggerFrom(ctx) l.Info("Reconciling ServiceTemplateChain") serviceTemplateChain := &hmc.ServiceTemplateChain{} @@ -86,17 +85,18 @@ func (r *ServiceTemplateChainReconciler) Reconcile(ctx context.Context, req ctrl return r.ReconcileTemplateChain(ctx, serviceTemplateChain) } -func (r *TemplateChainReconciler) ReconcileTemplateChain(ctx context.Context, templateChain TemplateChain) (ctrl.Result, error) { - l := log.FromContext(ctx) +func (r *TemplateChainReconciler) ReconcileTemplateChain(ctx context.Context, templateChain templateChain) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) systemTemplates, managedTemplates, err := getCurrentTemplates(ctx, r.Client, templateChain.TemplateKind(), r.SystemNamespace, templateChain.GetNamespace(), templateChain.GetName()) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to get current templates: %v", err) } - var errs error - - keepTemplate := make(map[string]bool) + var ( + errs error + keepTemplate = make(map[string]struct{}, len(templateChain.GetSpec().SupportedTemplates)) + ) for _, supportedTemplate := range templateChain.GetSpec().SupportedTemplates { meta := metav1.ObjectMeta{ Name: supportedTemplate.Name, @@ -106,67 +106,68 @@ func (r *TemplateChainReconciler) ReconcileTemplateChain(ctx context.Context, te HMCManagedByChainLabelKey: templateChain.GetName(), }, } - keepTemplate[supportedTemplate.Name] = true + keepTemplate[supportedTemplate.Name] = struct{}{} source, found := systemTemplates[supportedTemplate.Name] if !found { errs = errors.Join(errs, fmt.Errorf("source %s %s/%s is not found", templateChain.TemplateKind(), r.SystemNamespace, supportedTemplate.Name)) continue } - if source.GetStatus().ChartRef == nil { + if source.GetCommonStatus().ChartRef == nil { errs = errors.Join(errs, fmt.Errorf("source %s %s/%s does not have chart reference yet", templateChain.TemplateKind(), r.SystemNamespace, supportedTemplate.Name)) continue } - templateSpec := 
hmc.TemplateSpecCommon{ - Helm: hmc.HelmSpec{ - ChartRef: source.GetStatus().ChartRef, - }, + helmSpec := hmc.HelmSpec{ + ChartRef: source.GetCommonStatus().ChartRef, } var target client.Object switch templateChain.Kind() { case hmc.ClusterTemplateChainKind: target = &hmc.ClusterTemplate{ObjectMeta: meta, Spec: hmc.ClusterTemplateSpec{ - TemplateSpecCommon: templateSpec, + Helm: helmSpec, }} case hmc.ServiceTemplateChainKind: target = &hmc.ServiceTemplate{ObjectMeta: meta, Spec: hmc.ServiceTemplateSpec{ - TemplateSpecCommon: templateSpec, + Helm: helmSpec, }} default: return ctrl.Result{}, fmt.Errorf("invalid TemplateChain kind. Supported kinds are %s and %s", hmc.ClusterTemplateChainKind, hmc.ServiceTemplateChainKind) } - err := r.Create(ctx, target) - if err == nil { - l.Info(fmt.Sprintf("%s was successfully created", templateChain.TemplateKind()), "namespace", templateChain.GetNamespace(), "name", supportedTemplate) + + if err := r.Create(ctx, target); err == nil { + l.Info(fmt.Sprintf("%s was successfully created", templateChain.TemplateKind()), "template namespace", templateChain.GetNamespace(), "template name", supportedTemplate.Name) continue } + if !apierrors.IsAlreadyExists(err) { errs = errors.Join(errs, err) } } + for _, template := range managedTemplates { - if !keepTemplate[template.GetName()] { - l.Info(fmt.Sprintf("Deleting %s", templateChain.TemplateKind()), "namespace", templateChain.GetNamespace(), "name", template.GetName()) - err := r.Delete(ctx, template) - if err == nil { - l.Info(fmt.Sprintf("%s was deleted", templateChain.TemplateKind()), "namespace", templateChain.GetNamespace(), "name", template.GetName()) - continue - } - if !apierrors.IsNotFound(err) { - errs = errors.Join(errs, err) - } + templateName := template.GetName() + if _, keep := keepTemplate[templateName]; keep { + continue } + + ll := l.WithValues("template kind", templateChain.TemplateKind(), "template namespace", templateChain.GetNamespace(), "template name", templateName) + ll.Info("Deleting Template") + + if err := r.Delete(ctx, template); client.IgnoreNotFound(err) != nil { + errs = errors.Join(errs, err) + continue + } + + ll.Info("Template has been deleted") } - if errs != nil { - return ctrl.Result{}, errs - } - return ctrl.Result{}, nil + + return ctrl.Result{}, errs } -func getCurrentTemplates(ctx context.Context, cl client.Client, templateKind, systemNamespace, targetNamespace, templateChainName string) (map[string]Template, []Template, error) { - var templates []Template +func getCurrentTemplates(ctx context.Context, cl client.Client, templateKind, systemNamespace, targetNamespace, templateChainName string) (systemTemplates map[string]templateCommon, managedTemplates []templateCommon, _ error) { + var templates []templateCommon switch templateKind { case hmc.ClusterTemplateKind: @@ -190,21 +191,23 @@ func getCurrentTemplates(ctx context.Context, cl client.Client, templateKind, sy default: return nil, nil, fmt.Errorf("invalid Template kind. 
Supported kinds are %s and %s", hmc.ClusterTemplateKind, hmc.ServiceTemplateKind) } - systemTemplates := make(map[string]Template) - var managedTemplates []Template + systemTemplates = make(map[string]templateCommon, len(templates)) + managedTemplates = make([]templateCommon, 0, len(templates)) for _, template := range templates { if template.GetNamespace() == systemNamespace { systemTemplates[template.GetName()] = template continue } + labels := template.GetLabels() if template.GetNamespace() == targetNamespace && - labels[hmc.HMCManagedLabelKey] == "true" && + labels[hmc.HMCManagedLabelKey] == hmc.HMCManagedLabelValue && labels[HMCManagedByChainLabelKey] == templateChainName { managedTemplates = append(managedTemplates, template) } } + return systemTemplates, managedTemplates, nil } diff --git a/internal/controller/templatemanagement_controller.go b/internal/controller/templatemanagement_controller.go index 0384f6982..40f69a2b8 100644 --- a/internal/controller/templatemanagement_controller.go +++ b/internal/controller/templatemanagement_controller.go @@ -142,8 +142,8 @@ func getNamespacedName(namespace, name string) string { return fmt.Sprintf("%s/%s", namespace, name) } -func (r *TemplateManagementReconciler) getCurrentTemplateChains(ctx context.Context, templateChainKind string) (map[string]TemplateChain, []TemplateChain, error) { - var templateChains []TemplateChain +func (r *TemplateManagementReconciler) getCurrentTemplateChains(ctx context.Context, templateChainKind string) (map[string]templateChain, []templateChain, error) { + var templateChains []templateChain switch templateChainKind { case hmc.ClusterTemplateChainKind: ctChainList := &hmc.ClusterTemplateChainList{} @@ -167,17 +167,21 @@ func (r *TemplateManagementReconciler) getCurrentTemplateChains(ctx context.Cont return nil, nil, fmt.Errorf("invalid TemplateChain kind. 
Supported kinds are %s and %s", hmc.ClusterTemplateChainKind, hmc.ServiceTemplateChainKind) } - systemTemplateChains := make(map[string]TemplateChain) - var managedTemplateChains []TemplateChain + var ( + systemTemplateChains = make(map[string]templateChain, len(templateChains)) + managedTemplateChains = make([]templateChain, 0, len(templateChains)) + ) for _, chain := range templateChains { if chain.GetNamespace() == r.SystemNamespace { systemTemplateChains[chain.GetName()] = chain continue } + if chain.GetLabels()[hmc.HMCManagedLabelKey] == hmc.HMCManagedLabelValue { managedTemplateChains = append(managedTemplateChains, chain) } } + return systemTemplateChains, managedTemplateChains, nil } @@ -199,23 +203,27 @@ func getTargetNamespaces(ctx context.Context, cl client.Client, targetNamespaces } } - namespaces := &corev1.NamespaceList{} - listOpts := &client.ListOptions{} - if selector.String() != "" { - listOpts = &client.ListOptions{LabelSelector: selector} + var ( + namespaces = new(corev1.NamespaceList) + listOpts = new(client.ListOptions) + ) + if !selector.Empty() { + listOpts.LabelSelector = selector } - err = cl.List(ctx, namespaces, listOpts) - if err != nil { - return []string{}, err + + if err := cl.List(ctx, namespaces, listOpts); err != nil { + return nil, err } + result := make([]string, len(namespaces.Items)) for i, ns := range namespaces.Items { result[i] = ns.Name } + return result, nil } -func (r *TemplateManagementReconciler) createTemplateChain(ctx context.Context, source TemplateChain, targetNamespace string) error { +func (r *TemplateManagementReconciler) createTemplateChain(ctx context.Context, source templateChain, targetNamespace string) error { l := ctrl.LoggerFrom(ctx) meta := metav1.ObjectMeta{ @@ -225,7 +233,7 @@ func (r *TemplateManagementReconciler) createTemplateChain(ctx context.Context, hmc.HMCManagedLabelKey: hmc.HMCManagedLabelValue, }, } - var target TemplateChain + var target templateChain switch source.Kind() { case hmc.ClusterTemplateChainKind: target = &hmc.ClusterTemplateChain{ObjectMeta: meta, Spec: *source.GetSpec()} @@ -244,7 +252,7 @@ func (r *TemplateManagementReconciler) createTemplateChain(ctx context.Context, return nil } -func (r *TemplateManagementReconciler) deleteTemplateChain(ctx context.Context, chain TemplateChain) error { +func (r *TemplateManagementReconciler) deleteTemplateChain(ctx context.Context, chain templateChain) error { l := ctrl.LoggerFrom(ctx) err := r.Delete(ctx, chain) diff --git a/internal/telemetry/tracker.go b/internal/telemetry/tracker.go index a9b578bdf..83d9b858e 100644 --- a/internal/telemetry/tracker.go +++ b/internal/telemetry/tracker.go @@ -93,9 +93,9 @@ func (t *Tracker) trackManagedClusterHeartbeat(ctx context.Context) error { clusterID, managedCluster.Spec.Template, template.Spec.Helm.ChartVersion, - strings.Join(template.Status.Providers.InfrastructureProviders, ","), - strings.Join(template.Status.Providers.BootstrapProviders, ","), - strings.Join(template.Status.Providers.ControlPlaneProviders, ","), + strings.Join(template.Status.Providers.InfrastructureProvidersNames(), ","), + strings.Join(template.Status.Providers.BootstrapProvidersNames(), ","), + strings.Join(template.Status.Providers.ControlPlaneProvidersNames(), ","), ) if err != nil { errs = errors.Join(errs, fmt.Errorf("failed to track the heartbeat of the managedcluster %s/%s", managedCluster.Namespace, managedCluster.Name)) diff --git a/internal/utils/util.go b/internal/utils/util.go deleted file mode 100644 index 35c60ea36..000000000 --- 
a/internal/utils/util.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2024 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -// SliceToMapKeys converts a given slice to a map with slice's values -// as the map's keys zeroing value for each. -func SliceToMapKeys[S ~[]K, M ~map[K]V, K comparable, V any](s S) M { - m := make(M) - for i := range s { - m[s[i]] = *new(V) - } - return m -} - -// DiffSliceSubset finds missing items of a given slice in a given map. -// If the slice is a subset of the map, returns empty slice. -// Boolean return argument indicates whether the slice is a subset. -func DiffSliceSubset[S ~[]K, M ~map[K]V, K comparable, V any](s S, m M) (diff S, isSubset bool) { - for _, v := range s { - if _, ok := m[v]; !ok { - diff = append(diff, v) - } - } - - return diff, len(diff) == 0 -} diff --git a/internal/webhook/managedcluster_webhook.go b/internal/webhook/managedcluster_webhook.go index 79cc2b2a5..113a89f60 100644 --- a/internal/webhook/managedcluster_webhook.go +++ b/internal/webhook/managedcluster_webhook.go @@ -29,7 +29,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/Mirantis/hmc/api/v1alpha1" - "github.com/Mirantis/hmc/internal/utils" ) type ManagedClusterValidator struct { @@ -170,11 +169,17 @@ func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template return nil } -func getMissingProviders(exposedProviders []string, requiredProviders []string) []string { - exposedBootstrapProviders := utils.SliceToMapKeys[[]string, map[string]struct{}](exposedProviders) - diff, isSubset := utils.DiffSliceSubset(requiredProviders, exposedBootstrapProviders) - if !isSubset { - return diff +func getMissingProviders(exposedProviders, requiredProviders []v1alpha1.ProviderTuple) (missing []string) { + exposedSet := make(map[string]struct{}, len(requiredProviders)) + for _, v := range exposedProviders { + exposedSet[v.Name] = struct{}{} } - return []string{} + + for _, v := range requiredProviders { + if _, ok := exposedSet[v.Name]; !ok { + missing = append(missing, v.Name) + } + } + + return missing } diff --git a/internal/webhook/managedcluster_webhook_test.go b/internal/webhook/managedcluster_webhook_test.go index 61e5a02d2..32557cfea 100644 --- a/internal/webhook/managedcluster_webhook_test.go +++ b/internal/webhook/managedcluster_webhook_test.go @@ -36,10 +36,10 @@ var ( testNamespace = "test" mgmt = management.NewManagement( - management.WithAvailableProviders(v1alpha1.Providers{ - InfrastructureProviders: []string{"aws"}, - BootstrapProviders: []string{"k0s"}, - ControlPlaneProviders: []string{"k0s"}, + management.WithAvailableProviders(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, }), ) @@ -87,17 +87,17 @@ var ( managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), existingObjects: 
[]runtime.Object{ management.NewManagement( - management.WithAvailableProviders(v1alpha1.Providers{ - InfrastructureProviders: []string{"aws"}, - BootstrapProviders: []string{"k0s"}, + management.WithAvailableProviders(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, }), ), template.NewClusterTemplate( template.WithName(testTemplateName), - template.WithProvidersStatus(v1alpha1.Providers{ - InfrastructureProviders: []string{"azure"}, - BootstrapProviders: []string{"k0s"}, - ControlPlaneProviders: []string{"k0s"}, + template.WithProvidersStatus(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "azure"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, }), template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), ), @@ -111,10 +111,10 @@ var ( mgmt, template.NewClusterTemplate( template.WithName(testTemplateName), - template.WithProvidersStatus(v1alpha1.Providers{ - InfrastructureProviders: []string{"aws"}, - BootstrapProviders: []string{"k0s"}, - ControlPlaneProviders: []string{"k0s"}, + template.WithProvidersStatus(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, }), template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), ), diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml index ab9fe08dd..4dc8aadd6 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml @@ -57,8 +57,8 @@ spec: description: ClusterTemplateSpec defines the desired state of ClusterTemplate properties: helm: - description: Helm holds a reference to a Helm chart representing the - HMC template + description: HelmSpec references a Helm chart representing the HMC + template properties: chartName: description: ChartName is a name of a Helm chart representing @@ -103,28 +103,62 @@ spec: - message: either chartName or chartRef must be set rule: (has(self.chartName) && !has(self.chartRef)) || (!has(self.chartName) && has(self.chartRef)) + k8sVersion: + description: Compatible K8S version of the cluster set in the SemVer + format. + type: string providers: - description: |- - Providers represent required/exposed CAPI providers depending on the template type. - Should be set if not present in the Helm chart metadata. + description: Providers represent required CAPI providers with constrainted + compatibility versions set. Should be set if not present in the + Helm chart metadata. properties: bootstrap: - description: BootstrapProviders is the list of CAPI bootstrap - providers + description: List of CAPI bootstrap providers with either an exact + or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. 
+ type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array controlPlane: - description: ControlPlaneProviders is the list of CAPI control - plane providers + description: List of CAPI control plane providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array infrastructure: - description: InfrastructureProviders is the list of CAPI infrastructure - providers + description: List of CAPI infrastructure providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array type: object required: @@ -174,31 +208,65 @@ spec: description: description: Description contains information about the template. type: string + k8sVersion: + description: Compatible K8S version of the cluster set in the SemVer + format. + type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer providers: - description: Providers represent required/exposed CAPI providers depending - on the template type. + description: Providers represent exposed CAPI providers with constrainted + compatibility versions set. properties: bootstrap: - description: BootstrapProviders is the list of CAPI bootstrap - providers + description: List of CAPI bootstrap providers with either an exact + or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array controlPlane: - description: ControlPlaneProviders is the list of CAPI control - plane providers + description: List of CAPI control plane providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array infrastructure: - description: InfrastructureProviders is the list of CAPI infrastructure - providers + description: List of CAPI infrastructure providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. 
+ type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array type: object valid: diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index 83c58b480..37d55cab1 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -188,6 +188,11 @@ spec: - type type: object type: array + k8sVersion: + description: |- + Currently compatible K8S version of the cluster. Being set only if + the corresponding ClusterTemplate provided it in the spec. + type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml index a0971a3cb..04c98a061 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml @@ -112,26 +112,57 @@ spec: description: ManagementStatus defines the observed state of Management properties: availableProviders: - description: AvailableProviders holds all CAPI providers available - on the Management cluster. + description: |- + AvailableProviders holds all CAPI providers available along with + their exact compatibility versions if specified in ProviderTemplates on the Management cluster. properties: bootstrap: - description: BootstrapProviders is the list of CAPI bootstrap - providers + description: List of CAPI bootstrap providers with either an exact + or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array controlPlane: - description: ControlPlaneProviders is the list of CAPI control - plane providers + description: List of CAPI control plane providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array infrastructure: - description: InfrastructureProviders is the list of CAPI infrastructure - providers + description: List of CAPI infrastructure providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. 
+ type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array type: object components: diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml index 5249d0bd6..8442a2e15 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml @@ -56,9 +56,12 @@ spec: spec: description: ProviderTemplateSpec defines the desired state of ProviderTemplate properties: + capiVersion: + description: Compatible CAPI provider version set in the SemVer format. + type: string helm: - description: Helm holds a reference to a Helm chart representing the - HMC template + description: HelmSpec references a Helm chart representing the HMC + template properties: chartName: description: ChartName is a name of a Helm chart representing @@ -104,27 +107,56 @@ spec: rule: (has(self.chartName) && !has(self.chartRef)) || (!has(self.chartName) && has(self.chartRef)) providers: - description: |- - Providers represent required/exposed CAPI providers depending on the template type. - Should be set if not present in the Helm chart metadata. + description: Represents required CAPI providers with exact compatibility + versions set. Should be set if not present in the Helm chart metadata. properties: bootstrap: - description: BootstrapProviders is the list of CAPI bootstrap - providers + description: List of CAPI bootstrap providers with either an exact + or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array controlPlane: - description: ControlPlaneProviders is the list of CAPI control - plane providers + description: List of CAPI control plane providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array infrastructure: - description: InfrastructureProviders is the list of CAPI infrastructure - providers + description: List of CAPI infrastructure providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array type: object required: @@ -136,6 +168,9 @@ spec: status: description: ProviderTemplateStatus defines the observed state of ProviderTemplate properties: + capiVersion: + description: Compatible CAPI provider version in the SemVer format. 
+ type: string chartRef: description: |- ChartRef is a reference to a source controller resource containing the @@ -179,26 +214,56 @@ spec: format: int64 type: integer providers: - description: Providers represent required/exposed CAPI providers depending - on the template type. + description: Providers represent exposed CAPI providers with exact + compatibility versions set. properties: bootstrap: - description: BootstrapProviders is the list of CAPI bootstrap - providers + description: List of CAPI bootstrap providers with either an exact + or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array controlPlane: - description: ControlPlaneProviders is the list of CAPI control - plane providers + description: List of CAPI control plane providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array infrastructure: - description: InfrastructureProviders is the list of CAPI infrastructure - providers + description: List of CAPI infrastructure providers with either + an exact or constrainted version in the SemVer format. items: - type: string + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrContraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object type: array type: object valid: diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml index e3747b9d9..465f57233 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml @@ -57,8 +57,8 @@ spec: description: ServiceTemplateSpec defines the desired state of ServiceTemplate properties: helm: - description: Helm holds a reference to a Helm chart representing the - HMC template + description: HelmSpec references a Helm chart representing the HMC + template properties: chartName: description: ChartName is a name of a Helm chart representing @@ -103,10 +103,13 @@ spec: - message: either chartName or chartRef must be set rule: (has(self.chartName) && !has(self.chartRef)) || (!has(self.chartName) && has(self.chartRef)) + k8sConstraint: + description: Constraint describing compatible K8S versions of the + cluster set in the SemVer format. + type: string providers: - description: |- - Providers represent required/exposed CAPI providers depending on the template type. - Should be set if not present in the Helm chart metadata. + description: Represents required CAPI providers. Should be set if + not present in the Helm chart metadata. 
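# Illustrative sketch, not part of the generated CRD: a hypothetical
# ServiceTemplate manifest exercising the new k8sConstraint field together
# with the plain provider name lists this schema describes. All names and
# versions below are assumptions.
#
#   apiVersion: hmc.mirantis.com/v1alpha1
#   kind: ServiceTemplate
#   metadata:
#     name: kyverno-demo
#     namespace: hmc-system
#   spec:
#     helm:
#       chartName: kyverno
#       chartVersion: 3.2.6
#     k8sConstraint: ">=1.28"
#     providers:
#       bootstrap:
#         - k0s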
properties: bootstrap: description: BootstrapProviders is the list of CAPI bootstrap @@ -174,13 +177,16 @@ spec: description: description: Description contains information about the template. type: string + k8sConstraint: + description: Constraint describing compatible K8S versions of the + cluster set in the SemVer format. + type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer providers: - description: Providers represent required/exposed CAPI providers depending - on the template type. + description: Represents exposed CAPI providers. properties: bootstrap: description: BootstrapProviders is the list of CAPI bootstrap diff --git a/test/objects/management/management.go b/test/objects/management/management.go index 26a31899f..98c5ade15 100644 --- a/test/objects/management/management.go +++ b/test/objects/management/management.go @@ -64,7 +64,7 @@ func WithProviders(providers []v1alpha1.Provider) Opt { } } -func WithAvailableProviders(providers v1alpha1.Providers) Opt { +func WithAvailableProviders(providers v1alpha1.ProvidersTupled) Opt { return func(p *v1alpha1.Management) { p.Status.AvailableProviders = providers } diff --git a/test/objects/template/template.go b/test/objects/template/template.go index f3184be39..6fa1e5bd8 100644 --- a/test/objects/template/template.go +++ b/test/objects/template/template.go @@ -15,8 +15,11 @@ package template import ( + "fmt" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/Mirantis/hmc/api/v1alpha1" ) @@ -26,108 +29,136 @@ const ( DefaultNamespace = "default" ) -type Template struct { - metav1.ObjectMeta `json:",inline"` - Spec v1alpha1.TemplateSpecCommon `json:"spec"` - Status v1alpha1.TemplateStatusCommon `json:"status"` -} +type ( + Opt func(template Template) -type Opt func(template *Template) + Template interface { + client.Object + GetHelmSpec() *v1alpha1.HelmSpec + GetCommonStatus() *v1alpha1.TemplateStatusCommon + } +) func NewClusterTemplate(opts ...Opt) *v1alpha1.ClusterTemplate { - templateState := NewTemplate(opts...) - return &v1alpha1.ClusterTemplate{ - ObjectMeta: templateState.ObjectMeta, - Spec: v1alpha1.ClusterTemplateSpec{TemplateSpecCommon: templateState.Spec}, - Status: v1alpha1.ClusterTemplateStatus{TemplateStatusCommon: templateState.Status}, + t := &v1alpha1.ClusterTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultName, + Namespace: DefaultNamespace, + }, + } + + for _, o := range opts { + o(t) } + + return t } func NewServiceTemplate(opts ...Opt) *v1alpha1.ServiceTemplate { - templateState := NewTemplate(opts...) - return &v1alpha1.ServiceTemplate{ - ObjectMeta: templateState.ObjectMeta, - Spec: v1alpha1.ServiceTemplateSpec{TemplateSpecCommon: templateState.Spec}, - Status: v1alpha1.ServiceTemplateStatus{TemplateStatusCommon: templateState.Status}, + t := &v1alpha1.ServiceTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultName, + Namespace: DefaultNamespace, + }, } -} -func NewProviderTemplate(opts ...Opt) *v1alpha1.ProviderTemplate { - templateState := NewTemplate(opts...) 
- return &v1alpha1.ProviderTemplate{ - ObjectMeta: templateState.ObjectMeta, - Spec: v1alpha1.ProviderTemplateSpec{TemplateSpecCommon: templateState.Spec}, - Status: v1alpha1.ProviderTemplateStatus{TemplateStatusCommon: templateState.Status}, + for _, o := range opts { + o(t) } + + return t } -func NewTemplate(opts ...Opt) *Template { - template := &Template{ +func NewProviderTemplate(opts ...Opt) *v1alpha1.ProviderTemplate { + t := &v1alpha1.ProviderTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: DefaultName, Namespace: DefaultNamespace, }, } - for _, opt := range opts { - opt(template) + + for _, o := range opts { + o(t) } - return template + + return t } func WithName(name string) Opt { - return func(t *Template) { - t.Name = name + return func(t Template) { + t.SetName(name) } } func WithNamespace(namespace string) Opt { - return func(t *Template) { - t.Namespace = namespace + return func(t Template) { + t.SetNamespace(namespace) } } func WithLabels(labels map[string]string) Opt { - return func(t *Template) { - t.Labels = labels + return func(t Template) { + t.SetLabels(labels) } } func ManagedByHMC() Opt { - return func(t *Template) { - if t.Labels == nil { - t.Labels = make(map[string]string) + return func(template Template) { + labels := template.GetLabels() + if labels == nil { + labels = make(map[string]string) } - t.Labels[v1alpha1.HMCManagedLabelKey] = v1alpha1.HMCManagedLabelValue - } -} + labels[v1alpha1.HMCManagedLabelKey] = v1alpha1.HMCManagedLabelValue -func WithHelmSpec(helmSpec v1alpha1.HelmSpec) Opt { - return func(t *Template) { - t.Spec.Helm = helmSpec + template.SetLabels(labels) } } -func WithProviders(providers v1alpha1.Providers) Opt { - return func(t *Template) { - t.Spec.Providers = providers +func WithHelmSpec(helmSpec v1alpha1.HelmSpec) Opt { + return func(t Template) { + spec := t.GetHelmSpec() + spec.ChartName = helmSpec.ChartName + spec.ChartRef = helmSpec.ChartRef + spec.ChartVersion = helmSpec.ChartVersion } } func WithValidationStatus(validationStatus v1alpha1.TemplateValidationStatus) Opt { - return func(t *Template) { - t.Status.TemplateValidationStatus = validationStatus + return func(t Template) { + status := t.GetCommonStatus() + status.TemplateValidationStatus = validationStatus } } -func WithProvidersStatus(providers v1alpha1.Providers) Opt { - return func(t *Template) { - t.Status.Providers = providers +func WithProvidersStatus[T v1alpha1.Providers | v1alpha1.ProvidersTupled](providers T) Opt { + return func(t Template) { + switch v := t.(type) { + case *v1alpha1.ClusterTemplate: + var ok bool + v.Status.Providers, ok = any(providers).(v1alpha1.ProvidersTupled) + if !ok { + panic(fmt.Sprintf("unexpected type %T", providers)) + } + case *v1alpha1.ProviderTemplate: + var ok bool + v.Status.Providers, ok = any(providers).(v1alpha1.ProvidersTupled) + if !ok { + panic(fmt.Sprintf("unexpected type %T", providers)) + } + case *v1alpha1.ServiceTemplate: + var ok bool + v.Status.Providers, ok = any(providers).(v1alpha1.Providers) + if !ok { + panic(fmt.Sprintf("unexpected type %T", providers)) + } + } } } func WithConfigStatus(config string) Opt { - return func(t *Template) { - t.Status.Config = &apiextensionsv1.JSON{ + return func(t Template) { + status := t.GetCommonStatus() + status.Config = &apiextensionsv1.JSON{ Raw: []byte(config), } } From 571dc773420f7954df8d714d16b457841b058cb8 Mon Sep 17 00:00:00 2001 From: zerospiel Date: Fri, 4 Oct 2024 17:14:22 +0200 Subject: [PATCH 08/29] Enforce compatibility attributes * fill out statuses with the new 
attributes for managedclustes,managements,*templates resources * fix a couple of logical errors * check compatibility attributes in the admission controller * tests * fix typos Closes #400 #354 --- .golangci.yml | 7 + api/v1alpha1/clustertemplate_types.go | 26 +- api/v1alpha1/common.go | 4 +- api/v1alpha1/managedcluster_types.go | 8 +- api/v1alpha1/providertemplate_types.go | 22 +- api/v1alpha1/servicetemplate_types.go | 50 +++- api/v1alpha1/templates_common.go | 35 +-- api/v1alpha1/zz_generated.deepcopy.go | 1 + go.mod | 2 +- .../controller/managedcluster_controller.go | 39 ++- internal/controller/template_controller.go | 2 +- .../templatemanagement_controller.go | 4 +- internal/sveltos/profile.go | 4 +- internal/webhook/managedcluster_webhook.go | 246 ++++++++++++++---- .../webhook/managedcluster_webhook_test.go | 120 +++++++-- internal/webhook/template_webhook.go | 21 +- internal/webhook/template_webhook_test.go | 6 +- .../hmc.mirantis.com_clustertemplates.yaml | 12 +- .../hmc.mirantis.com_managedclusters.yaml | 57 +++- .../crds/hmc.mirantis.com_managements.yaml | 6 +- .../hmc.mirantis.com_providertemplates.yaml | 12 +- test/managedcluster/vsphere/vsphere.go | 2 +- test/objects/managedcluster/managedcluster.go | 8 +- test/objects/template/template.go | 11 + 24 files changed, 521 insertions(+), 184 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f47fe05ba..e6232ebd2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -15,6 +15,9 @@ issues: - text: "struct-tag: unknown option 'inline' in JSON tag" linters: - revive + - text: "Unhandled error in call to function fmt.Print*" + linters: + - revive linters: disable-all: true enable: @@ -60,6 +63,10 @@ linters: - whitespace linters-settings: + dupl: + # Tokens count to trigger issue. + # Default: 150 + threshold: 200 gofmt: # Apply the rewrite rules to the source before reformatting. # https://pkg.go.dev/cmd/gofmt diff --git a/api/v1alpha1/clustertemplate_types.go b/api/v1alpha1/clustertemplate_types.go index 0d4127ffa..a10368b7b 100644 --- a/api/v1alpha1/clustertemplate_types.go +++ b/api/v1alpha1/clustertemplate_types.go @@ -21,7 +21,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ClusterTemplateKind = "ClusterTemplate" +const ( + // Denotes the clustertemplate resource Kind. + ClusterTemplateKind = "ClusterTemplate" + // ChartAnnotationKubernetesVersion is an annotation containing the Kubernetes exact version in the SemVer format associated with a ClusterTemplate. + ChartAnnotationKubernetesVersion = "hmc.mirantis.com/k8s-version" +) // ClusterTemplateSpec defines the desired state of ClusterTemplate type ClusterTemplateSpec struct { @@ -61,6 +66,20 @@ func (t *ClusterTemplate) FillStatusWithProviders(annotations map[string]string) return fmt.Errorf("failed to parse ClusterTemplate infrastructure providers: %v", err) } + kversion := annotations[ChartAnnotationKubernetesVersion] + if t.Spec.KubertenesVersion != "" { + kversion = t.Spec.KubertenesVersion + } + if kversion == "" { + return nil + } + + if _, err := semver.NewVersion(kversion); err != nil { + return fmt.Errorf("failed to parse kubernetes version %s: %w", kversion, err) + } + + t.Status.KubertenesVersion = kversion + return nil } @@ -69,11 +88,6 @@ func (t *ClusterTemplate) GetSpecProviders() ProvidersTupled { return t.Spec.Providers } -// GetStatusProviders returns .status.providers of the Template. 
-func (t *ClusterTemplate) GetStatusProviders() ProvidersTupled { - return t.Status.Providers -} - // GetHelmSpec returns .spec.helm of the Template. func (t *ClusterTemplate) GetHelmSpec() *HelmSpec { return &t.Spec.Helm diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index 936b442b7..f100a4bfc 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -34,7 +34,7 @@ type ( // Holds different types of CAPI providers with either // an exact or constrainted version in the SemVer format. The requirement - // is determined by a consumer this type. + // is determined by a consumer of this type. ProvidersTupled struct { // List of CAPI infrastructure providers with either an exact or constrainted version in the SemVer format. InfrastructureProviders []ProviderTuple `json:"infrastructure,omitempty"` @@ -49,7 +49,7 @@ type ( // Name of the provider. Name string `json:"name,omitempty"` // Compatibility restriction in the SemVer format (exact or constrainted version) - VersionOrContraint string `json:"versionOrContraint,omitempty"` + VersionOrConstraint string `json:"versionOrConstraint,omitempty"` } ) diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 7a4915488..225ce290a 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -74,7 +74,8 @@ type ManagedClusterSpec struct { // +kubebuilder:validation:MinLength=1 // Template is a reference to a Template object located in the same namespace. - Template string `json:"template"` + Template string `json:"template"` + // Name reference to the related Credentials object. Credential string `json:"credential,omitempty"` // Services is a list of services created via ServiceTemplates // that could be installed on the target cluster. @@ -101,8 +102,11 @@ type ManagedClusterSpec struct { // ManagedClusterStatus defines the observed state of ManagedCluster type ManagedClusterStatus struct { // Currently compatible K8S version of the cluster. Being set only if - // the corresponding ClusterTemplate provided it in the spec. + // provided by the corresponding ClusterTemplate. KubertenesVersion string `json:"k8sVersion,omitempty"` + // Providers represent exposed CAPI providers with constrainted compatibility versions set. + // Propagated from the corresponding ClusterTemplate. + Providers ProvidersTupled `json:"providers,omitempty"` // Conditions contains details for the current state of the ManagedCluster Conditions []metav1.Condition `json:"conditions,omitempty"` // ObservedGeneration is the last observed generation. diff --git a/api/v1alpha1/providertemplate_types.go b/api/v1alpha1/providertemplate_types.go index e35cbd7ff..c1326b71c 100644 --- a/api/v1alpha1/providertemplate_types.go +++ b/api/v1alpha1/providertemplate_types.go @@ -21,6 +21,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// ChartAnnotationCAPIVersion is an annotation containing the CAPI exact version in the SemVer format associated with a ProviderTemplate. 
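// Hedged aside, not part of the patch: the precedence pattern shared by the
// FillStatusWithProviders implementations in this series, shown standalone.
// An explicit spec value overrides the chart annotation, an empty result is
// not an error, and anything else must parse as exact SemVer. The helper
// name resolveExactVersion is hypothetical.
//
//	func resolveExactVersion(specValue string, annotations map[string]string, anno string) (string, error) {
//		v := annotations[anno]
//		if specValue != "" {
//			v = specValue // the spec wins over the chart annotation
//		}
//		if v == "" {
//			return "", nil // nothing set, nothing to validate
//		}
//		if _, err := semver.NewVersion(v); err != nil {
//			return "", fmt.Errorf("failed to parse version %s: %w", v, err)
//		}
//		return v, nil
//	}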
+const ChartAnnotationCAPIVersion = "hmc.mirantis.com/capi-version" + // ProviderTemplateSpec defines the desired state of ProviderTemplate type ProviderTemplateSpec struct { Helm HelmSpec `json:"helm"` @@ -59,6 +62,20 @@ func (t *ProviderTemplate) FillStatusWithProviders(annotations map[string]string return fmt.Errorf("failed to parse ProviderTemplate infrastructure providers: %v", err) } + capiVersion := annotations[ChartAnnotationCAPIVersion] + if t.Spec.CAPIVersion != "" { + capiVersion = t.Spec.CAPIVersion + } + if capiVersion == "" { + return nil + } + + if _, err := semver.NewVersion(capiVersion); err != nil { + return fmt.Errorf("failed to parse CAPI version %s: %w", capiVersion, err) + } + + t.Status.CAPIVersion = capiVersion + return nil } @@ -67,11 +84,6 @@ func (t *ProviderTemplate) GetSpecProviders() ProvidersTupled { return t.Spec.Providers } -// GetStatusProviders returns .status.providers of the Template. -func (t *ProviderTemplate) GetStatusProviders() ProvidersTupled { - return t.Status.Providers -} - // GetHelmSpec returns .spec.helm of the Template. func (t *ProviderTemplate) GetHelmSpec() *HelmSpec { return &t.Spec.Helm diff --git a/api/v1alpha1/servicetemplate_types.go b/api/v1alpha1/servicetemplate_types.go index 10c3152fc..bf2890ccd 100644 --- a/api/v1alpha1/servicetemplate_types.go +++ b/api/v1alpha1/servicetemplate_types.go @@ -15,12 +15,19 @@ package v1alpha1 import ( + "fmt" "strings" + "github.com/Masterminds/semver/v3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ServiceTemplateKind = "ServiceTemplate" +const ( + // Denotes the servicetemplate resource Kind. + ServiceTemplateKind = "ServiceTemplate" + // ChartAnnotationKubernetesConstraint is an annotation containing the Kubernetes constrainted version in the SemVer format associated with a ServiceTemplate. + ChartAnnotationKubernetesConstraint = "hmc.mirantis.com/k8s-version-constraint" +) // ServiceTemplateSpec defines the desired state of ServiceTemplate type ServiceTemplateSpec struct { @@ -43,28 +50,23 @@ type ServiceTemplateStatus struct { // FillStatusWithProviders sets the status of the template with providers // either from the spec or from the given annotations. -// -// The return parameter is noop and is always nil. 
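// Not part of the patch — a worked example of the annotation parsing
// rewritten below: a comma-separated chart annotation is split, whitespace
// is trimmed, and empty entries are dropped.
//
//	providers := "k0s, kubeadm, "
//	splitted := strings.Split(providers, ",")
//	result := make([]string, 0, len(splitted))
//	for _, v := range splitted {
//		if c := strings.TrimSpace(v); c != "" {
//			result = append(result, c)
//		}
//	}
//	// result == []string{"k0s", "kubeadm"}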
func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) error { parseProviders := func(typ providersType) []string { var ( - pspec, pstatus []string - anno string + pspec []string + anno string ) switch typ { case bootstrapProvidersType: - pspec, pstatus = t.Spec.Providers.BootstrapProviders, t.Status.Providers.BootstrapProviders - anno = ChartAnnotationBootstrapProviders + pspec, anno = t.Spec.Providers.BootstrapProviders, ChartAnnotationBootstrapProviders case controlPlaneProvidersType: - pspec, pstatus = t.Spec.Providers.ControlPlaneProviders, t.Status.Providers.ControlPlaneProviders - anno = ChartAnnotationControlPlaneProviders + pspec, anno = t.Spec.Providers.ControlPlaneProviders, ChartAnnotationControlPlaneProviders case infrastructureProvidersType: - pspec, pstatus = t.Spec.Providers.InfrastructureProviders, t.Status.Providers.InfrastructureProviders - anno = ChartAnnotationInfraProviders + pspec, anno = t.Spec.Providers.InfrastructureProviders, ChartAnnotationInfraProviders } if len(pspec) > 0 { - return pstatus + return pspec } providers := annotations[anno] @@ -72,13 +74,35 @@ func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) return []string{} } - return strings.Split(providers, ",") + splitted := strings.Split(providers, ",") + result := make([]string, 0, len(splitted)) + for _, v := range splitted { + if c := strings.TrimSpace(v); c != "" { + result = append(result, c) + } + } + + return result } t.Status.Providers.BootstrapProviders = parseProviders(bootstrapProvidersType) t.Status.Providers.ControlPlaneProviders = parseProviders(controlPlaneProvidersType) t.Status.Providers.InfrastructureProviders = parseProviders(infrastructureProvidersType) + kconstraint := annotations[ChartAnnotationKubernetesConstraint] + if t.Spec.KubertenesConstraint != "" { + kconstraint = t.Spec.KubertenesConstraint + } + if kconstraint == "" { + return nil + } + + if _, err := semver.NewConstraint(kconstraint); err != nil { + return fmt.Errorf("failed to parse kubernetes constraint %s: %w", kconstraint, err) + } + + t.Status.KubertenesConstraint = kconstraint + return nil } diff --git a/api/v1alpha1/templates_common.go b/api/v1alpha1/templates_common.go index 5ea92ccaa..36fd1f167 100644 --- a/api/v1alpha1/templates_common.go +++ b/api/v1alpha1/templates_common.go @@ -85,14 +85,10 @@ const ( infrastructureProvidersType ) -func parseProviders[T any](providersGetter interface { - GetSpecProviders() ProvidersTupled - GetStatusProviders() ProvidersTupled -}, typ providersType, annotations map[string]string, validationFn func(string) (T, error), -) ([]ProviderTuple, error) { - pspec, pstatus, anno := getProvidersSpecStatusAnno(providersGetter, typ) +func parseProviders[T any](providersGetter interface{ GetSpecProviders() ProvidersTupled }, typ providersType, annotations map[string]string, validationFn func(string) (T, error)) ([]ProviderTuple, error) { + pspec, anno := getProvidersSpecAnno(providersGetter, typ) if len(pspec) > 0 { - return pstatus, nil + return pspec, nil } providers := annotations[anno] @@ -102,13 +98,12 @@ func parseProviders[T any](providersGetter interface { var ( splitted = strings.Split(providers, ",") + pstatus = make([]ProviderTuple, 0, len(splitted)) merr error ) - - pstatus = make([]ProviderTuple, 0, len(splitted)) - for _, v := range splitted { - nVerOrC := strings.SplitN(v, " ", 1) + v = strings.TrimSpace(v) + nVerOrC := strings.SplitN(v, " ", 2) if len(nVerOrC) == 0 { // BCE (bound check elimination) continue } @@ 
-121,30 +116,26 @@ func parseProviders[T any](providersGetter interface { ver := strings.TrimSpace(nVerOrC[1]) if _, err := validationFn(ver); err != nil { // validation - merr = errors.Join(merr, fmt.Errorf("failed to parse version %s in the %s: %v", ver, v, err)) + merr = errors.Join(merr, fmt.Errorf("failed to parse %s in the %s: %v", ver, v, err)) continue } - n.VersionOrContraint = ver + n.VersionOrConstraint = ver pstatus = append(pstatus, n) } return pstatus, merr } -func getProvidersSpecStatusAnno(providersGetter interface { - GetSpecProviders() ProvidersTupled - GetStatusProviders() ProvidersTupled -}, typ providersType, -) (spec, status []ProviderTuple, annotation string) { +func getProvidersSpecAnno(providersGetter interface{ GetSpecProviders() ProvidersTupled }, typ providersType) (spec []ProviderTuple, annotation string) { switch typ { case bootstrapProvidersType: - return providersGetter.GetSpecProviders().BootstrapProviders, providersGetter.GetStatusProviders().BootstrapProviders, ChartAnnotationBootstrapProviders + return providersGetter.GetSpecProviders().BootstrapProviders, ChartAnnotationBootstrapProviders case controlPlaneProvidersType: - return providersGetter.GetSpecProviders().ControlPlaneProviders, providersGetter.GetStatusProviders().ControlPlaneProviders, ChartAnnotationControlPlaneProviders + return providersGetter.GetSpecProviders().ControlPlaneProviders, ChartAnnotationControlPlaneProviders case infrastructureProvidersType: - return providersGetter.GetSpecProviders().InfrastructureProviders, providersGetter.GetStatusProviders().InfrastructureProviders, ChartAnnotationInfraProviders + return providersGetter.GetSpecProviders().InfrastructureProviders, ChartAnnotationInfraProviders default: - return []ProviderTuple{}, []ProviderTuple{}, "" + return []ProviderTuple{}, "" } } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a0d3c518a..0060cf256 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -488,6 +488,7 @@ func (in *ManagedClusterSpec) DeepCopy() *ManagedClusterSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
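// Aside for illustration only: the "name[ version]" tuple format handled by
// parseProviders above. "aws v2.6.1" yields a name plus a validated version,
// while a bare "azure" yields the name only; the names and versions here are
// hypothetical.
//
//	nVerOrC := strings.SplitN(strings.TrimSpace("aws v2.6.1"), " ", 2)
//	tuple := ProviderTuple{Name: nVerOrC[0]}
//	if len(nVerOrC) > 1 {
//		ver := strings.TrimSpace(nVerOrC[1])
//		if _, err := semver.NewVersion(ver); err == nil { // or NewConstraint, depending on the consumer
//			tuple.VersionOrConstraint = ver
//		}
//	}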
func (in *ManagedClusterStatus) DeepCopyInto(out *ManagedClusterStatus) { *out = *in + in.Providers.DeepCopyInto(&out.Providers) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) diff --git a/go.mod b/go.mod index 7e6c43301..4bdaa60c8 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,6 @@ require ( github.com/fluxcd/pkg/apis/meta v1.6.1 github.com/fluxcd/pkg/runtime v0.49.1 github.com/fluxcd/source-controller/api v1.4.1 - github.com/go-logr/logr v1.4.2 github.com/google/uuid v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 github.com/onsi/ginkgo/v2 v2.20.2 @@ -72,6 +71,7 @@ require ( github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index 24476c1ff..86a05fb4c 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -26,7 +26,6 @@ import ( fluxmeta "github.com/fluxcd/pkg/apis/meta" fluxconditions "github.com/fluxcd/pkg/runtime/conditions" sourcev1 "github.com/fluxcd/source-controller/api/v1" - "github.com/go-logr/logr" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart" corev1 "k8s.io/api/core/v1" @@ -38,7 +37,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" @@ -107,7 +105,7 @@ func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque if !managedCluster.DeletionTimestamp.IsZero() { l.Info("Deleting ManagedCluster") - return r.Delete(ctx, l, managedCluster) + return r.Delete(ctx, managedCluster) } if managedCluster.Status.ObservedGeneration == 0 { @@ -121,10 +119,13 @@ func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque l.Error(err, "Failed to track ManagedCluster creation") } } - return r.Update(ctx, l, managedCluster) + + return r.Update(ctx, managedCluster) } -func (r *ManagedClusterReconciler) setStatusFromClusterStatus(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (bool, error) { +func (r *ManagedClusterReconciler) setStatusFromClusterStatus(ctx context.Context, managedCluster *hmc.ManagedCluster) (requeue bool, _ error) { + l := ctrl.LoggerFrom(ctx) + resourceID := schema.GroupVersionResource{ Group: "cluster.x-k8s.io", Version: "v1beta1", @@ -181,7 +182,9 @@ func (r *ManagedClusterReconciler) setStatusFromClusterStatus(ctx context.Contex return !allConditionsComplete, nil } -func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (result ctrl.Result, err error) { +func (r *ManagedClusterReconciler) Update(ctx context.Context, managedCluster *hmc.ManagedCluster) (result ctrl.Result, err error) { + l := ctrl.LoggerFrom(ctx) + finalizersUpdated := controllerutil.AddFinalizer(managedCluster, hmc.ManagedClusterFinalizer) if finalizersUpdated { if err := r.Client.Update(ctx, managedCluster); err != nil { @@ -214,6 +217,7 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma }) return ctrl.Result{}, err } + if 
!template.Status.Valid { errMsg := "provided template is not marked as valid" apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ @@ -224,12 +228,17 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma }) return ctrl.Result{}, errors.New(errMsg) } + // template is ok, propagate data from it + managedCluster.Status.KubertenesVersion = template.Status.KubertenesVersion + managedCluster.Status.Providers = template.Status.Providers + apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.TemplateReadyCondition, Status: metav1.ConditionTrue, Reason: hmc.SucceededReason, Message: "Template is valid", }) + source, err := r.getSource(ctx, template.Status.ChartRef) if err != nil { apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ @@ -347,7 +356,7 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, l logr.Logger, ma }) } - requeue, err := r.setStatusFromClusterStatus(ctx, l, managedCluster) + requeue, err := r.setStatusFromClusterStatus(ctx, managedCluster) if err != nil { if requeue { return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, err @@ -387,7 +396,7 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M } tmpl := &hmc.ServiceTemplate{} - tmplRef := types.NamespacedName{Name: svc.Template, Namespace: mc.Namespace} + tmplRef := client.ObjectKey{Name: svc.Template, Namespace: mc.Namespace} if err := r.Get(ctx, tmplRef, tmpl); err != nil { return ctrl.Result{}, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err) } @@ -427,7 +436,7 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M }) } - if _, err := sveltos.ReconcileProfile(ctx, r.Client, l, mc.Namespace, mc.Name, + if _, err := sveltos.ReconcileProfile(ctx, r.Client, mc.Namespace, mc.Name, map[string]string{ hmc.FluxHelmChartNamespaceKey: mc.Namespace, hmc.FluxHelmChartNameKey: mc.Name, @@ -457,14 +466,14 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M // getServiceTemplateSource returns the source (HelmRepository) used by the ServiceTemplate. // It is fetched by querying for ServiceTemplate -> HelmChart -> HelmRepository. func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) { - tmplRef := types.NamespacedName{Namespace: tmpl.Namespace, Name: tmpl.Name} + tmplRef := client.ObjectKey{Namespace: tmpl.Namespace, Name: tmpl.Name} if tmpl.Status.ChartRef == nil { return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String()) } hc := &sourcev1.HelmChart{} - if err := r.Get(ctx, types.NamespacedName{ + if err := r.Get(ctx, client.ObjectKey{ Namespace: tmpl.Status.ChartRef.Namespace, Name: tmpl.Status.ChartRef.Name, }, hc); err != nil { @@ -472,7 +481,7 @@ func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, } repo := &sourcev1.HelmRepository{} - if err := r.Get(ctx, types.NamespacedName{ + if err := r.Get(ctx, client.ObjectKey{ // Using chart's namespace because it's source // (helm repository in this case) should be within the same namespace. 
Namespace: hc.Namespace, @@ -552,7 +561,9 @@ func (r *ManagedClusterReconciler) getSource(ctx context.Context, ref *hcv2.Cros return &hc, nil } -func (r *ManagedClusterReconciler) Delete(ctx context.Context, l logr.Logger, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { +func (r *ManagedClusterReconciler) Delete(ctx context.Context, managedCluster *hmc.ManagedCluster) (ctrl.Result, error) { + l := ctrl.LoggerFrom(ctx) + hr := &hcv2.HelmRelease{} err := r.Get(ctx, client.ObjectKey{ Name: managedCluster.Name, @@ -632,7 +643,7 @@ func (r *ManagedClusterReconciler) getProviders(ctx context.Context, templateNam template := &hmc.ClusterTemplate{} templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} if err := r.Get(ctx, templateRef, template); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "Failed to get ClusterTemplate", "namespace", templateNamespace, "name", templateName) + ctrl.LoggerFrom(ctx).Error(err, "Failed to get ClusterTemplate", "template namespace", templateNamespace, "template name", templateName) return nil, err } diff --git a/internal/controller/template_controller.go b/internal/controller/template_controller.go index 8b1cf9799..e92b8eeab 100644 --- a/internal/controller/template_controller.go +++ b/internal/controller/template_controller.go @@ -143,7 +143,7 @@ func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template tem } err := helm.ReconcileHelmRepository(ctx, r.Client, defaultRepoName, namespace, r.DefaultRegistryConfig.HelmRepositorySpec()) if err != nil { - l.Error(err, "Failed to reconcile default HelmRepository", "namespace", template.GetNamespace()) + l.Error(err, "Failed to reconcile default HelmRepository") return ctrl.Result{}, err } } diff --git a/internal/controller/templatemanagement_controller.go b/internal/controller/templatemanagement_controller.go index 40f69a2b8..571418563 100644 --- a/internal/controller/templatemanagement_controller.go +++ b/internal/controller/templatemanagement_controller.go @@ -248,7 +248,7 @@ func (r *TemplateManagementReconciler) createTemplateChain(ctx context.Context, } return err } - l.Info(fmt.Sprintf("%s was successfully created", source.Kind()), "namespace", targetNamespace, "name", source.GetName()) + l.Info(fmt.Sprintf("%s was successfully created", source.Kind()), "target namespace", targetNamespace, "source name", source.GetName()) return nil } @@ -262,7 +262,7 @@ func (r *TemplateManagementReconciler) deleteTemplateChain(ctx context.Context, } return err } - l.Info(fmt.Sprintf("%s was successfully deleted", chain.Kind()), "namespace", chain.GetNamespace(), "name", chain.GetName()) + l.Info(fmt.Sprintf("%s was successfully deleted", chain.Kind()), "chain namespace", chain.GetNamespace(), "chain name", chain.GetName()) return nil } diff --git a/internal/sveltos/profile.go b/internal/sveltos/profile.go index 500b40dab..7c4f60ccf 100644 --- a/internal/sveltos/profile.go +++ b/internal/sveltos/profile.go @@ -20,7 +20,6 @@ import ( "math" hmc "github.com/Mirantis/hmc/api/v1alpha1" - "github.com/go-logr/logr" sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" libsveltosv1beta1 "github.com/projectsveltos/libsveltos/api/v1beta1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -53,12 +52,13 @@ type HelmChartOpts struct { // ReconcileProfile reconciles a Sveltos Profile object. 
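// Editorial aside, not in the diff: the logging pattern this series
// standardizes on. Instead of threading a logr.Logger argument through call
// chains, handlers derive the logger from the context that controller-runtime
// has already populated.
//
//	func reconcileExample(ctx context.Context) {
//		l := ctrl.LoggerFrom(ctx) // logger injected by controller-runtime
//		l.Info("reconciling")
//	}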
func ReconcileProfile(ctx context.Context, cl client.Client, - l logr.Logger, namespace string, name string, matchLabels map[string]string, opts ReconcileProfileOpts, ) (*sveltosv1beta1.Profile, error) { + l := ctrl.LoggerFrom(ctx) + cp := &sveltosv1beta1.Profile{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, diff --git a/internal/webhook/managedcluster_webhook.go b/internal/webhook/managedcluster_webhook.go index 113a89f60..c4dd0ac21 100644 --- a/internal/webhook/managedcluster_webhook.go +++ b/internal/webhook/managedcluster_webhook.go @@ -18,8 +18,11 @@ import ( "context" "errors" "fmt" + "slices" "sort" + "github.com/Masterminds/semver/v3" + admissionv1 "k8s.io/api/admission/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -28,19 +31,19 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "github.com/Mirantis/hmc/api/v1alpha1" + hmcv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" ) type ManagedClusterValidator struct { client.Client } -var errInvalidManagedCluster = errors.New("the ManagedCluster is invalid") +const invalidManagedClusterMsg = "the ManagedCluster is invalid" func (v *ManagedClusterValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { v.Client = mgr.GetClient() return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.ManagedCluster{}). + For(&hmcv1alpha1.ManagedCluster{}). WithValidator(v). WithDefaulter(v). Complete() @@ -53,38 +56,95 @@ var ( // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. func (v *ManagedClusterValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - managedCluster, ok := obj.(*v1alpha1.ManagedCluster) + managedCluster, ok := obj.(*hmcv1alpha1.ManagedCluster) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj)) } + template, err := v.getManagedClusterTemplate(ctx, managedCluster.Namespace, managedCluster.Spec.Template) if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } - err = v.isTemplateValid(ctx, template) - if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + + if err := v.isTemplateValid(ctx, template); err != nil { + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } + return nil, nil } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
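One nuance behind replacing the errInvalidManagedCluster sentinel with the invalidManagedClusterMsg const above: the sentinel was only ever interpolated with %s or %v, which copies its text without wrapping it, so errors.Is could never have matched the returned errors anyway. A small sketch of the distinction, with hypothetical names:

    import (
        "errors"
        "fmt"
    )

    var errInvalid = errors.New("the object is invalid")

    func wrapDemo() {
        e1 := fmt.Errorf("%v: missing field", errInvalid) // formats the text only
        e2 := fmt.Errorf("%w: missing field", errInvalid) // wraps the sentinel

        fmt.Println(errors.Is(e1, errInvalid)) // false: no error chain to walk
        fmt.Println(errors.Is(e2, errInvalid)) // true: %w preserves the chain
    }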
func (v *ManagedClusterValidator) ValidateUpdate(ctx context.Context, _ runtime.Object, newObj runtime.Object) (admission.Warnings, error) { - newManagedCluster, ok := newObj.(*v1alpha1.ManagedCluster) + newManagedCluster, ok := newObj.(*hmcv1alpha1.ManagedCluster) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", newObj)) } + template, err := v.getManagedClusterTemplate(ctx, newManagedCluster.Namespace, newManagedCluster.Spec.Template) if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } - err = v.isTemplateValid(ctx, template) - if err != nil { - return nil, fmt.Errorf("%s: %v", errInvalidManagedCluster, err) + + if err := v.isTemplateValid(ctx, template); err != nil { + return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } + + if err := validateK8sCompatibility(ctx, v.Client, newManagedCluster); err != nil { + return admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"}, fmt.Errorf("failed to validate k8s compatibility: %v", err) + } + return nil, nil } +func validateK8sCompatibility(ctx context.Context, cl client.Client, mc *hmcv1alpha1.ManagedCluster) error { + if len(mc.Spec.Services) == 0 || mc.Status.KubertenesVersion == "" { + return nil + } + + svcTpls := new(hmcv1alpha1.ServiceTemplateList) + if err := cl.List(ctx, svcTpls, client.InNamespace(mc.Namespace)); err != nil { + return fmt.Errorf("failed to list ServiceTemplates in %s namespace: %w", mc.Namespace, err) + } + + svcTplName2KConstraint := make(map[string]string, len(svcTpls.Items)) + for _, v := range svcTpls.Items { + svcTplName2KConstraint[v.Name] = v.Status.KubertenesConstraint + } + + mcVersion, err := semver.NewVersion(mc.Status.KubertenesVersion) + if err != nil { // should never happen + return fmt.Errorf("failed to parse k8s version %s of the ManagedCluster %s/%s: %w", mc.Status.KubertenesVersion, mc.Namespace, mc.Name, err) + } + + for _, v := range mc.Spec.Services { + if v.Disable { + continue + } + + kc, ok := svcTplName2KConstraint[v.Template] + if !ok { + return fmt.Errorf("specified ServiceTemplate %s/%s is missing in the cluster", mc.Namespace, v.Template) + } + + if kc == "" { + continue + } + + tplConstraint, err := semver.NewConstraint(kc) + if err != nil { // should never happen + return fmt.Errorf("failed to parse k8s constrainted version %s of the ServiceTemplate %s/%s: %w", kc, mc.Namespace, v.Template, err) + } + + if !tplConstraint.Check(mcVersion) { + return fmt.Errorf("k8s version %s of the ManagedCluster %s/%s does not satisfy constrainted version %s from the ServiceTemplate %s/%s", + mc.Status.KubertenesVersion, mc.Namespace, mc.Name, + kc, mc.Namespace, v.Template) + } + } + + return nil +} + // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. func (*ManagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { return nil, nil @@ -92,94 +152,168 @@ func (*ManagedClusterValidator) ValidateDelete(_ context.Context, _ runtime.Obje // Default implements webhook.Defaulter so a webhook will be registered for the type. 
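validateK8sCompatibility above leans on github.com/Masterminds/semver/v3: the exact cluster version is parsed with NewVersion and tested against each ServiceTemplate constraint parsed with NewConstraint. A minimal sketch of those calls, reusing the v1.30.0 and <1.30 pair that the tests later in this patch exercise:

    import (
        "fmt"

        "github.com/Masterminds/semver/v3"
    )

    func checkConstraint() error {
        v, err := semver.NewVersion("v1.30.0") // the leading "v" is accepted
        if err != nil {
            return err
        }
        c, err := semver.NewConstraint("<1.30") // behaves as <1.30.0 here
        if err != nil {
            return err
        }
        if !c.Check(v) {
            // v1.30.0 does not satisfy "<1.30", which is exactly the failure
            // path the webhook reports for the ManagedCluster.
            return fmt.Errorf("version %s does not satisfy constraint <1.30", v)
        }
        return nil
    }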
func (v *ManagedClusterValidator) Default(ctx context.Context, obj runtime.Object) error { - managedCluster, ok := obj.(*v1alpha1.ManagedCluster) + managedCluster, ok := obj.(*hmcv1alpha1.ManagedCluster) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected ManagedCluster but got a %T", obj)) } - // Only apply defaults when there's no configuration provided - if managedCluster.Spec.Config != nil { + // Only apply defaults when there's no configuration provided; + // if template ref is empty, then nothing to default + if managedCluster.Spec.Config != nil || managedCluster.Spec.Template == "" { return nil } + template, err := v.getManagedClusterTemplate(ctx, managedCluster.Namespace, managedCluster.Spec.Template) if err != nil { - return fmt.Errorf("could not get template for the managedcluster: %s", err) + return fmt.Errorf("could not get template for the managedcluster: %v", err) } - err = v.isTemplateValid(ctx, template) - if err != nil { - return fmt.Errorf("template is invalid: %s", err) + + if err := v.isTemplateValid(ctx, template); err != nil { + return fmt.Errorf("template is invalid: %v", err) } + if template.Status.Config == nil { return nil } + managedCluster.Spec.DryRun = true managedCluster.Spec.Config = &apiextensionsv1.JSON{Raw: template.Status.Config.Raw} + return nil } -func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, templateNamespace, templateName string) (*v1alpha1.ClusterTemplate, error) { - template := &v1alpha1.ClusterTemplate{} - templateRef := client.ObjectKey{Name: templateName, Namespace: templateNamespace} - if err := v.Get(ctx, templateRef, template); err != nil { - return nil, err - } - return template, nil +func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, templateNamespace, templateName string) (tpl *hmcv1alpha1.ClusterTemplate, err error) { + tpl = new(hmcv1alpha1.ClusterTemplate) + return tpl, v.Get(ctx, client.ObjectKey{Namespace: templateNamespace, Name: templateName}, tpl) } -func (v *ManagedClusterValidator) isTemplateValid(ctx context.Context, template *v1alpha1.ClusterTemplate) error { +func (v *ManagedClusterValidator) isTemplateValid(ctx context.Context, template *hmcv1alpha1.ClusterTemplate) error { if !template.Status.Valid { return fmt.Errorf("the template is not valid: %s", template.Status.ValidationError) } - err := v.verifyProviders(ctx, template) - if err != nil { - return fmt.Errorf("providers verification failed: %v", err) + + if err := v.verifyProviders(ctx, template); err != nil { + return fmt.Errorf("failed to verify providers: %v", err) } + return nil } -func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template *v1alpha1.ClusterTemplate) error { - requiredProviders := template.Status.Providers - management := &v1alpha1.Management{} - managementRef := client.ObjectKey{Name: v1alpha1.ManagementName} - if err := v.Get(ctx, managementRef, management); err != nil { +func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template *hmcv1alpha1.ClusterTemplate) error { + management := new(hmcv1alpha1.Management) + if err := v.Get(ctx, client.ObjectKey{Name: hmcv1alpha1.ManagementName}, management); err != nil { return err } - exposedProviders := management.Status.AvailableProviders - missingProviders := make(map[string][]string) - missingProviders["bootstrap"] = getMissingProviders(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) - missingProviders["control plane"] = 
getMissingProviders(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) - missingProviders["infrastructure"] = getMissingProviders(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) + const ( + bootstrapProviderType = "bootstrap" + controlPlateProviderType = "control plane" + infraProviderType = "infrastructure" + ) - var errs []error - for providerType, missing := range missingProviders { - if len(missing) > 0 { - sort.Slice(missing, func(i, j int) bool { - return missing[i] < missing[j] - }) - errs = append(errs, fmt.Errorf("one or more required %s providers are not deployed yet: %v", providerType, missing)) + var ( + exposedProviders = management.Status.AvailableProviders + requiredProviders = template.Status.Providers + + missingBootstrap, missingCP, missingInfra []string + wrongVersionProviders map[string][]string + ) + + // on update we have to validate versions between exact the provider tpl and constraints from the cluster tpl + if req, _ := admission.RequestFromContext(ctx); req.Operation == admissionv1.Update { + wrongVersionProviders = make(map[string][]string, 3) + missing, wrongVers, err := getMissingProvidersWithWrongVersions(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) + if err != nil { + return err } + wrongVersionProviders[bootstrapProviderType], missingBootstrap = wrongVers, missing + + missing, wrongVers, err = getMissingProvidersWithWrongVersions(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) + if err != nil { + return err + } + wrongVersionProviders[controlPlateProviderType], missingCP = wrongVers, missing + + missing, wrongVers, err = getMissingProvidersWithWrongVersions(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) + if err != nil { + return err + } + wrongVersionProviders[infraProviderType], missingInfra = wrongVers, missing + } else { + missingBootstrap = getMissingProviders(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) + missingCP = getMissingProviders(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) + missingInfra = getMissingProviders(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) } + + missingProviders := map[string][]string{ + bootstrapProviderType: missingBootstrap, + controlPlateProviderType: missingCP, + infraProviderType: missingInfra, + } + + errs := collectErrors(missingProviders, "one or more required %s providers are not deployed yet: %v") + errs = append(errs, collectErrors(wrongVersionProviders, "one or more required %s providers does not satisfy constraints: %v")...) if len(errs) > 0 { sort.Slice(errs, func(i, j int) bool { return errs[i].Error() < errs[j].Error() }) + return errors.Join(errs...) 
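The aggregation above relies on errors.Join (Go 1.20+), with a sort beforehand so the combined message is deterministic even though the inputs come from ranging over a map. A compact sketch of the same shape, using the provider messages from the tests as sample inputs:

    import (
        "errors"
        "fmt"
        "sort"
    )

    func joined() error {
        errs := []error{
            errors.New("one or more required infrastructure providers are not deployed yet: [azure]"),
            errors.New("one or more required control plane providers are not deployed yet: [k0s]"),
        }
        // Sort first: map iteration order is random, but the webhook message
        // must be stable for tests and for users diffing output.
        sort.Slice(errs, func(i, j int) bool { return errs[i].Error() < errs[j].Error() })
        return errors.Join(errs...) // messages are joined with newlines
    }

    func main() { fmt.Println(joined()) }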
} + return nil } -func getMissingProviders(exposedProviders, requiredProviders []v1alpha1.ProviderTuple) (missing []string) { - exposedSet := make(map[string]struct{}, len(requiredProviders)) - for _, v := range exposedProviders { - exposedSet[v.Name] = struct{}{} +func collectErrors(m map[string][]string, msgFormat string) (errs []error) { + for providerType, missing := range m { + if len(missing) > 0 { + slices.Sort(missing) + errs = append(errs, fmt.Errorf(msgFormat, providerType, missing)) + } + } + + return errs +} + +func getMissingProviders(exposed, required []hmcv1alpha1.ProviderTuple) (missing []string) { + missing, _, _ = getMissingProvidersWithWrongVersions(exposed, required) + return missing +} + +func getMissingProvidersWithWrongVersions(exposed, required []hmcv1alpha1.ProviderTuple) (missing, nonSatisfying []string, _ error) { + exposedSet := make(map[string]hmcv1alpha1.ProviderTuple, len(exposed)) + for _, v := range exposed { + exposedSet[v.Name] = v } - for _, v := range requiredProviders { - if _, ok := exposedSet[v.Name]; !ok { - missing = append(missing, v.Name) + var merr error + for _, reqWithConstraint := range required { + exposedWithExactVer, ok := exposedSet[reqWithConstraint.Name] + if !ok { + missing = append(missing, reqWithConstraint.Name) + continue + } + + if exposedWithExactVer.VersionOrConstraint == "" || reqWithConstraint.VersionOrConstraint == "" { + continue + } + + exactVer, err := semver.NewVersion(exposedWithExactVer.VersionOrConstraint) + if err != nil { + merr = errors.Join(merr, fmt.Errorf("failed to parse version %s of the provider %s: %w", exposedWithExactVer.VersionOrConstraint, exposedWithExactVer.Name, err)) + continue + } + + requiredC, err := semver.NewConstraint(reqWithConstraint.VersionOrConstraint) + if err != nil { + merr = errors.Join(merr, fmt.Errorf("failed to parse constraint %s of the provider %s: %w", exposedWithExactVer.VersionOrConstraint, exposedWithExactVer.Name, err)) + continue + } + + if !requiredC.Check(exactVer) { + nonSatisfying = append(nonSatisfying, fmt.Sprintf("%s %s !~ %s", reqWithConstraint.Name, exposedWithExactVer.VersionOrConstraint, reqWithConstraint.VersionOrConstraint)) } } - return missing + return missing, nonSatisfying, merr } diff --git a/internal/webhook/managedcluster_webhook_test.go b/internal/webhook/managedcluster_webhook_test.go index 32557cfea..e5945223e 100644 --- a/internal/webhook/managedcluster_webhook_test.go +++ b/internal/webhook/managedcluster_webhook_test.go @@ -20,6 +20,7 @@ import ( "testing" . 
"github.com/onsi/gomega" + admissionv1 "k8s.io/api/admission/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -57,7 +58,7 @@ var ( }, { name: "should fail if the ClusterTemplate is not found in the ManagedCluster's namespace", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -69,7 +70,7 @@ var ( }, { name: "should fail if the cluster template was found but is invalid (some validation error)", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -84,7 +85,7 @@ var ( }, { name: "should fail if one or more requested providers are not available yet", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ management.NewManagement( management.WithAvailableProviders(v1alpha1.ProvidersTupled{ @@ -102,11 +103,11 @@ var ( template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), ), }, - err: "the ManagedCluster is invalid: providers verification failed: one or more required control plane providers are not deployed yet: [k0s]\none or more required infrastructure providers are not deployed yet: [azure]", + err: "the ManagedCluster is invalid: failed to verify providers: one or more required control plane providers are not deployed yet: [k0s]\none or more required infrastructure providers are not deployed yet: [azure]", }, { name: "should succeed", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -126,7 +127,11 @@ var ( func TestManagedClusterValidateCreate(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := admission.NewContextWithRequest(context.Background(), admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }) for _, tt := range createAndUpdateTests { t.Run(tt.name, func(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() @@ -140,11 +145,8 @@ func TestManagedClusterValidateCreate(t *testing.T) { } else { g.Expect(err).To(Succeed()) } - if len(tt.warnings) > 0 { - g.Expect(warn).To(Equal(tt.warnings)) - } else { - g.Expect(warn).To(BeEmpty()) - } + + g.Expect(warn).To(Equal(tt.warnings)) }) } } @@ -152,8 +154,81 @@ func TestManagedClusterValidateCreate(t *testing.T) { func TestManagedClusterValidateUpdate(t *testing.T) { g := NewWithT(t) - ctx := context.Background() - for _, tt := range createAndUpdateTests { + updateTests := append(createAndUpdateTests[:0:0], createAndUpdateTests...) 
+ updateTests = append(updateTests, []struct { + name string + managedCluster *v1alpha1.ManagedCluster + existingObjects []runtime.Object + err string + warnings admission.Warnings + }{ + { + name: "provider template versions does not satisfy cluster template constraints", + managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), + existingObjects: []runtime.Object{ + management.NewManagement(management.WithAvailableProviders(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: "v1.0.0"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + })), + template.NewClusterTemplate( + template.WithName(testTemplateName), + template.WithProvidersStatus(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: ">=999.0.0"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: ">=999.0.0"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: ">=999.0.0"}}, + }), + template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), + ), + }, + err: `the ManagedCluster is invalid: failed to verify providers: one or more required bootstrap providers does not satisfy constraints: [k0s v1.0.0 !~ >=999.0.0] +one or more required control plane providers does not satisfy constraints: [k0s v1.0.0 !~ >=999.0.0] +one or more required infrastructure providers does not satisfy constraints: [aws v1.0.0 !~ >=999.0.0]`, + }, + { + name: "cluster template k8s version does not satisfy service template constraints", + managedCluster: managedcluster.NewManagedCluster( + managedcluster.WithClusterTemplate(testTemplateName), + managedcluster.WithK8sVersionStatus("v1.30.0"), + managedcluster.WithServiceTemplate(testTemplateName), + ), + existingObjects: []runtime.Object{ + management.NewManagement(management.WithAvailableProviders(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: "v1.0.0"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, + })), + template.NewClusterTemplate( + template.WithName(testTemplateName), + template.WithProvidersStatus(v1alpha1.ProvidersTupled{ + InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, + BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, + ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, + }), + template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), + ), + template.NewServiceTemplate( + template.WithName(testTemplateName), + template.WithProvidersStatus(v1alpha1.Providers{ + InfrastructureProviders: []string{"aws"}, + BootstrapProviders: []string{"k0s"}, + ControlPlaneProviders: []string{"k0s"}, + }), + template.WithServiceK8sConstraint("<1.30"), + template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), + ), + }, + err: fmt.Sprintf(`failed to validate k8s compatibility: k8s version v1.30.0 of the ManagedCluster default/managedcluster does not satisfy constrainted version <1.30 from the ServiceTemplate default/%s`, testTemplateName), + warnings: admission.Warnings{"Failed to validate k8s version compatibility with 
ServiceTemplates"}, + }, + }...) + + ctx := admission.NewContextWithRequest(context.Background(), admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + }, + }) + for _, tt := range updateTests { t.Run(tt.name, func(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() validator := &ManagedClusterValidator{Client: c} @@ -166,11 +241,8 @@ func TestManagedClusterValidateUpdate(t *testing.T) { } else { g.Expect(err).To(Succeed()) } - if len(tt.warnings) > 0 { - g.Expect(warn).To(Equal(tt.warnings)) - } else { - g.Expect(warn).To(BeEmpty()) - } + + g.Expect(warn).To(Equal(tt.warnings)) }) } } @@ -196,8 +268,8 @@ func TestManagedClusterDefault(t *testing.T) { }, { name: "should not set defaults: template is invalid", - input: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), - output: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + input: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), + output: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -212,8 +284,8 @@ func TestManagedClusterDefault(t *testing.T) { }, { name: "should not set defaults: config in template status is unset", - input: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), - output: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + input: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), + output: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), existingObjects: []runtime.Object{ mgmt, template.NewClusterTemplate( @@ -224,9 +296,9 @@ func TestManagedClusterDefault(t *testing.T) { }, { name: "should set defaults", - input: managedcluster.NewManagedCluster(managedcluster.WithTemplate(testTemplateName)), + input: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), output: managedcluster.NewManagedCluster( - managedcluster.WithTemplate(testTemplateName), + managedcluster.WithClusterTemplate(testTemplateName), managedcluster.WithConfig(managedClusterConfig), managedcluster.WithDryRun(true), ), diff --git a/internal/webhook/template_webhook.go b/internal/webhook/template_webhook.go index 298aee065..a57343ef7 100644 --- a/internal/webhook/template_webhook.go +++ b/internal/webhook/template_webhook.go @@ -20,7 +20,6 @@ import ( "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -68,13 +67,10 @@ func (v *ClusterTemplateValidator) ValidateDelete(ctx context.Context, obj runti } managedClusters := &v1alpha1.ManagedClusterList{} - listOptions := client.ListOptions{ - FieldSelector: fields.SelectorFromSet(fields.Set{v1alpha1.TemplateKey: template.Name}), - Limit: 1, - Namespace: template.Namespace, - } - err := v.Client.List(ctx, managedClusters, &listOptions) - if err != nil { + if err := v.Client.List(ctx, managedClusters, + client.InNamespace(template.Namespace), + client.MatchingFields{v1alpha1.TemplateKey: template.Name}, + client.Limit(1)); err != nil { return nil, err } @@ -126,11 +122,10 @@ func (v *ServiceTemplateValidator) ValidateDelete(ctx context.Context, obj runti } 
managedClusters := &v1alpha1.ManagedClusterList{} - if err := v.Client.List(ctx, managedClusters, &client.ListOptions{ - FieldSelector: fields.SelectorFromSet(fields.Set{v1alpha1.ServicesTemplateKey: tmpl.Name}), - Limit: 1, - Namespace: tmpl.Namespace, - }); err != nil { + if err := v.Client.List(ctx, managedClusters, + client.InNamespace(tmpl.Namespace), + client.MatchingFields{v1alpha1.ServicesTemplateKey: tmpl.Name}, + client.Limit(1)); err != nil { return nil, err } diff --git a/internal/webhook/template_webhook_test.go b/internal/webhook/template_webhook_test.go index 9db759593..44938a4cc 100644 --- a/internal/webhook/template_webhook_test.go +++ b/internal/webhook/template_webhook_test.go @@ -47,7 +47,7 @@ func TestClusterTemplateValidateDelete(t *testing.T) { template: tpl, existingObjects: []runtime.Object{managedcluster.NewManagedCluster( managedcluster.WithNamespace(namespace), - managedcluster.WithTemplate(tpl.Name), + managedcluster.WithClusterTemplate(tpl.Name), )}, warnings: admission.Warnings{"The ClusterTemplate object can't be removed if ManagedCluster objects referencing it still exist"}, err: "template deletion is forbidden", @@ -57,7 +57,7 @@ func TestClusterTemplateValidateDelete(t *testing.T) { template: tpl, existingObjects: []runtime.Object{managedcluster.NewManagedCluster( managedcluster.WithNamespace("new"), - managedcluster.WithTemplate(tpl.Name), + managedcluster.WithClusterTemplate(tpl.Name), )}, }, { @@ -68,7 +68,7 @@ func TestClusterTemplateValidateDelete(t *testing.T) { { name: "should succeed", template: template.NewClusterTemplate(), - existingObjects: []runtime.Object{managedcluster.NewManagedCluster(managedcluster.WithTemplate(tplTest.Name))}, + existingObjects: []runtime.Object{managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(tplTest.Name))}, }, } diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml index 4dc8aadd6..91bcd8de5 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml @@ -122,7 +122,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -138,7 +138,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -154,7 +154,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -230,7 +230,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -246,7 +246,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -262,7 +262,7 @@ spec: name: description: Name of the provider. 
type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index 37d55cab1..e1fa118ea 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -63,6 +63,7 @@ spec: the template and DryRun will be enabled. x-kubernetes-preserve-unknown-fields: true credential: + description: Name reference to the related Credentials object. type: string dryRun: description: DryRun specifies whether the template should be applied @@ -191,12 +192,66 @@ spec: k8sVersion: description: |- Currently compatible K8S version of the cluster. Being set only if - the corresponding ClusterTemplate provided it in the spec. + provided by the corresponding ClusterTemplate. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer + providers: + description: |- + Providers represent exposed CAPI providers with constrainted compatibility versions set. + Propagated from the corresponding ClusterTemplate. + properties: + bootstrap: + description: List of CAPI bootstrap providers with either an exact + or constrainted version in the SemVer format. + items: + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrConstraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object + type: array + controlPlane: + description: List of CAPI control plane providers with either + an exact or constrainted version in the SemVer format. + items: + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrConstraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object + type: array + infrastructure: + description: List of CAPI infrastructure providers with either + an exact or constrainted version in the SemVer format. + items: + description: Represents name of the provider with either an + exact or constrainted version in the SemVer format. + properties: + name: + description: Name of the provider. + type: string + versionOrConstraint: + description: Compatibility restriction in the SemVer format + (exact or constrainted version) + type: string + type: object + type: array + type: object type: object type: object served: true diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml index 04c98a061..5f9d66007 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml @@ -126,7 +126,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -142,7 +142,7 @@ spec: name: description: Name of the provider. 
type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -158,7 +158,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml index 8442a2e15..e00635e13 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml @@ -120,7 +120,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -136,7 +136,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -152,7 +152,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -227,7 +227,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -243,7 +243,7 @@ spec: name: description: Name of the provider. type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string @@ -259,7 +259,7 @@ spec: name: description: Name of the provider. 
type: string - versionOrContraint: + versionOrConstraint: description: Compatibility restriction in the SemVer format (exact or constrainted version) type: string diff --git a/test/managedcluster/vsphere/vsphere.go b/test/managedcluster/vsphere/vsphere.go index 1d9b3f4eb..620a42cfb 100644 --- a/test/managedcluster/vsphere/vsphere.go +++ b/test/managedcluster/vsphere/vsphere.go @@ -89,7 +89,7 @@ func CreateClusterIdentity(kc *kubeclient.KubeClient, secretName string, identit result, err := client.Resource(gvr).Create(ctx, clusterIdentity, metav1.CreateOptions{}) if err != nil { - fmt.Printf("%+v", result) //nolint:revive // false-positive + fmt.Printf("%+v", result) return fmt.Errorf("failed to create vsphereclusteridentity: %w", err) } diff --git a/test/objects/managedcluster/managedcluster.go b/test/objects/managedcluster/managedcluster.go index 377d20ec6..15a7d1525 100644 --- a/test/objects/managedcluster/managedcluster.go +++ b/test/objects/managedcluster/managedcluster.go @@ -60,12 +60,18 @@ func WithDryRun(dryRun bool) Opt { } } -func WithTemplate(templateName string) Opt { +func WithClusterTemplate(templateName string) Opt { return func(p *v1alpha1.ManagedCluster) { p.Spec.Template = templateName } } +func WithK8sVersionStatus(v string) Opt { + return func(managedCluster *v1alpha1.ManagedCluster) { + managedCluster.Status.KubertenesVersion = v + } +} + func WithConfig(config string) Opt { return func(p *v1alpha1.ManagedCluster) { p.Spec.Config = &apiextensionsv1.JSON{ diff --git a/test/objects/template/template.go b/test/objects/template/template.go index 6fa1e5bd8..52fcf56fd 100644 --- a/test/objects/template/template.go +++ b/test/objects/template/template.go @@ -123,6 +123,17 @@ func WithHelmSpec(helmSpec v1alpha1.HelmSpec) Opt { } } +func WithServiceK8sConstraint(v string) Opt { + return func(template Template) { + switch tt := template.(type) { + case *v1alpha1.ServiceTemplate: + tt.Status.KubertenesConstraint = v + default: + panic(fmt.Sprintf("unexpected obj typed %T, expected *ServiceTemplate", tt)) + } + } +} + func WithValidationStatus(validationStatus v1alpha1.TemplateValidationStatus) Opt { return func(t Template) { status := t.GetCommonStatus() From 545f2babb9ae468d068e0fbf2d99536ae5b655ad Mon Sep 17 00:00:00 2001 From: zerospiel Date: Tue, 8 Oct 2024 15:17:25 +0200 Subject: [PATCH 09/29] Addressed comments on compatibility attrs * amends to descs and fix typos * correctly parse providers * changed providers anno separator * enforce CAPI version check in providertemplates * amends to the API regarding CAPI version compatibility --- api/v1alpha1/clustertemplate_types.go | 21 ++- api/v1alpha1/common.go | 16 +- api/v1alpha1/managedcluster_types.go | 7 +- api/v1alpha1/providertemplate_types.go | 70 ++++++-- api/v1alpha1/servicetemplate_types.go | 26 ++- api/v1alpha1/templates_common.go | 13 +- api/v1alpha1/zz_generated.deepcopy.go | 1 - .../controller/managedcluster_controller.go | 3 +- internal/controller/release_controller.go | 8 +- internal/controller/template_controller.go | 6 +- internal/helm/release.go | 2 +- internal/webhook/managedcluster_webhook.go | 75 +++----- .../webhook/managedcluster_webhook_test.go | 80 ++++----- internal/webhook/management_webhook.go | 80 ++++++++- internal/webhook/management_webhook_test.go | 165 ++++++++++++++++-- .../hmc.mirantis.com_clustertemplates.yaml | 92 +++++----- .../hmc.mirantis.com_managedclusters.yaml | 56 +----- .../crds/hmc.mirantis.com_managements.yaml | 36 ++-- .../hmc.mirantis.com_providertemplates.yaml | 105 +++++++---- 
.../hmc.mirantis.com_servicetemplates.yaml | 7 +- templates/provider/k0smotron/Chart.yaml | 2 +- test/kubeclient/kubeclient.go | 4 +- test/objects/managedcluster/managedcluster.go | 6 - test/objects/management/management.go | 6 + test/objects/release/release.go | 70 ++++++++ test/objects/template/template.go | 35 +++- 26 files changed, 654 insertions(+), 338 deletions(-) create mode 100644 test/objects/release/release.go diff --git a/api/v1alpha1/clustertemplate_types.go b/api/v1alpha1/clustertemplate_types.go index a10368b7b..024cc2d77 100644 --- a/api/v1alpha1/clustertemplate_types.go +++ b/api/v1alpha1/clustertemplate_types.go @@ -31,17 +31,20 @@ const ( // ClusterTemplateSpec defines the desired state of ClusterTemplate type ClusterTemplateSpec struct { Helm HelmSpec `json:"helm"` - // Compatible K8S version of the cluster set in the SemVer format. - KubertenesVersion string `json:"k8sVersion,omitempty"` - // Providers represent required CAPI providers with constrainted compatibility versions set. Should be set if not present in the Helm chart metadata. + // Kubernetes exact version in the SemVer format provided by this ClusterTemplate. + KubernetesVersion string `json:"k8sVersion,omitempty"` + // Providers represent required CAPI providers with constrained compatibility versions set. + // Should be set if not present in the Helm chart metadata. + // Compatibility attributes are optional to be defined. Providers ProvidersTupled `json:"providers,omitempty"` } // ClusterTemplateStatus defines the observed state of ClusterTemplate type ClusterTemplateStatus struct { - // Compatible K8S version of the cluster set in the SemVer format. - KubertenesVersion string `json:"k8sVersion,omitempty"` - // Providers represent exposed CAPI providers with constrainted compatibility versions set. + // Kubernetes exact version in the SemVer format provided by this ClusterTemplate. + KubernetesVersion string `json:"k8sVersion,omitempty"` + // Providers represent required CAPI providers with constrained compatibility versions set + // if the latter has been given. Providers ProvidersTupled `json:"providers,omitempty"` TemplateStatusCommon `json:",inline"` @@ -67,8 +70,8 @@ func (t *ClusterTemplate) FillStatusWithProviders(annotations map[string]string) } kversion := annotations[ChartAnnotationKubernetesVersion] - if t.Spec.KubertenesVersion != "" { - kversion = t.Spec.KubertenesVersion + if t.Spec.KubernetesVersion != "" { + kversion = t.Spec.KubernetesVersion } if kversion == "" { return nil @@ -78,7 +81,7 @@ func (t *ClusterTemplate) FillStatusWithProviders(annotations map[string]string) return fmt.Errorf("failed to parse kubernetes version %s: %w", kversion, err) } - t.Status.KubertenesVersion = kversion + t.Status.KubernetesVersion = kversion return nil } diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index f100a4bfc..0a25b4a70 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -33,22 +33,26 @@ type ( } // Holds different types of CAPI providers with either - // an exact or constrainted version in the SemVer format. The requirement + // an exact or constrained version in the SemVer format. The requirement // is determined by a consumer of this type. ProvidersTupled struct { - // List of CAPI infrastructure providers with either an exact or constrainted version in the SemVer format. + // List of CAPI infrastructure providers with either an exact or constrained version in the SemVer format. + // Compatibility attributes are optional to be defined. 
InfrastructureProviders []ProviderTuple `json:"infrastructure,omitempty"` - // List of CAPI bootstrap providers with either an exact or constrainted version in the SemVer format. + // List of CAPI bootstrap providers with either an exact or constrained version in the SemVer format. + // Compatibility attributes are optional to be defined. BootstrapProviders []ProviderTuple `json:"bootstrap,omitempty"` - // List of CAPI control plane providers with either an exact or constrainted version in the SemVer format. + // List of CAPI control plane providers with either an exact or constrained version in the SemVer format. + // Compatibility attributes are optional to be defined. ControlPlaneProviders []ProviderTuple `json:"controlPlane,omitempty"` } - // Represents name of the provider with either an exact or constrainted version in the SemVer format. + // Represents name of the provider with either an exact or constrained version in the SemVer format. ProviderTuple struct { // Name of the provider. Name string `json:"name,omitempty"` - // Compatibility restriction in the SemVer format (exact or constrainted version) + // Compatibility restriction in the SemVer format (exact or constrained version). + // Optional to be defined. VersionOrConstraint string `json:"versionOrConstraint,omitempty"` } ) diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 225ce290a..befb3122a 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -101,12 +101,9 @@ type ManagedClusterSpec struct { // ManagedClusterStatus defines the observed state of ManagedCluster type ManagedClusterStatus struct { - // Currently compatible K8S version of the cluster. Being set only if + // Currently compatible exact Kubernetes version of the cluster. Being set only if // provided by the corresponding ClusterTemplate. - KubertenesVersion string `json:"k8sVersion,omitempty"` - // Providers represent exposed CAPI providers with constrainted compatibility versions set. - // Propagated from the corresponding ClusterTemplate. - Providers ProvidersTupled `json:"providers,omitempty"` + KubernetesVersion string `json:"k8sVersion,omitempty"` // Conditions contains details for the current state of the ManagedCluster Conditions []metav1.Condition `json:"conditions,omitempty"` // ObservedGeneration is the last observed generation. diff --git a/api/v1alpha1/providertemplate_types.go b/api/v1alpha1/providertemplate_types.go index c1326b71c..b71f020cd 100644 --- a/api/v1alpha1/providertemplate_types.go +++ b/api/v1alpha1/providertemplate_types.go @@ -21,23 +21,39 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ChartAnnotationCAPIVersion is an annotation containing the CAPI exact version in the SemVer format associated with a ProviderTemplate. -const ChartAnnotationCAPIVersion = "hmc.mirantis.com/capi-version" +const ( + // ChartAnnotationCAPIVersion is an annotation containing the CAPI exact version in the SemVer format associated with a ProviderTemplate. + ChartAnnotationCAPIVersion = "hmc.mirantis.com/capi-version" + // ChartAnnotationCAPIVersionConstraint is an annotation containing the CAPI version constraint in the SemVer format associated with a ProviderTemplate. 
+ ChartAnnotationCAPIVersionConstraint = "hmc.mirantis.com/capi-version-constraint" +) + +// +kubebuilder:validation:XValidation:rule="!(has(self.capiVersion) && has(self.capiVersionConstraint))", message="Either capiVersion or capiVersionConstraint may be set, but not both" // ProviderTemplateSpec defines the desired state of ProviderTemplate type ProviderTemplateSpec struct { - Helm HelmSpec `json:"helm"` - // Compatible CAPI provider version set in the SemVer format. + Helm HelmSpec `json:"helm,omitempty"` + // CAPI exact version in the SemVer format. + // Applicable only for the cluster-api ProviderTemplate itself. CAPIVersion string `json:"capiVersion,omitempty"` - // Represents required CAPI providers with exact compatibility versions set. Should be set if not present in the Helm chart metadata. + // CAPI version constraint in the SemVer format indicating compatibility with the core CAPI. + // Not applicable for the cluster-api ProviderTemplate. + CAPIVersionConstraint string `json:"capiVersionConstraint,omitempty"` + // Providers represent exposed CAPI providers with exact compatibility versions set. + // Should be set if not present in the Helm chart metadata. + // Compatibility attributes are optional to be defined. Providers ProvidersTupled `json:"providers,omitempty"` } // ProviderTemplateStatus defines the observed state of ProviderTemplate type ProviderTemplateStatus struct { - // Compatible CAPI provider version in the SemVer format. + // CAPI exact version in the SemVer format. + // Applicable only for the capi Template itself. CAPIVersion string `json:"capiVersion,omitempty"` - // Providers represent exposed CAPI providers with exact compatibility versions set. + // CAPI version constraint in the SemVer format indicating compatibility with the core CAPI. + CAPIVersionConstraint string `json:"capiVersionConstraint,omitempty"` + // Providers represent exposed CAPI providers with exact compatibility versions set + // if the latter has been given. 
Providers ProvidersTupled `json:"providers,omitempty"` TemplateStatusCommon `json:",inline"` @@ -62,19 +78,35 @@ func (t *ProviderTemplate) FillStatusWithProviders(annotations map[string]string return fmt.Errorf("failed to parse ProviderTemplate infrastructure providers: %v", err) } - capiVersion := annotations[ChartAnnotationCAPIVersion] - if t.Spec.CAPIVersion != "" { - capiVersion = t.Spec.CAPIVersion + if t.Name == CoreCAPIName { + capiVersion := annotations[ChartAnnotationCAPIVersion] + if t.Spec.CAPIVersion != "" { + capiVersion = t.Spec.CAPIVersion + } + if capiVersion == "" { + return nil + } + + if _, err := semver.NewVersion(capiVersion); err != nil { + return fmt.Errorf("failed to parse CAPI version %s: %w", capiVersion, err) + } + + t.Status.CAPIVersion = capiVersion + } else { + capiConstraint := annotations[ChartAnnotationCAPIVersionConstraint] + if t.Spec.CAPIVersionConstraint != "" { + capiConstraint = t.Spec.CAPIVersionConstraint + } + if capiConstraint == "" { + return nil + } + + if _, err := semver.NewConstraint(capiConstraint); err != nil { + return fmt.Errorf("failed to parse CAPI version constraint %s: %w", capiConstraint, err) + } + + t.Status.CAPIVersionConstraint = capiConstraint } - if capiVersion == "" { - return nil - } - - if _, err := semver.NewVersion(capiVersion); err != nil { - return fmt.Errorf("failed to parse CAPI version %s: %w", capiVersion, err) - } - - t.Status.CAPIVersion = capiVersion return nil } diff --git a/api/v1alpha1/servicetemplate_types.go b/api/v1alpha1/servicetemplate_types.go index bf2890ccd..540c33eb6 100644 --- a/api/v1alpha1/servicetemplate_types.go +++ b/api/v1alpha1/servicetemplate_types.go @@ -25,7 +25,7 @@ import ( const ( // Denotes the servicetemplate resource Kind. ServiceTemplateKind = "ServiceTemplate" - // ChartAnnotationKubernetesConstraint is an annotation containing the Kubernetes constrainted version in the SemVer format associated with a ServiceTemplate. + // ChartAnnotationKubernetesConstraint is an annotation containing the Kubernetes constrained version in the SemVer format associated with a ServiceTemplate. ChartAnnotationKubernetesConstraint = "hmc.mirantis.com/k8s-version-constraint" ) @@ -33,16 +33,17 @@ const ( type ServiceTemplateSpec struct { Helm HelmSpec `json:"helm"` // Constraint describing compatible K8S versions of the cluster set in the SemVer format. - KubertenesConstraint string `json:"k8sConstraint,omitempty"` - // Represents required CAPI providers. Should be set if not present in the Helm chart metadata. + KubernetesConstraint string `json:"k8sConstraint,omitempty"` + // Providers represent requested CAPI providers. + // Should be set if not present in the Helm chart metadata. Providers Providers `json:"providers,omitempty"` } // ServiceTemplateStatus defines the observed state of ServiceTemplate type ServiceTemplateStatus struct { // Constraint describing compatible K8S versions of the cluster set in the SemVer format. - KubertenesConstraint string `json:"k8sConstraint,omitempty"` - // Represents exposed CAPI providers. + KubernetesConstraint string `json:"k8sConstraint,omitempty"` + // Providers represent requested CAPI providers. 
Providers Providers `json:"providers,omitempty"` TemplateStatusCommon `json:",inline"` @@ -65,17 +66,14 @@ func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) pspec, anno = t.Spec.Providers.InfrastructureProviders, ChartAnnotationInfraProviders } - if len(pspec) > 0 { - return pspec - } - providers := annotations[anno] if len(providers) == 0 { - return []string{} + return pspec } - splitted := strings.Split(providers, ",") + splitted := strings.Split(providers, multiProviderSeparator) result := make([]string, 0, len(splitted)) + result = append(result, pspec...) for _, v := range splitted { if c := strings.TrimSpace(v); c != "" { result = append(result, c) @@ -90,8 +88,8 @@ func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) t.Status.Providers.InfrastructureProviders = parseProviders(infrastructureProvidersType) kconstraint := annotations[ChartAnnotationKubernetesConstraint] - if t.Spec.KubertenesConstraint != "" { - kconstraint = t.Spec.KubertenesConstraint + if t.Spec.KubernetesConstraint != "" { + kconstraint = t.Spec.KubernetesConstraint } if kconstraint == "" { return nil @@ -101,7 +99,7 @@ func (t *ServiceTemplate) FillStatusWithProviders(annotations map[string]string) return fmt.Errorf("failed to parse kubernetes constraint %s: %w", kconstraint, err) } - t.Status.KubertenesConstraint = kconstraint + t.Status.KubernetesConstraint = kconstraint return nil } diff --git a/api/v1alpha1/templates_common.go b/api/v1alpha1/templates_common.go index 36fd1f167..3f1abaf92 100644 --- a/api/v1alpha1/templates_common.go +++ b/api/v1alpha1/templates_common.go @@ -85,22 +85,23 @@ const ( infrastructureProvidersType ) +const multiProviderSeparator = ";" + func parseProviders[T any](providersGetter interface{ GetSpecProviders() ProvidersTupled }, typ providersType, annotations map[string]string, validationFn func(string) (T, error)) ([]ProviderTuple, error) { pspec, anno := getProvidersSpecAnno(providersGetter, typ) - if len(pspec) > 0 { - return pspec, nil - } providers := annotations[anno] if len(providers) == 0 { - return []ProviderTuple{}, nil + return pspec, nil } var ( - splitted = strings.Split(providers, ",") - pstatus = make([]ProviderTuple, 0, len(splitted)) + splitted = strings.Split(providers, multiProviderSeparator) + pstatus = make([]ProviderTuple, 0, len(splitted)+len(pspec)) merr error ) + pstatus = append(pstatus, pspec...) + for _, v := range splitted { v = strings.TrimSpace(v) nVerOrC := strings.SplitN(v, " ", 2) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0060cf256..a0d3c518a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -488,7 +488,6 @@ func (in *ManagedClusterSpec) DeepCopy() *ManagedClusterSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ManagedClusterStatus) DeepCopyInto(out *ManagedClusterStatus) { *out = *in - in.Providers.DeepCopyInto(&out.Providers) if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index 86a05fb4c..e29981f24 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -229,8 +229,7 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, managedCluster *h return ctrl.Result{}, errors.New(errMsg) } // template is ok, propagate data from it - managedCluster.Status.KubertenesVersion = template.Status.KubertenesVersion - managedCluster.Status.Providers = template.Status.Providers + managedCluster.Status.KubernetesVersion = template.Status.KubernetesVersion apimeta.SetStatusCondition(managedCluster.GetConditions(), metav1.Condition{ Type: hmc.TemplateReadyCondition, diff --git a/internal/controller/release_controller.go b/internal/controller/release_controller.go index b341d84fc..825126e29 100644 --- a/internal/controller/release_controller.go +++ b/internal/controller/release_controller.go @@ -51,14 +51,14 @@ type ReleaseReconciler struct { Config *rest.Config - CreateManagement bool - CreateRelease bool - CreateTemplates bool - HMCTemplatesChartName string SystemNamespace string DefaultRegistryConfig helm.DefaultRegistryConfig + + CreateManagement bool + CreateRelease bool + CreateTemplates bool } func (r *ReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { diff --git a/internal/controller/template_controller.go b/internal/controller/template_controller.go index e92b8eeab..98374916d 100644 --- a/internal/controller/template_controller.go +++ b/internal/controller/template_controller.go @@ -39,11 +39,11 @@ const ( // TemplateReconciler reconciles a *Template object type TemplateReconciler struct { client.Client - SystemNamespace string - - DefaultRegistryConfig helm.DefaultRegistryConfig downloadHelmChartFunc func(context.Context, *sourcev1.Artifact) (*chart.Chart, error) + + SystemNamespace string + DefaultRegistryConfig helm.DefaultRegistryConfig } type ClusterTemplateReconciler struct { diff --git a/internal/helm/release.go b/internal/helm/release.go index e89df8182..dd63a666c 100644 --- a/internal/helm/release.go +++ b/internal/helm/release.go @@ -38,8 +38,8 @@ type ReconcileHelmReleaseOpts struct { OwnerReference *metav1.OwnerReference ChartRef *hcv2.CrossNamespaceSourceReference ReconcileInterval *time.Duration - DependsOn []meta.NamespacedObjectReference TargetNamespace string + DependsOn []meta.NamespacedObjectReference CreateNamespace bool } diff --git a/internal/webhook/managedcluster_webhook.go b/internal/webhook/managedcluster_webhook.go index c4dd0ac21..811765286 100644 --- a/internal/webhook/managedcluster_webhook.go +++ b/internal/webhook/managedcluster_webhook.go @@ -22,7 +22,6 @@ import ( "sort" "github.com/Masterminds/semver/v3" - admissionv1 "k8s.io/api/admission/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -70,6 +69,10 @@ func (v *ManagedClusterValidator) ValidateCreate(ctx context.Context, obj runtim return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } + if err := validateK8sCompatibility(ctx, v.Client, template, managedCluster); err != nil { + return 
admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"}, fmt.Errorf("failed to validate k8s compatibility: %v", err) + } + return nil, nil } @@ -89,16 +92,16 @@ func (v *ManagedClusterValidator) ValidateUpdate(ctx context.Context, _ runtime. return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } - if err := validateK8sCompatibility(ctx, v.Client, newManagedCluster); err != nil { + if err := validateK8sCompatibility(ctx, v.Client, template, newManagedCluster); err != nil { return admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"}, fmt.Errorf("failed to validate k8s compatibility: %v", err) } return nil, nil } -func validateK8sCompatibility(ctx context.Context, cl client.Client, mc *hmcv1alpha1.ManagedCluster) error { - if len(mc.Spec.Services) == 0 || mc.Status.KubertenesVersion == "" { - return nil +func validateK8sCompatibility(ctx context.Context, cl client.Client, template *hmcv1alpha1.ClusterTemplate, mc *hmcv1alpha1.ManagedCluster) error { + if len(mc.Spec.Services) == 0 || template.Status.KubernetesVersion == "" { + return nil // nothing to do } svcTpls := new(hmcv1alpha1.ServiceTemplateList) @@ -108,12 +111,12 @@ func validateK8sCompatibility(ctx context.Context, cl client.Client, mc *hmcv1al svcTplName2KConstraint := make(map[string]string, len(svcTpls.Items)) for _, v := range svcTpls.Items { - svcTplName2KConstraint[v.Name] = v.Status.KubertenesConstraint + svcTplName2KConstraint[v.Name] = v.Status.KubernetesConstraint } - mcVersion, err := semver.NewVersion(mc.Status.KubertenesVersion) + mcVersion, err := semver.NewVersion(template.Status.KubernetesVersion) if err != nil { // should never happen - return fmt.Errorf("failed to parse k8s version %s of the ManagedCluster %s/%s: %w", mc.Status.KubertenesVersion, mc.Namespace, mc.Name, err) + return fmt.Errorf("failed to parse k8s version %s of the ManagedCluster %s/%s: %w", template.Status.KubernetesVersion, mc.Namespace, mc.Name, err) } for _, v := range mc.Spec.Services { @@ -132,12 +135,12 @@ func validateK8sCompatibility(ctx context.Context, cl client.Client, mc *hmcv1al tplConstraint, err := semver.NewConstraint(kc) if err != nil { // should never happen - return fmt.Errorf("failed to parse k8s constrainted version %s of the ServiceTemplate %s/%s: %w", kc, mc.Namespace, v.Template, err) + return fmt.Errorf("failed to parse k8s constrained version %s of the ServiceTemplate %s/%s: %w", kc, mc.Namespace, v.Template, err) } if !tplConstraint.Check(mcVersion) { - return fmt.Errorf("k8s version %s of the ManagedCluster %s/%s does not satisfy constrainted version %s from the ServiceTemplate %s/%s", - mc.Status.KubertenesVersion, mc.Namespace, mc.Name, + return fmt.Errorf("k8s version %s of the ManagedCluster %s/%s does not satisfy constrained version %s from the ServiceTemplate %s/%s", + template.Status.KubernetesVersion, mc.Namespace, mc.Name, kc, mc.Namespace, v.Template) } } @@ -212,43 +215,26 @@ func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template ) var ( - exposedProviders = management.Status.AvailableProviders - requiredProviders = template.Status.Providers + exposedProviders = management.Status.AvailableProviders + requiredProviders = template.Status.Providers + wrongVersionProviders, missingProviders = make(map[string][]string, 3), make(map[string][]string, 3) - missingBootstrap, missingCP, missingInfra []string - wrongVersionProviders map[string][]string + err error ) - // on update we have to validate versions between 
exact the provider tpl and constraints from the cluster tpl - if req, _ := admission.RequestFromContext(ctx); req.Operation == admissionv1.Update { - wrongVersionProviders = make(map[string][]string, 3) - missing, wrongVers, err := getMissingProvidersWithWrongVersions(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) - if err != nil { - return err - } - wrongVersionProviders[bootstrapProviderType], missingBootstrap = wrongVers, missing - - missing, wrongVers, err = getMissingProvidersWithWrongVersions(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) - if err != nil { - return err - } - wrongVersionProviders[controlPlateProviderType], missingCP = wrongVers, missing + missingProviders[bootstrapProviderType], wrongVersionProviders[bootstrapProviderType], err = getMissingProvidersWithWrongVersions(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) + if err != nil { + return err + } - missing, wrongVers, err = getMissingProvidersWithWrongVersions(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) - if err != nil { - return err - } - wrongVersionProviders[infraProviderType], missingInfra = wrongVers, missing - } else { - missingBootstrap = getMissingProviders(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) - missingCP = getMissingProviders(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) - missingInfra = getMissingProviders(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) + missingProviders[controlPlateProviderType], wrongVersionProviders[controlPlateProviderType], err = getMissingProvidersWithWrongVersions(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) + if err != nil { + return err } - missingProviders := map[string][]string{ - bootstrapProviderType: missingBootstrap, - controlPlateProviderType: missingCP, - infraProviderType: missingInfra, + missingProviders[infraProviderType], wrongVersionProviders[infraProviderType], err = getMissingProvidersWithWrongVersions(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) + if err != nil { + return err } errs := collectErrors(missingProviders, "one or more required %s providers are not deployed yet: %v") @@ -275,11 +261,6 @@ func collectErrors(m map[string][]string, msgFormat string) (errs []error) { return errs } -func getMissingProviders(exposed, required []hmcv1alpha1.ProviderTuple) (missing []string) { - missing, _, _ = getMissingProvidersWithWrongVersions(exposed, required) - return missing -} - func getMissingProvidersWithWrongVersions(exposed, required []hmcv1alpha1.ProviderTuple) (missing, nonSatisfying []string, _ error) { exposedSet := make(map[string]hmcv1alpha1.ProviderTuple, len(exposed)) for _, v := range exposed { diff --git a/internal/webhook/managedcluster_webhook_test.go b/internal/webhook/managedcluster_webhook_test.go index e5945223e..82c833dca 100644 --- a/internal/webhook/managedcluster_webhook_test.go +++ b/internal/webhook/managedcluster_webhook_test.go @@ -121,47 +121,6 @@ var ( ), }, }, - } -) - -func TestManagedClusterValidateCreate(t *testing.T) { - g := NewWithT(t) - - ctx := admission.NewContextWithRequest(context.Background(), admission.Request{ - AdmissionRequest: admissionv1.AdmissionRequest{ - Operation: admissionv1.Create, - }, - }) - for _, tt := range createAndUpdateTests { - t.Run(tt.name, func(t *testing.T) { - c := 
fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() - validator := &ManagedClusterValidator{Client: c} - warn, err := validator.ValidateCreate(ctx, tt.managedCluster) - if tt.err != "" { - g.Expect(err).To(HaveOccurred()) - if err.Error() != tt.err { - t.Fatalf("expected error '%s', got error: %s", tt.err, err.Error()) - } - } else { - g.Expect(err).To(Succeed()) - } - - g.Expect(warn).To(Equal(tt.warnings)) - }) - } -} - -func TestManagedClusterValidateUpdate(t *testing.T) { - g := NewWithT(t) - - updateTests := append(createAndUpdateTests[:0:0], createAndUpdateTests...) - updateTests = append(updateTests, []struct { - name string - managedCluster *v1alpha1.ManagedCluster - existingObjects []runtime.Object - err string - warnings admission.Warnings - }{ { name: "provider template versions does not satisfy cluster template constraints", managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), @@ -189,7 +148,6 @@ one or more required infrastructure providers does not satisfy constraints: [aws name: "cluster template k8s version does not satisfy service template constraints", managedCluster: managedcluster.NewManagedCluster( managedcluster.WithClusterTemplate(testTemplateName), - managedcluster.WithK8sVersionStatus("v1.30.0"), managedcluster.WithServiceTemplate(testTemplateName), ), existingObjects: []runtime.Object{ @@ -206,6 +164,7 @@ one or more required infrastructure providers does not satisfy constraints: [aws ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, }), template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), + template.WithClusterStatusK8sVersion("v1.30.0"), ), template.NewServiceTemplate( template.WithName(testTemplateName), @@ -218,17 +177,48 @@ one or more required infrastructure providers does not satisfy constraints: [aws template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), ), }, - err: fmt.Sprintf(`failed to validate k8s compatibility: k8s version v1.30.0 of the ManagedCluster default/managedcluster does not satisfy constrainted version <1.30 from the ServiceTemplate default/%s`, testTemplateName), + err: fmt.Sprintf(`failed to validate k8s compatibility: k8s version v1.30.0 of the ManagedCluster default/%s does not satisfy constrained version <1.30 from the ServiceTemplate default/%s`, managedcluster.DefaultName, testTemplateName), warnings: admission.Warnings{"Failed to validate k8s version compatibility with ServiceTemplates"}, }, - }...) 
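For reference, the expected error in the test case above is plain Masterminds/semver
constraint arithmetic: the version `v1.30.0` does not satisfy the constraint `<1.30`.
A minimal, self-contained illustration of that check (not part of the patch):

    package main

    import (
        "fmt"

        "github.com/Masterminds/semver/v3"
    )

    func main() {
        v := semver.MustParse("v1.30.0")        // version reported by the ClusterTemplate status
        c, err := semver.NewConstraint("<1.30") // constraint declared by the ServiceTemplate
        if err != nil {
            panic(err)
        }
        fmt.Println(c.Check(v)) // false, so the webhook rejects the object
    }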
+ } +) + +func TestManagedClusterValidateCreate(t *testing.T) { + g := NewWithT(t) + + ctx := admission.NewContextWithRequest(context.Background(), admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }) + for _, tt := range createAndUpdateTests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() + validator := &ManagedClusterValidator{Client: c} + warn, err := validator.ValidateCreate(ctx, tt.managedCluster) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + if err.Error() != tt.err { + t.Fatalf("expected error '%s', got error: %s", tt.err, err.Error()) + } + } else { + g.Expect(err).To(Succeed()) + } + + g.Expect(warn).To(Equal(tt.warnings)) + }) + } +} + +func TestManagedClusterValidateUpdate(t *testing.T) { + g := NewWithT(t) ctx := admission.NewContextWithRequest(context.Background(), admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ Operation: admissionv1.Update, }, }) - for _, tt := range updateTests { + for _, tt := range createAndUpdateTests { t.Run(tt.name, func(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() validator := &ManagedClusterValidator{Client: c} diff --git a/internal/webhook/management_webhook.go b/internal/webhook/management_webhook.go index f1784cd73..b0c0f1704 100644 --- a/internal/webhook/management_webhook.go +++ b/internal/webhook/management_webhook.go @@ -17,14 +17,17 @@ package webhook import ( "context" "errors" + "fmt" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Masterminds/semver/v3" + hmcv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" ) type ManagementValidator struct { @@ -36,7 +39,7 @@ var errManagementDeletionForbidden = errors.New("management deletion is forbidde func (v *ManagementValidator) SetupWebhookWithManager(mgr ctrl.Manager) error { v.Client = mgr.GetClient() return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.Management{}). + For(&hmcv1alpha1.Management{}). WithValidator(v). WithDefaulter(v). Complete() @@ -53,13 +56,82 @@ func (*ManagementValidator) ValidateCreate(_ context.Context, _ runtime.Object) } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. 
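For context on the rewrite that follows: incompatibilities are accumulated with
errors.Join (Go 1.20+), so a single admission response lists every offending
ProviderTemplate instead of stopping at the first one. A stripped-down sketch of
that aggregation, using illustrative values rather than the patch's own data:

    var wrong error
    capiVer := semver.MustParse("1.0.0")
    for _, raw := range []string{"^1.0.0", "~2.0.0"} {
        constraint, err := semver.NewConstraint(raw)
        if err != nil {
            continue // the real code surfaces the parse error instead
        }
        if !constraint.Check(capiVer) {
            wrong = errors.Join(wrong, fmt.Errorf("constraint %s is not satisfied by %s", raw, capiVer))
        }
    }
    // wrong now wraps only the "~2.0.0" failure; nil would mean full compatibility.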
-func (*ManagementValidator) ValidateUpdate(_ context.Context, _ runtime.Object, _ runtime.Object) (admission.Warnings, error) { +func (v *ManagementValidator) ValidateUpdate(ctx context.Context, _, newObj runtime.Object) (admission.Warnings, error) { + const invalidMgmtMsg = "the Management is invalid" + + mgmt, ok := newObj.(*hmcv1alpha1.Management) + if !ok { + return nil, apierrors.NewBadRequest(fmt.Sprintf("expected Management but got a %T", newObj)) + } + + release := new(hmcv1alpha1.Release) + if err := v.Get(ctx, client.ObjectKey{Name: mgmt.Spec.Release}, release); err != nil { + // TODO: probably we do not want this skip if extra checks will be introduced + if apierrors.IsNotFound(err) && (mgmt.Spec.Core == nil || mgmt.Spec.Core.CAPI.Template == "") { + return nil, nil // nothing to do + } + return nil, fmt.Errorf("failed to get Release %s: %w", mgmt.Spec.Release, err) + } + + capiTplName := release.Spec.CAPI.Template + if mgmt.Spec.Core != nil && mgmt.Spec.Core.CAPI.Template != "" { + capiTplName = mgmt.Spec.Core.CAPI.Template + } + + capiTpl := new(hmcv1alpha1.ProviderTemplate) + if err := v.Get(ctx, client.ObjectKey{Name: capiTplName}, capiTpl); err != nil { + return nil, fmt.Errorf("failed to get ProviderTemplate %s: %w", capiTplName, err) + } + + if capiTpl.Status.CAPIVersion == "" { + return nil, nil // nothing to validate against + } + + capiRequiredVersion, err := semver.NewVersion(capiTpl.Status.CAPIVersion) + if err != nil { // should never happen + return nil, fmt.Errorf("%s: invalid CAPI version %s in the ProviderTemplate %s to be validated against: %v", invalidMgmtMsg, capiTpl.Status.CAPIVersion, capiTpl.Name, err) + } + + var wrongVersions error + for _, p := range mgmt.Spec.Providers { + tplName := p.Template + if tplName == "" { + tplName = release.ProviderTemplate(p.Name) + } + + if tplName == capiTpl.Name { // skip capi itself + continue + } + + pTpl := new(hmcv1alpha1.ProviderTemplate) + if err := v.Get(ctx, client.ObjectKey{Name: tplName}, pTpl); err != nil { + return nil, fmt.Errorf("failed to get ProviderTemplate %s: %w", tplName, err) + } + + if pTpl.Status.CAPIVersionConstraint == "" { + continue + } + + constraint, err := semver.NewConstraint(pTpl.Status.CAPIVersionConstraint) + if err != nil { // should never happen + return nil, fmt.Errorf("%s: invalid CAPI version constraint %s in the ProviderTemplate %s: %v", invalidMgmtMsg, pTpl.Status.CAPIVersionConstraint, pTpl.Name, err) + } + + if !constraint.Check(capiRequiredVersion) { + wrongVersions = errors.Join(wrongVersions, fmt.Errorf("core CAPI version %s does not satisfy ProviderTemplate %s constraint %s", capiRequiredVersion, pTpl.Name, constraint)) + } + } + + if wrongVersions != nil { + return admission.Warnings{"The Management object has incompatible CAPI versions ProviderTemplates"}, fmt.Errorf("%s: %s", invalidMgmtMsg, wrongVersions) + } + return nil, nil } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. 
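The deletion guard that follows keeps the cheap existence probe already used in
this file: a List capped with client.Limit(1) answers "does at least one
dependent object remain?" without paging through the whole collection. The
illustrative shape, assuming a ready client c:

    managedClusters := &hmcv1alpha1.ManagedClusterList{}
    if err := c.List(ctx, managedClusters, client.Limit(1)); err != nil {
        return err
    }
    if len(managedClusters.Items) > 0 {
        return errManagementDeletionForbidden // at least one cluster still depends on this Management
    }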
func (v *ManagementValidator) ValidateDelete(ctx context.Context, _ runtime.Object) (admission.Warnings, error) { - managedClusters := &v1alpha1.ManagedClusterList{} + managedClusters := &hmcv1alpha1.ManagedClusterList{} err := v.Client.List(ctx, managedClusters, client.Limit(1)) if err != nil { return nil, err diff --git a/internal/webhook/management_webhook_test.go b/internal/webhook/management_webhook_test.go index a8bd18036..13bb2942d 100644 --- a/internal/webhook/management_webhook_test.go +++ b/internal/webhook/management_webhook_test.go @@ -16,9 +16,11 @@ package webhook import ( "context" + "fmt" "testing" . "github.com/onsi/gomega" + admissionv1 "k8s.io/api/admission/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -26,13 +28,160 @@ import ( "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/test/objects/managedcluster" "github.com/Mirantis/hmc/test/objects/management" + "github.com/Mirantis/hmc/test/objects/release" + "github.com/Mirantis/hmc/test/objects/template" "github.com/Mirantis/hmc/test/scheme" ) +func TestManagementValidateUpdate(t *testing.T) { + g := NewWithT(t) + + ctx := admission.NewContextWithRequest(context.Background(), admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{Operation: admissionv1.Update}}) + + const ( + versionOne = "1.0.0" + constraintVerOne, constraintVerTwo = "^1.0.0", "~2.0.0" + invalidVersion = "invalid-ver" + invalidConstraint = "invalid-constraint" + ) + + providerAwsDefaultTpl := v1alpha1.Provider{ + Name: "aws", + Component: v1alpha1.Component{ + Template: template.DefaultName, + }, + } + + tests := []struct { + name string + management *v1alpha1.Management + existingObjects []runtime.Object + err string + warnings admission.Warnings + }{ + { + name: "no release and no core capi tpl set, should succeed", + management: management.NewManagement(), + }, + { + name: "no capi providertemplate, should fail", + management: management.NewManagement(management.WithRelease(release.DefaultName)), + existingObjects: []runtime.Object{release.New()}, + err: fmt.Sprintf(`failed to get ProviderTemplate %s: providertemplates.hmc.mirantis.com "%s" not found`, release.DefaultCAPITemplateName, release.DefaultCAPITemplateName), + }, + { + name: "capi providertemplate without capi version set, should succeed", + management: management.NewManagement(management.WithRelease(release.DefaultName)), + existingObjects: []runtime.Object{ + release.New(), + template.NewProviderTemplate(template.WithName(release.DefaultCAPITemplateName)), + }, + }, + { + name: "capi providertemplate with wrong capi semver set, should fail", + management: management.NewManagement(management.WithRelease(release.DefaultName)), + existingObjects: []runtime.Object{ + release.New(), + template.NewProviderTemplate( + template.WithName(release.DefaultCAPITemplateName), + template.WithProviderStatusCAPIVersion(invalidVersion), + ), + }, + err: fmt.Sprintf("the Management is invalid: invalid CAPI version %s in the ProviderTemplate %s to be validated against: Invalid Semantic Version", invalidVersion, release.DefaultCAPITemplateName), + }, + { + name: "providertemplates without specified capi constraints, should succeed", + management: management.NewManagement( + management.WithRelease(release.DefaultName), + management.WithProviders([]v1alpha1.Provider{providerAwsDefaultTpl}), + ), + existingObjects: []runtime.Object{ + release.New(), + template.NewProviderTemplate( + 
template.WithName(release.DefaultCAPITemplateName), + template.WithProviderStatusCAPIVersion(versionOne), + ), + template.NewProviderTemplate(), + }, + }, + { + name: "providertemplates with invalid specified capi semver, should fail", + management: management.NewManagement( + management.WithRelease(release.DefaultName), + management.WithProviders([]v1alpha1.Provider{providerAwsDefaultTpl}), + ), + existingObjects: []runtime.Object{ + release.New(), + template.NewProviderTemplate( + template.WithName(release.DefaultCAPITemplateName), + template.WithProviderStatusCAPIVersion(versionOne), + ), + template.NewProviderTemplate( + template.WithProviderStatusCAPIConstraint(invalidConstraint), + ), + }, + err: fmt.Sprintf("the Management is invalid: invalid CAPI version constraint %s in the ProviderTemplate %s: improper constraint: %s", invalidConstraint, template.DefaultName, invalidConstraint), + }, + { + name: "providertemplates do not match capi version, should fail", + management: management.NewManagement( + management.WithRelease(release.DefaultName), + management.WithProviders([]v1alpha1.Provider{providerAwsDefaultTpl}), + ), + existingObjects: []runtime.Object{ + release.New(), + template.NewProviderTemplate( + template.WithName(release.DefaultCAPITemplateName), + template.WithProviderStatusCAPIVersion(versionOne), + ), + template.NewProviderTemplate( + template.WithProviderStatusCAPIConstraint(constraintVerTwo), + ), + }, + warnings: admission.Warnings{"The Management object has incompatible CAPI versions ProviderTemplates"}, + err: fmt.Sprintf("the Management is invalid: core CAPI version %s does not satisfy ProviderTemplate %s constraint %s", versionOne, template.DefaultName, constraintVerTwo), + }, + { + name: "providertemplates match capi version, should succeed", + management: management.NewManagement( + management.WithRelease(release.DefaultName), + management.WithProviders([]v1alpha1.Provider{providerAwsDefaultTpl}), + ), + existingObjects: []runtime.Object{ + release.New(), + template.NewProviderTemplate( + template.WithName(release.DefaultCAPITemplateName), + template.WithProviderStatusCAPIVersion(versionOne), + ), + template.NewProviderTemplate( + template.WithProviderStatusCAPIConstraint(constraintVerOne), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() + validator := &ManagementValidator{Client: c} + + warnings, err := validator.ValidateUpdate(ctx, nil, tt.management) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tt.err)) + } else { + g.Expect(err).To(Succeed()) + } + + g.Expect(warnings).To(Equal(tt.warnings)) + }) + } +} + func TestManagementValidateDelete(t *testing.T) { g := NewWithT(t) - ctx := context.Background() + ctx := admission.NewContextWithRequest(context.Background(), admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{Operation: admissionv1.Delete}}) tests := []struct { name string @@ -55,23 +204,19 @@ func TestManagementValidateDelete(t *testing.T) { } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(_ *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.existingObjects...).Build() validator := &ManagementValidator{Client: c} + warn, err := validator.ValidateDelete(ctx, tt.management) if tt.err != "" { g.Expect(err).To(HaveOccurred()) - if err.Error() != tt.err { - t.Fatalf("expected error 
'%s', got error: %s", tt.err, err.Error()) - } + g.Expect(err).To(MatchError(tt.err)) } else { g.Expect(err).To(Succeed()) } - if len(tt.warnings) > 0 { - g.Expect(warn).To(Equal(tt.warnings)) - } else { - g.Expect(warn).To(BeEmpty()) - } + + g.Expect(warn).To(Equal(tt.warnings)) }) } } diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml index 91bcd8de5..51fc2e84e 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_clustertemplates.yaml @@ -104,59 +104,66 @@ spec: rule: (has(self.chartName) && !has(self.chartRef)) || (!has(self.chartName) && has(self.chartRef)) k8sVersion: - description: Compatible K8S version of the cluster set in the SemVer - format. + description: Kubernetes exact version in the SemVer format provided + by this ClusterTemplate. type: string providers: - description: Providers represent required CAPI providers with constrainted - compatibility versions set. Should be set if not present in the - Helm chart metadata. + description: |- + Providers represent required CAPI providers with constrained compatibility versions set. + Should be set if not present in the Helm chart metadata. + Compatibility attributes are optional to be defined. properties: bootstrap: - description: List of CAPI bootstrap providers with either an exact - or constrainted version in the SemVer format. + description: |- + List of CAPI bootstrap providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array controlPlane: - description: List of CAPI control plane providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI control plane providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array infrastructure: - description: List of CAPI infrastructure providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI infrastructure providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. 
properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array @@ -209,62 +216,69 @@ spec: description: Description contains information about the template. type: string k8sVersion: - description: Compatible K8S version of the cluster set in the SemVer - format. + description: Kubernetes exact version in the SemVer format provided + by this ClusterTemplate. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer providers: - description: Providers represent exposed CAPI providers with constrainted - compatibility versions set. + description: |- + Providers represent required CAPI providers with constrained compatibility versions set + if the latter has been given. properties: bootstrap: - description: List of CAPI bootstrap providers with either an exact - or constrainted version in the SemVer format. + description: |- + List of CAPI bootstrap providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array controlPlane: - description: List of CAPI control plane providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI control plane providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array infrastructure: - description: List of CAPI infrastructure providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI infrastructure providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. 
type: string type: object type: array diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml index e1fa118ea..6c33ac0bb 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml @@ -191,67 +191,13 @@ spec: type: array k8sVersion: description: |- - Currently compatible K8S version of the cluster. Being set only if + Currently compatible exact Kubernetes version of the cluster. Being set only if provided by the corresponding ClusterTemplate. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer - providers: - description: |- - Providers represent exposed CAPI providers with constrainted compatibility versions set. - Propagated from the corresponding ClusterTemplate. - properties: - bootstrap: - description: List of CAPI bootstrap providers with either an exact - or constrainted version in the SemVer format. - items: - description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. - properties: - name: - description: Name of the provider. - type: string - versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) - type: string - type: object - type: array - controlPlane: - description: List of CAPI control plane providers with either - an exact or constrainted version in the SemVer format. - items: - description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. - properties: - name: - description: Name of the provider. - type: string - versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) - type: string - type: object - type: array - infrastructure: - description: List of CAPI infrastructure providers with either - an exact or constrainted version in the SemVer format. - items: - description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. - properties: - name: - description: Name of the provider. - type: string - versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) - type: string - type: object - type: array - type: object type: object type: object served: true diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml index 5f9d66007..8ffcde13e 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managements.yaml @@ -117,50 +117,56 @@ spec: their exact compatibility versions if specified in ProviderTemplates on the Management cluster. properties: bootstrap: - description: List of CAPI bootstrap providers with either an exact - or constrainted version in the SemVer format. + description: |- + List of CAPI bootstrap providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. 
type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array controlPlane: - description: List of CAPI control plane providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI control plane providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array infrastructure: - description: List of CAPI infrastructure providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI infrastructure providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml index e00635e13..39ddece49 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_providertemplates.yaml @@ -57,7 +57,14 @@ spec: description: ProviderTemplateSpec defines the desired state of ProviderTemplate properties: capiVersion: - description: Compatible CAPI provider version set in the SemVer format. + description: |- + CAPI exact version in the SemVer format. + Applicable only for the cluster-api ProviderTemplate itself. + type: string + capiVersionConstraint: + description: |- + CAPI version constraint in the SemVer format indicating compatibility with the core CAPI. + Not applicable for the cluster-api ProviderTemplate. type: string helm: description: HelmSpec references a Helm chart representing the HMC @@ -107,69 +114,84 @@ spec: rule: (has(self.chartName) && !has(self.chartRef)) || (!has(self.chartName) && has(self.chartRef)) providers: - description: Represents required CAPI providers with exact compatibility - versions set. Should be set if not present in the Helm chart metadata. + description: |- + Providers represent exposed CAPI providers with exact compatibility versions set. + Should be set if not present in the Helm chart metadata. + Compatibility attributes are optional to be defined. 
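With the capiVersionConstraint field introduced above, a ProviderTemplate either
pins the exact CAPI version it ships (the cluster-api template itself) or states
a constraint on the core CAPI version it tolerates (every other provider); a CEL
rule added further below forbids setting both at once. A purely illustrative
spec fragment (hypothetical name and values):

    apiVersion: hmc.mirantis.com/v1alpha1
    kind: ProviderTemplate
    metadata:
      name: cluster-api-provider-aws-example
    spec:
      capiVersionConstraint: "^1.0.0"  # accepts any core CAPI 1.x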
properties: bootstrap: - description: List of CAPI bootstrap providers with either an exact - or constrainted version in the SemVer format. + description: |- + List of CAPI bootstrap providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array controlPlane: - description: List of CAPI control plane providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI control plane providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array infrastructure: - description: List of CAPI infrastructure providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI infrastructure providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array type: object - required: - - helm type: object x-kubernetes-validations: - message: Spec is immutable rule: self == oldSelf + - message: Either capiVersion or capiVersionConstraint may be set, but + not both + rule: '!(has(self.capiVersion) && has(self.capiVersionConstraint))' status: description: ProviderTemplateStatus defines the observed state of ProviderTemplate properties: capiVersion: - description: Compatible CAPI provider version in the SemVer format. + description: |- + CAPI exact version in the SemVer format. + Applicable only for the capi Template itself. + type: string + capiVersionConstraint: + description: CAPI version constraint in the SemVer format indicating + compatibility with the core CAPI. type: string chartRef: description: |- @@ -214,54 +236,61 @@ spec: format: int64 type: integer providers: - description: Providers represent exposed CAPI providers with exact - compatibility versions set. 
+ description: |- + Providers represent exposed CAPI providers with exact compatibility versions set + if the latter has been given. properties: bootstrap: - description: List of CAPI bootstrap providers with either an exact - or constrainted version in the SemVer format. + description: |- + List of CAPI bootstrap providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array controlPlane: - description: List of CAPI control plane providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI control plane providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array infrastructure: - description: List of CAPI infrastructure providers with either - an exact or constrainted version in the SemVer format. + description: |- + List of CAPI infrastructure providers with either an exact or constrained version in the SemVer format. + Compatibility attributes are optional to be defined. items: description: Represents name of the provider with either an - exact or constrainted version in the SemVer format. + exact or constrained version in the SemVer format. properties: name: description: Name of the provider. type: string versionOrConstraint: - description: Compatibility restriction in the SemVer format - (exact or constrainted version) + description: |- + Compatibility restriction in the SemVer format (exact or constrained version). + Optional to be defined. type: string type: object type: array diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml index 465f57233..967e71ac7 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_servicetemplates.yaml @@ -108,8 +108,9 @@ spec: cluster set in the SemVer format. type: string providers: - description: Represents required CAPI providers. Should be set if - not present in the Helm chart metadata. + description: |- + Providers represent requested CAPI providers. + Should be set if not present in the Helm chart metadata. properties: bootstrap: description: BootstrapProviders is the list of CAPI bootstrap @@ -186,7 +187,7 @@ spec: format: int64 type: integer providers: - description: Represents exposed CAPI providers. 
+ description: Providers represent requested CAPI providers. properties: bootstrap: description: BootstrapProviders is the list of CAPI bootstrap diff --git a/templates/provider/k0smotron/Chart.yaml b/templates/provider/k0smotron/Chart.yaml index ab31a4d89..c453ca418 100644 --- a/templates/provider/k0smotron/Chart.yaml +++ b/templates/provider/k0smotron/Chart.yaml @@ -22,4 +22,4 @@ appVersion: "1.0.4" annotations: hmc.mirantis.com/infrastructure-providers: k0smotron hmc.mirantis.com/bootstrap-providers: k0s - hmc.mirantis.com/control-plane-providers: k0s,k0smotron + hmc.mirantis.com/control-plane-providers: k0s; k0smotron diff --git a/test/kubeclient/kubeclient.go b/test/kubeclient/kubeclient.go index 459e797a2..34edc413c 100644 --- a/test/kubeclient/kubeclient.go +++ b/test/kubeclient/kubeclient.go @@ -34,11 +34,11 @@ import ( ) type KubeClient struct { - Namespace string - Client kubernetes.Interface ExtendedClient apiextensionsclientset.Interface Config *rest.Config + + Namespace string } // NewFromLocal creates a new instance of KubeClient from a given namespace diff --git a/test/objects/managedcluster/managedcluster.go b/test/objects/managedcluster/managedcluster.go index 15a7d1525..4204576a6 100644 --- a/test/objects/managedcluster/managedcluster.go +++ b/test/objects/managedcluster/managedcluster.go @@ -66,12 +66,6 @@ func WithClusterTemplate(templateName string) Opt { } } -func WithK8sVersionStatus(v string) Opt { - return func(managedCluster *v1alpha1.ManagedCluster) { - managedCluster.Status.KubertenesVersion = v - } -} - func WithConfig(config string) Opt { return func(p *v1alpha1.ManagedCluster) { p.Spec.Config = &apiextensionsv1.JSON{ diff --git a/test/objects/management/management.go b/test/objects/management/management.go index 98c5ade15..a6861c804 100644 --- a/test/objects/management/management.go +++ b/test/objects/management/management.go @@ -75,3 +75,9 @@ func WithComponentsStatus(components map[string]v1alpha1.ComponentStatus) Opt { p.Status.Components = components } } + +func WithRelease(v string) Opt { + return func(management *v1alpha1.Management) { + management.Spec.Release = v + } +} diff --git a/test/objects/release/release.go b/test/objects/release/release.go new file mode 100644 index 000000000..ab92e5b4e --- /dev/null +++ b/test/objects/release/release.go @@ -0,0 +1,70 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
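The fixture introduced below follows the functional-options pattern used across
the other test/objects packages: New builds a Release with sane defaults and
then applies each Opt in order. A typical call site (illustrative values):

    rel := release.New(
        release.WithName("release-test-0-0-2"),
        release.WithCAPITemplateName("cluster-api-test-0-0-2"),
    )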
+ +package release + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/Mirantis/hmc/api/v1alpha1" +) + +const ( + DefaultName = "release-test-0-0-1" + + DefaultCAPITemplateName = "cluster-api-test-0-0-1" + DefaultHMCTemplateName = "hmc-test-0-0-1" +) + +type Opt func(*v1alpha1.Release) + +func New(opts ...Opt) *v1alpha1.Release { + release := &v1alpha1.Release{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultName, + }, + Spec: v1alpha1.ReleaseSpec{ + HMC: v1alpha1.CoreProviderTemplate{ + Template: DefaultHMCTemplateName, + }, + CAPI: v1alpha1.CoreProviderTemplate{ + Template: DefaultCAPITemplateName, + }, + }, + } + + for _, opt := range opts { + opt(release) + } + + return release +} + +func WithName(name string) Opt { + return func(r *v1alpha1.Release) { + r.Name = name + } +} + +func WithHMCTemplateName(v string) Opt { + return func(r *v1alpha1.Release) { + r.Spec.HMC.Template = v + } +} + +func WithCAPITemplateName(v string) Opt { + return func(r *v1alpha1.Release) { + r.Spec.CAPI.Template = v + } +} diff --git a/test/objects/template/template.go b/test/objects/template/template.go index 52fcf56fd..f53877503 100644 --- a/test/objects/template/template.go +++ b/test/objects/template/template.go @@ -72,8 +72,7 @@ func NewServiceTemplate(opts ...Opt) *v1alpha1.ServiceTemplate { func NewProviderTemplate(opts ...Opt) *v1alpha1.ProviderTemplate { t := &v1alpha1.ProviderTemplate{ ObjectMeta: metav1.ObjectMeta{ - Name: DefaultName, - Namespace: DefaultNamespace, + Name: DefaultName, }, } @@ -127,7 +126,7 @@ func WithServiceK8sConstraint(v string) Opt { return func(template Template) { switch tt := template.(type) { case *v1alpha1.ServiceTemplate: - tt.Status.KubertenesConstraint = v + tt.Status.KubernetesConstraint = v default: panic(fmt.Sprintf("unexpected obj typed %T, expected *ServiceTemplate", tt)) } @@ -174,3 +173,33 @@ func WithConfigStatus(config string) Opt { } } } + +func WithProviderStatusCAPIVersion(v string) Opt { + return func(template Template) { + pt, ok := template.(*v1alpha1.ProviderTemplate) + if !ok { + panic(fmt.Sprintf("unexpected type %T, expected ProviderTemplate", template)) + } + pt.Status.CAPIVersion = v + } +} + +func WithProviderStatusCAPIConstraint(v string) Opt { + return func(template Template) { + pt, ok := template.(*v1alpha1.ProviderTemplate) + if !ok { + panic(fmt.Sprintf("unexpected type %T, expected ProviderTemplate", template)) + } + pt.Status.CAPIVersionConstraint = v + } +} + +func WithClusterStatusK8sVersion(v string) Opt { + return func(template Template) { + ct, ok := template.(*v1alpha1.ClusterTemplate) + if !ok { + panic(fmt.Sprintf("unexpected type %T, expected ClusterTemplate", template)) + } + ct.Status.KubernetesVersion = v + } +} From 29f63eb38b14096ccad59454d7d8a32a04a0f1f2 Mon Sep 17 00:00:00 2001 From: Andrei Pavlov Date: Tue, 15 Oct 2024 12:16:59 +0700 Subject: [PATCH 10/29] Wait for templates creation in Release controller Also report current state in conditions. 
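The reconciler now mirrors the result into a TemplatesCreated condition on the
Release status and refuses to report success until the backing Flux HelmRelease
is Ready for its observed generation. The condition bookkeeping itself is plain
apimachinery; a simplified sketch of the success path as implemented below:

    condition := metav1.Condition{
        Type:               hmc.TemplatesCreatedCondition,
        Status:             metav1.ConditionTrue,
        ObservedGeneration: release.Generation,
        Reason:             hmc.SucceededReason,
        Message:            "All templates have been created",
    }
    // SetStatusCondition replaces an existing condition of the same Type and
    // refreshes LastTransitionTime only when the status value actually changes.
    meta.SetStatusCondition(&release.Status.Conditions, condition)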
Signed-off-by: Andrei Pavlov --- api/v1alpha1/common.go | 14 +++ api/v1alpha1/managedcluster_types.go | 14 --- api/v1alpha1/release_types.go | 11 ++- api/v1alpha1/zz_generated.deepcopy.go | 1 - internal/controller/release_controller.go | 92 ++++++++++++------- .../crds/hmc.mirantis.com_releases.yaml | 20 +--- .../hmc/templates/rbac/controller/roles.yaml | 6 ++ 7 files changed, 92 insertions(+), 66 deletions(-) diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go index 0a25b4a70..5ee4e32e0 100644 --- a/api/v1alpha1/common.go +++ b/api/v1alpha1/common.go @@ -21,6 +21,20 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + // SucceededReason indicates a condition or event observed a success, for example when declared desired state + // matches actual state, or a performed action succeeded. + SucceededReason string = "Succeeded" + + // FailedReason indicates a condition or event observed a failure, for example when declared state does not match + // actual state, or a performed action failed. + FailedReason string = "Failed" + + // ProgressingReason indicates a condition or event observed progression, for example when the reconciliation of a + // resource or an action has started. + ProgressingReason string = "Progressing" +) + type ( // Providers hold different types of CAPI providers. Providers struct { diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index befb3122a..89a7f9bf9 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -50,20 +50,6 @@ const ( ReadyCondition string = "Ready" ) -const ( - // SucceededReason indicates a condition or event observed a success, for example when declared desired state - // matches actual state, or a performed action succeeded. - SucceededReason string = "Succeeded" - - // FailedReason indicates a condition or event observed a failure, for example when declared state does not match - // actual state, or a performed action failed. - FailedReason string = "Failed" - - // ProgressingReason indicates a condition or event observed progression, for example when the reconciliation of a - // resource or an action has started. - ProgressingReason string = "Progressing" -) - // ManagedClusterSpec defines the desired state of ManagedCluster type ManagedClusterSpec struct { // Config allows to provide parameters for template customization. diff --git a/api/v1alpha1/release_types.go b/api/v1alpha1/release_types.go index 8acf8bdaf..a20f1dc73 100644 --- a/api/v1alpha1/release_types.go +++ b/api/v1alpha1/release_types.go @@ -18,6 +18,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + ReleaseKind = "Release" + + // TemplatesCreatedCondition indicates that all templates associated with the Release are created. + TemplatesCreatedCondition = "TemplatesCreated" +) + // ReleaseSpec defines the desired state of Release type ReleaseSpec struct { // Version of the HMC Release in the semver format. @@ -52,12 +59,12 @@ func (in *Release) ProviderTemplate(name string) string { // ReleaseStatus defines the observed state of Release type ReleaseStatus struct { - // Templates indicates the status of templates associated with the Release. - Templates ComponentStatus `json:"templates,omitempty"` // Conditions contains details for the current state of the Release Conditions []metav1.Condition `json:"conditions,omitempty"` // Ready indicates whether HMC is ready to be upgraded to this Release. 
Ready bool `json:"ready,omitempty"` + // ObservedGeneration is the last observed generation. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a0d3c518a..e14ce1474 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -997,7 +997,6 @@ func (in *ReleaseSpec) DeepCopy() *ReleaseSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReleaseStatus) DeepCopyInto(out *ReleaseStatus) { *out = *in - out.Templates = in.Templates if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) diff --git a/internal/controller/release_controller.go b/internal/controller/release_controller.go index 825126e29..79ac14676 100644 --- a/internal/controller/release_controller.go +++ b/internal/controller/release_controller.go @@ -21,14 +21,18 @@ import ( "fmt" hcv2 "github.com/fluxcd/helm-controller/api/v2" + fluxmeta "github.com/fluxcd/pkg/apis/meta" + fluxconditions "github.com/fluxcd/pkg/runtime/conditions" sourcev1 "github.com/fluxcd/source-controller/api/v1" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chartutil" "helm.sh/helm/v3/pkg/storage/driver" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -61,18 +65,31 @@ type ReleaseReconciler struct { CreateTemplates bool } -func (r *ReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *ReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, err error) { l := ctrl.LoggerFrom(ctx).WithValues("controller", "ReleaseController") l.Info("Reconciling Release") defer l.Info("Release reconcile is finished") - err := r.reconcileHMCTemplates(ctx, req) + release := &hmc.Release{} + if req.Name != "" { + if err := r.Client.Get(ctx, req.NamespacedName, release); err != nil { + l.Error(err, "failed to get Release") + return ctrl.Result{}, err + } + defer func() { + release.Status.ObservedGeneration = release.Generation + err = errors.Join(err, r.Status().Update(ctx, release)) + }() + } + + err = r.reconcileHMCTemplates(ctx, release.Name, release.Spec.Version, release.UID) + r.updateTemplatesCondition(release, err) if err != nil { l.Error(err, "failed to reconcile HMC Templates") return ctrl.Result{}, err } - if initialReconcile(req) { + if release.Name == "" { if err := r.ensureManagement(ctx); err != nil { l.Error(err, "failed to get or create Management object") return ctrl.Result{}, err @@ -81,8 +98,23 @@ func (r *ReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct return ctrl.Result{}, nil } -func initialReconcile(req ctrl.Request) bool { - return req.Name == "" +func (r *ReleaseReconciler) updateTemplatesCondition(release *hmc.Release, err error) { + condition := metav1.Condition{ + Type: hmc.TemplatesCreatedCondition, + Status: metav1.ConditionTrue, + ObservedGeneration: release.Generation, + Reason: hmc.SucceededReason, + Message: "All templates have been created", + } + if !r.CreateTemplates { + condition.Message = "Templates 
creation is disabled" + } + if err != nil { + condition.Status = metav1.ConditionFalse + condition.Message = err.Error() + condition.Reason = hmc.FailedReason + } + meta.SetStatusCondition(&release.Status.Conditions, condition) } func (r *ReleaseReconciler) ensureManagement(ctx context.Context) error { @@ -150,45 +182,36 @@ func (r *ReleaseReconciler) ensureManagement(ctx context.Context) error { return nil } -func (r *ReleaseReconciler) reconcileHMCTemplates(ctx context.Context, req ctrl.Request) error { +func (r *ReleaseReconciler) reconcileHMCTemplates(ctx context.Context, releaseName, releaseVersion string, releaseUID types.UID) error { l := ctrl.LoggerFrom(ctx) if !r.CreateTemplates { l.Info("Templates creation is disabled") return nil } - if initialReconcile(req) && !r.CreateRelease { + if releaseName == "" && !r.CreateRelease { l.Info("Initial creation of HMC Release is skipped") return nil } - releaseName := utils.ReleaseNameFromVersion(build.Version) - version := build.Version var ownerRefs []metav1.OwnerReference - release := &hmc.Release{} - if !initialReconcile(req) { - if err := r.Client.Get(ctx, req.NamespacedName, release); err != nil { - l.Error(err, "failed to get Release") + if releaseName == "" { + releaseName = utils.ReleaseNameFromVersion(build.Version) + releaseVersion = build.Version + err := helm.ReconcileHelmRepository(ctx, r.Client, defaultRepoName, r.SystemNamespace, r.DefaultRegistryConfig.HelmRepositorySpec()) + if err != nil { + l.Error(err, "Failed to reconcile default HelmRepository", "namespace", r.SystemNamespace) return err } - releaseName = req.Name - version = release.Spec.Version + } else { ownerRefs = []metav1.OwnerReference{ { APIVersion: hmc.GroupVersion.String(), - Kind: release.Kind, - Name: release.Name, - UID: release.UID, + Kind: hmc.ReleaseKind, + Name: releaseName, + UID: releaseUID, }, } } - if initialReconcile(req) { - err := helm.ReconcileHelmRepository(ctx, r.Client, defaultRepoName, r.SystemNamespace, r.DefaultRegistryConfig.HelmRepositorySpec()) - if err != nil { - l.Error(err, "Failed to reconcile default HelmRepository", "namespace", r.SystemNamespace) - return err - } - } - hmcTemplatesName := utils.TemplatesChartFromReleaseName(releaseName) helmChart := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ @@ -205,7 +228,7 @@ func (r *ReleaseReconciler) reconcileHMCTemplates(ctx context.Context, req ctrl. helmChart.Labels[hmc.HMCManagedLabelKey] = hmc.HMCManagedLabelValue helmChart.Spec = sourcev1.HelmChartSpec{ Chart: r.HMCTemplatesChartName, - Version: version, + Version: releaseVersion, SourceRef: sourcev1.LocalHelmChartSourceReference{ Kind: sourcev1.HelmRepositoryKind, Name: defaultRepoName, @@ -221,10 +244,6 @@ func (r *ReleaseReconciler) reconcileHMCTemplates(ctx context.Context, req ctrl. l.Info(fmt.Sprintf("Successfully %s %s/%s HelmChart", operation, r.SystemNamespace, hmcTemplatesName)) } - if _, err := helm.ArtifactReady(helmChart); err != nil { - return fmt.Errorf("HelmChart %s/%s Artifact is not ready: %w", r.SystemNamespace, hmcTemplatesName, err) - } - opts := helm.ReconcileHelmReleaseOpts{ ChartRef: &hcv2.CrossNamespaceSourceReference{ Kind: helmChart.Kind, @@ -233,7 +252,7 @@ func (r *ReleaseReconciler) reconcileHMCTemplates(ctx context.Context, req ctrl. }, } - if initialReconcile(req) { + if releaseName == "" { createReleaseValues := map[string]any{ "createRelease": true, } @@ -244,13 +263,20 @@ func (r *ReleaseReconciler) reconcileHMCTemplates(ctx context.Context, req ctrl. 
opts.Values = &apiextensionsv1.JSON{Raw: raw} } - _, operation, err = helm.ReconcileHelmRelease(ctx, r.Client, hmcTemplatesName, r.SystemNamespace, opts) + hr, operation, err := helm.ReconcileHelmRelease(ctx, r.Client, hmcTemplatesName, r.SystemNamespace, opts) if err != nil { return err } if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated { l.Info(fmt.Sprintf("Successfully %s %s/%s HelmRelease", operation, r.SystemNamespace, hmcTemplatesName)) } + hrReadyCondition := fluxconditions.Get(hr, fluxmeta.ReadyCondition) + if hrReadyCondition == nil || hrReadyCondition.ObservedGeneration != hr.Generation { + return fmt.Errorf("HelmRelease %s/%s is not ready yet. Waiting for reconciliation", r.SystemNamespace, hmcTemplatesName) + } + if hrReadyCondition.Status == metav1.ConditionFalse { + return fmt.Errorf("HelmRelease %s/%s is not ready yet. %s", r.SystemNamespace, hmcTemplatesName, hrReadyCondition.Message) + } return nil } diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml index ee19b66f7..4ff05bbd2 100644 --- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml +++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_releases.yaml @@ -145,26 +145,14 @@ spec: - type type: object type: array + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer ready: description: Ready indicates whether HMC is ready to be upgraded to this Release. type: boolean - templates: - description: Templates indicates the status of templates associated - with the Release. - properties: - error: - description: Error stores as error message in case of failed installation - type: string - success: - description: Success represents if a component installation was - successful - type: boolean - template: - description: Template is the name of the Template associated with - this component. 
-                  type: string
-              type: object
           type: object
         type: object
     served: true
diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml
index 21c49b962..739275821 100644
--- a/templates/provider/hmc/templates/rbac/controller/roles.yaml
+++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml
@@ -54,6 +54,12 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - hmc.mirantis.com
+  resources:
+  - releases/status
+  verbs:
+  - update
 - apiGroups:
   - hmc.mirantis.com
   resources:

From b7e1f2247edf70dff71d45e0c56433b71d561063 Mon Sep 17 00:00:00 2001
From: zerospiel
Date: Tue, 15 Oct 2024 14:58:45 +0200
Subject: [PATCH 11/29] Compatibility attributes amends

* move clustertemplates compatibility verification from the validation
  webhook to the tpl ctrl
* corresponding integration tests

Related Issue: https://github.com/Mirantis/hmc/issues/400
---
 api/v1alpha1/release_types.go               |   4 +-
 internal/controller/template_controller.go  | 127 +++++++++++++-
 .../controller/template_controller_test.go  | 156 ++++++++++++++++--
 internal/webhook/managedcluster_webhook.go  | 137 ++-------------
 .../webhook/managedcluster_webhook_test.go  |  60 -------
 5 files changed, 276 insertions(+), 208 deletions(-)

diff --git a/api/v1alpha1/release_types.go b/api/v1alpha1/release_types.go
index a20f1dc73..83e11b78e 100644
--- a/api/v1alpha1/release_types.go
+++ b/api/v1alpha1/release_types.go
@@ -61,10 +61,10 @@ func (in *Release) ProviderTemplate(name string) string {
 type ReleaseStatus struct {
 	// Conditions contains details for the current state of the Release
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
-	// Ready indicates whether HMC is ready to be upgraded to this Release.
-	Ready bool `json:"ready,omitempty"`
 	// ObservedGeneration is the last observed generation.
 	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+	// Ready indicates whether HMC is ready to be upgraded to this Release.
+ Ready bool `json:"ready,omitempty"` } // +kubebuilder:object:root=true diff --git a/internal/controller/template_controller.go b/internal/controller/template_controller.go index 98374916d..6611a1901 100644 --- a/internal/controller/template_controller.go +++ b/internal/controller/template_controller.go @@ -17,8 +17,12 @@ package controller import ( "context" "encoding/json" + "errors" "fmt" + "slices" + "time" + "github.com/Masterminds/semver/v3" helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2" sourcev1 "github.com/fluxcd/source-controller/api/v1" "helm.sh/helm/v3/pkg/chart" @@ -26,7 +30,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/helm" @@ -34,6 +40,8 @@ import ( const ( defaultRepoName = "hmc-templates" + + defaultRequeueTime = 1 * time.Minute ) // TemplateReconciler reconciles a *Template object @@ -73,7 +81,24 @@ func (r *ClusterTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Requ return ctrl.Result{}, err } - return r.ReconcileTemplate(ctx, clusterTemplate) + result, err := r.ReconcileTemplate(ctx, clusterTemplate) + if err != nil { + l.Error(err, "failed to reconcile template") + return result, err + } + + l.Info("Validating template compatibility attributes") + if err := r.validateCompatibilityAttrs(ctx, clusterTemplate); err != nil { + if apierrors.IsNotFound(err) { + l.Info("Validation cannot be performed until Management cluster appears", "requeue in", defaultRequeueTime) + return ctrl.Result{RequeueAfter: defaultRequeueTime}, nil + } + + l.Error(err, "failed to validate compatibility attributes") + return ctrl.Result{}, err + } + + return result, nil } func (r *ServiceTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -189,7 +214,7 @@ func (r *TemplateReconciler) ReconcileTemplate(ctx context.Context, template tem } l.Info("Validating Helm chart") - if err = helmChart.Validate(); err != nil { + if err := helmChart.Validate(); err != nil { l.Error(err, "Helm chart validation failed") _ = r.updateStatus(ctx, template, err.Error()) return ctrl.Result{}, err @@ -301,23 +326,115 @@ func (r *TemplateReconciler) getHelmChartFromChartRef(ctx context.Context, chart return helmChart, nil } +func (r *ClusterTemplateReconciler) validateCompatibilityAttrs(ctx context.Context, template *hmc.ClusterTemplate) error { + management := new(hmc.Management) + if err := r.Client.Get(ctx, client.ObjectKey{Name: hmc.ManagementName}, management); err != nil { + if apierrors.IsNotFound(err) { + _ = r.updateStatus(ctx, template, "Waiting for Management creation to complete validation") + return err + } + + err = fmt.Errorf("failed to get Management: %v", err) + _ = r.updateStatus(ctx, template, err.Error()) + return err + } + + exposedProviders, requiredProviders := management.Status.AvailableProviders, template.Status.Providers + + ctrl.LoggerFrom(ctx).V(1).Info("providers to check", "exposed", exposedProviders, "required", requiredProviders) + + var merr error + missing, wrong, parsing := collectMissingProvidersWithWrongVersions("bootstrap", exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) + merr = errors.Join(merr, missing, wrong, parsing) + + missing, wrong, parsing = 
collectMissingProvidersWithWrongVersions("control plane", exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) + merr = errors.Join(merr, missing, wrong, parsing) + + missing, wrong, parsing = collectMissingProvidersWithWrongVersions("infrastructure", exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) + merr = errors.Join(merr, missing, wrong, parsing) + + if merr != nil { + _ = r.updateStatus(ctx, template, merr.Error()) + return merr + } + + return r.updateStatus(ctx, template, "") +} + +// collectMissingProvidersWithWrongVersions returns collected errors for missing providers, providers with +// wrong versions that do not satisfy the corresponding constraints, and parsing errors respectevly. +func collectMissingProvidersWithWrongVersions(typ string, exposed, required []hmc.ProviderTuple) (missingErr, nonSatisfyingErr, parsingErr error) { + exposedSet := make(map[string]hmc.ProviderTuple, len(exposed)) + for _, v := range exposed { + exposedSet[v.Name] = v + } + + var missing, nonSatisfying []string + for _, reqWithConstraint := range required { + exposedWithExactVer, ok := exposedSet[reqWithConstraint.Name] + if !ok { + missing = append(missing, reqWithConstraint.Name) + continue + } + + version := exposedWithExactVer.VersionOrConstraint + constraint := reqWithConstraint.VersionOrConstraint + + if version == "" || constraint == "" { + continue + } + + exactVer, err := semver.NewVersion(version) + if err != nil { + parsingErr = errors.Join(parsingErr, fmt.Errorf("failed to parse version %s of the provider %s: %w", version, exposedWithExactVer.Name, err)) + continue + } + + requiredC, err := semver.NewConstraint(constraint) + if err != nil { + parsingErr = errors.Join(parsingErr, fmt.Errorf("failed to parse constraint %s of the provider %s: %w", version, exposedWithExactVer.Name, err)) + continue + } + + if !requiredC.Check(exactVer) { + nonSatisfying = append(nonSatisfying, fmt.Sprintf("%s %s !~ %s", reqWithConstraint.Name, version, constraint)) + } + } + + if len(missing) > 0 { + slices.Sort(missing) + missingErr = fmt.Errorf("one or more required %s providers are not deployed yet: %v", typ, missing) + } + + if len(nonSatisfying) > 0 { + slices.Sort(nonSatisfying) + nonSatisfyingErr = fmt.Errorf("one or more required %s providers does not satisfy constraints: %v", typ, nonSatisfying) + } + + if parsingErr != nil { + parsingErr = fmt.Errorf("one or more errors parsing %s providers' versions and constraints : %v", typ, parsingErr) + } + + return missingErr, nonSatisfyingErr, parsingErr +} + // SetupWithManager sets up the controller with the Manager. func (r *ClusterTemplateReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&hmc.ClusterTemplate{}). + For(&hmc.ClusterTemplate{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). Complete(r) } // SetupWithManager sets up the controller with the Manager. func (r *ServiceTemplateReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&hmc.ServiceTemplate{}). + For(&hmc.ServiceTemplate{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). Complete(r) } // SetupWithManager sets up the controller with the Manager. func (r *ProviderTemplateReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&hmc.ProviderTemplate{}). + For(&hmc.ProviderTemplate{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). 
Complete(r) } diff --git a/internal/controller/template_controller_test.go b/internal/controller/template_controller_test.go index e206ea993..3fec09bbc 100644 --- a/internal/controller/template_controller_test.go +++ b/internal/controller/template_controller_test.go @@ -16,15 +16,18 @@ package controller import ( "context" + "fmt" + "time" helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2" sourcev1 "github.com/fluxcd/source-controller/api/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "helm.sh/helm/v3/pkg/chart" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" hmcmirantiscomv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" @@ -32,11 +35,13 @@ import ( var _ = Describe("Template Controller", func() { Context("When reconciling a resource", func() { - const resourceName = "test-resource" - const helmRepoNamespace = "default" - const helmRepoName = "test-helmrepo" - const helmChartName = "test-helmchart" - const helmChartURL = "http://source-controller.hmc-system.svc.cluster.local./helmchart/hmc-system/test-chart/0.1.0.tar.gz" + const ( + resourceName = "test-resource" + helmRepoNamespace = metav1.NamespaceDefault + helmRepoName = "test-helmrepo" + helmChartName = "test-helmchart" + helmChartURL = "http://source-controller.hmc-system.svc.cluster.local./helmchart/hmc-system/test-chart/0.1.0.tar.gz" + ) fakeDownloadHelmChartFunc := func(context.Context, *sourcev1.Artifact) (*chart.Chart, error) { return &chart.Chart{ @@ -52,7 +57,7 @@ var _ = Describe("Template Controller", func() { typeNamespacedName := types.NamespacedName{ Name: resourceName, - Namespace: "default", + Namespace: metav1.NamespaceDefault, } clusterTemplate := &hmcmirantiscomv1alpha1.ClusterTemplate{} serviceTemplate := &hmcmirantiscomv1alpha1.ServiceTemplate{} @@ -71,7 +76,7 @@ var _ = Describe("Template Controller", func() { BeforeEach(func() { By("creating helm repository") err := k8sClient.Get(ctx, types.NamespacedName{Name: helmRepoName, Namespace: helmRepoNamespace}, helmRepo) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { helmRepo = &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ Name: helmRepoName, @@ -86,7 +91,7 @@ var _ = Describe("Template Controller", func() { By("creating helm chart") err = k8sClient.Get(ctx, types.NamespacedName{Name: helmChartName, Namespace: helmRepoNamespace}, helmChart) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { helmChart = &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ Name: helmChartName, @@ -112,11 +117,11 @@ var _ = Describe("Template Controller", func() { By("creating the custom resource for the Kind ClusterTemplate") err = k8sClient.Get(ctx, typeNamespacedName, clusterTemplate) - if err != nil && errors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { resource := &hmcmirantiscomv1alpha1.ClusterTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: resourceName, - Namespace: "default", + Namespace: metav1.NamespaceDefault, }, Spec: hmcmirantiscomv1alpha1.ClusterTemplateSpec{Helm: helmSpec}, } @@ -124,11 +129,11 @@ var _ = Describe("Template Controller", func() { } By("creating the custom resource for the Kind ServiceTemplate") err = k8sClient.Get(ctx, typeNamespacedName, serviceTemplate) - if err != nil && errors.IsNotFound(err) { + if err != nil && 
apierrors.IsNotFound(err) {
 				resource := &hmcmirantiscomv1alpha1.ServiceTemplate{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      resourceName,
-						Namespace: "default",
+						Namespace: metav1.NamespaceDefault,
 					},
 					Spec: hmcmirantiscomv1alpha1.ServiceTemplateSpec{Helm: helmSpec},
 				}
@@ -136,11 +141,10 @@ var _ = Describe("Template Controller", func() {
 			By("creating the custom resource for the Kind ProviderTemplate")
 			err = k8sClient.Get(ctx, typeNamespacedName, providerTemplate)
-			if err != nil && errors.IsNotFound(err) {
+			if err != nil && apierrors.IsNotFound(err) {
 				resource := &hmcmirantiscomv1alpha1.ProviderTemplate{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      resourceName,
-						Namespace: "default",
+						Name: resourceName,
 					},
 					Spec: hmcmirantiscomv1alpha1.ProviderTemplateSpec{Helm: helmSpec},
 				}
@@ -170,6 +174,7 @@ var _ = Describe("Template Controller", func() {
 			By("Cleanup the specific resource instance ClusterTemplate")
 			Expect(k8sClient.Delete(ctx, providerTemplateResource)).To(Succeed())
 		})
+
 		It("should successfully reconcile the resource", func() {
 			templateReconciler := TemplateReconciler{
 				Client: k8sClient,
@@ -190,5 +195,120 @@ var _ = Describe("Template Controller", func() {
 			_, err = providerTemplateReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typeNamespacedName})
 			Expect(err).NotTo(HaveOccurred())
 		})
+
+		It("should successfully validate cluster templates providers compatibility attributes", func() {
+			const (
+				clusterTemplateName           = "cluster-template-test-name"
+				mgmtName                      = hmcmirantiscomv1alpha1.ManagementName
+				someProviderName              = "test-provider-name"
+				someProviderVersion           = "v1.0.0"
+				someProviderVersionConstraint = ">= 1.0.0 <2.0.0-0" // ^1.0.0
+
+				timeout  = time.Second * 10
+				interval = time.Millisecond * 250
+			)
+
+			// NOTE: the cluster template from BeforeEach cannot be reused because spec is immutable
+			By("Creating cluster template with constrained versions")
+			clusterTemplate = &hmcmirantiscomv1alpha1.ClusterTemplate{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      clusterTemplateName,
+					Namespace: metav1.NamespaceDefault,
+				},
+				Spec: hmcmirantiscomv1alpha1.ClusterTemplateSpec{
+					Helm: helmSpec,
+					Providers: hmcmirantiscomv1alpha1.ProvidersTupled{
+						BootstrapProviders: []hmcmirantiscomv1alpha1.ProviderTuple{
+							{
+								Name:                someProviderName,
+								VersionOrConstraint: someProviderVersionConstraint, // constraint
+							},
+						},
+					},
+				},
+			}
+			Expect(k8sClient.Create(ctx, clusterTemplate)).To(Succeed())
+
+			By("Checking the cluster template has been updated")
+			Eventually(func() error {
+				if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterTemplate), clusterTemplate); err != nil {
+					return err
+				}
+
+				if l := len(clusterTemplate.Spec.Providers.BootstrapProviders); l != 1 {
+					return fmt.Errorf("expected .spec.providers.bootstrapProviders length to be exactly 1, got %d", l)
+				}
+
+				if v := clusterTemplate.Spec.Providers.BootstrapProviders[0]; v.Name != someProviderName || v.VersionOrConstraint != someProviderVersionConstraint {
+					return fmt.Errorf("expected .spec.providers.bootstrapProviders[0] to be %s:%s, got %s:%s", someProviderName, someProviderVersionConstraint, v.Name, v.VersionOrConstraint)
+				}
+
+				return nil
+			}).WithTimeout(timeout).WithPolling(interval).Should(Succeed())
+
+			By("Creating a management cluster object with proper required versions in status")
+			// must set status here since it's controlled by another ctrl
+			mgmt := &hmcmirantiscomv1alpha1.Management{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: mgmtName,
+				},
+			}
+			Expect(k8sClient.Create(ctx, mgmt)).To(Succeed())
+			mgmt.Status = hmcmirantiscomv1alpha1.ManagementStatus{
+				AvailableProviders: hmcmirantiscomv1alpha1.ProvidersTupled{
+					BootstrapProviders: []hmcmirantiscomv1alpha1.ProviderTuple{
+						{
+							Name:                someProviderName,
+							VersionOrConstraint: someProviderVersion, // version
+						},
+					},
+				},
+			}
+			Expect(k8sClient.Status().Update(ctx, mgmt)).To(Succeed())
+
+			By("Checking the management cluster appears")
+			Eventually(func() error {
+				if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(mgmt), mgmt); err != nil {
+					return err
+				}
+
+				if l := len(mgmt.Status.AvailableProviders.BootstrapProviders); l != 1 {
+					return fmt.Errorf("expected .status.availableProviders.bootstrapProviders length to be exactly 1, got %d", l)
+				}
+
+				if v := mgmt.Status.AvailableProviders.BootstrapProviders[0]; v.Name != someProviderName || v.VersionOrConstraint != someProviderVersion {
+					return fmt.Errorf("expected .status.availableProviders.bootstrapProviders[0] to be %s:%s, got %s:%s", someProviderName, someProviderVersion, v.Name, v.VersionOrConstraint)
+				}
+
+				return nil
+			}).WithTimeout(timeout).WithPolling(interval).Should(Succeed())
+
+			By("Reconciling the cluster template")
+			clusterTemplateReconciler := &ClusterTemplateReconciler{TemplateReconciler: TemplateReconciler{
+				Client:                k8sClient,
+				downloadHelmChartFunc: fakeDownloadHelmChartFunc,
+			}}
+			_, err := clusterTemplateReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{
+				Name:      clusterTemplateName,
+				Namespace: metav1.NamespaceDefault,
+			}})
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Having the valid cluster template status")
+			Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterTemplate), clusterTemplate)).To(Succeed())
+			Expect(clusterTemplate.Status.Valid && clusterTemplate.Status.ValidationError == "").To(BeTrue())
+			Expect(clusterTemplate.Status.Providers.BootstrapProviders).To(HaveLen(1))
+			Expect(clusterTemplate.Status.Providers.BootstrapProviders[0]).To(Equal(hmcmirantiscomv1alpha1.ProviderTuple{Name: someProviderName, VersionOrConstraint: someProviderVersionConstraint}))
+
+			By("Removing the created objects")
+			Expect(k8sClient.Delete(ctx, mgmt)).To(Succeed())
+			Expect(k8sClient.Delete(ctx, clusterTemplate)).To(Succeed())
+
+			By("Checking the created objects have been removed")
+			Eventually(func() bool {
+				return apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(mgmt), &hmcmirantiscomv1alpha1.Management{})) &&
+					apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterTemplate), &hmcmirantiscomv1alpha1.ClusterTemplate{}))
+			}).WithTimeout(timeout).WithPolling(interval).Should(BeTrue())
+		})
 	})
 })
diff --git a/internal/webhook/managedcluster_webhook.go b/internal/webhook/managedcluster_webhook.go
index 811765286..21d57237b 100644
--- a/internal/webhook/managedcluster_webhook.go
+++ b/internal/webhook/managedcluster_webhook.go
@@ -16,10 +16,7 @@ package webhook
 
 import (
 	"context"
-	"errors"
 	"fmt"
-	"slices"
-	"sort"
 
 	"github.com/Masterminds/semver/v3"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -65,7 +62,7 @@ func (v *ManagedClusterValidator) ValidateCreate(ctx context.Context, obj runtim
 		return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err)
 	}
 
-	if err := v.isTemplateValid(ctx, template); err != nil {
+	if err := isTemplateValid(template); err !=
nil { return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } @@ -88,7 +85,7 @@ func (v *ManagedClusterValidator) ValidateUpdate(ctx context.Context, _ runtime. return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } - if err := v.isTemplateValid(ctx, template); err != nil { + if err := isTemplateValid(template); err != nil { return nil, fmt.Errorf("%s: %v", invalidManagedClusterMsg, err) } @@ -104,16 +101,6 @@ func validateK8sCompatibility(ctx context.Context, cl client.Client, template *h return nil // nothing to do } - svcTpls := new(hmcv1alpha1.ServiceTemplateList) - if err := cl.List(ctx, svcTpls, client.InNamespace(mc.Namespace)); err != nil { - return fmt.Errorf("failed to list ServiceTemplates in %s namespace: %w", mc.Namespace, err) - } - - svcTplName2KConstraint := make(map[string]string, len(svcTpls.Items)) - for _, v := range svcTpls.Items { - svcTplName2KConstraint[v.Name] = v.Status.KubernetesConstraint - } - mcVersion, err := semver.NewVersion(template.Status.KubernetesVersion) if err != nil { // should never happen return fmt.Errorf("failed to parse k8s version %s of the ManagedCluster %s/%s: %w", template.Status.KubernetesVersion, mc.Namespace, mc.Name, err) @@ -124,24 +111,25 @@ func validateK8sCompatibility(ctx context.Context, cl client.Client, template *h continue } - kc, ok := svcTplName2KConstraint[v.Template] - if !ok { - return fmt.Errorf("specified ServiceTemplate %s/%s is missing in the cluster", mc.Namespace, v.Template) + svcTpl := new(hmcv1alpha1.ServiceTemplate) + if err := cl.Get(ctx, client.ObjectKey{Namespace: mc.Namespace, Name: v.Template}, svcTpl); err != nil { + return fmt.Errorf("failed to get ServiceTemplate %s/%s: %w", mc.Namespace, v.Template, err) } - if kc == "" { + constraint := svcTpl.Status.KubernetesConstraint + if constraint == "" { continue } - tplConstraint, err := semver.NewConstraint(kc) + tplConstraint, err := semver.NewConstraint(constraint) if err != nil { // should never happen - return fmt.Errorf("failed to parse k8s constrained version %s of the ServiceTemplate %s/%s: %w", kc, mc.Namespace, v.Template, err) + return fmt.Errorf("failed to parse k8s constrained version %s of the ServiceTemplate %s/%s: %w", constraint, mc.Namespace, v.Template, err) } if !tplConstraint.Check(mcVersion) { return fmt.Errorf("k8s version %s of the ManagedCluster %s/%s does not satisfy constrained version %s from the ServiceTemplate %s/%s", template.Status.KubernetesVersion, mc.Namespace, mc.Name, - kc, mc.Namespace, v.Template) + constraint, mc.Namespace, v.Template) } } @@ -171,7 +159,7 @@ func (v *ManagedClusterValidator) Default(ctx context.Context, obj runtime.Objec return fmt.Errorf("could not get template for the managedcluster: %v", err) } - if err := v.isTemplateValid(ctx, template); err != nil { + if err := isTemplateValid(template); err != nil { return fmt.Errorf("template is invalid: %v", err) } @@ -190,111 +178,10 @@ func (v *ManagedClusterValidator) getManagedClusterTemplate(ctx context.Context, return tpl, v.Get(ctx, client.ObjectKey{Namespace: templateNamespace, Name: templateName}, tpl) } -func (v *ManagedClusterValidator) isTemplateValid(ctx context.Context, template *hmcv1alpha1.ClusterTemplate) error { +func isTemplateValid(template *hmcv1alpha1.ClusterTemplate) error { if !template.Status.Valid { return fmt.Errorf("the template is not valid: %s", template.Status.ValidationError) } - if err := v.verifyProviders(ctx, template); err != nil { - return fmt.Errorf("failed to verify providers: %v", err) - } - - return nil 
-} - -func (v *ManagedClusterValidator) verifyProviders(ctx context.Context, template *hmcv1alpha1.ClusterTemplate) error { - management := new(hmcv1alpha1.Management) - if err := v.Get(ctx, client.ObjectKey{Name: hmcv1alpha1.ManagementName}, management); err != nil { - return err - } - - const ( - bootstrapProviderType = "bootstrap" - controlPlateProviderType = "control plane" - infraProviderType = "infrastructure" - ) - - var ( - exposedProviders = management.Status.AvailableProviders - requiredProviders = template.Status.Providers - wrongVersionProviders, missingProviders = make(map[string][]string, 3), make(map[string][]string, 3) - - err error - ) - - missingProviders[bootstrapProviderType], wrongVersionProviders[bootstrapProviderType], err = getMissingProvidersWithWrongVersions(exposedProviders.BootstrapProviders, requiredProviders.BootstrapProviders) - if err != nil { - return err - } - - missingProviders[controlPlateProviderType], wrongVersionProviders[controlPlateProviderType], err = getMissingProvidersWithWrongVersions(exposedProviders.ControlPlaneProviders, requiredProviders.ControlPlaneProviders) - if err != nil { - return err - } - - missingProviders[infraProviderType], wrongVersionProviders[infraProviderType], err = getMissingProvidersWithWrongVersions(exposedProviders.InfrastructureProviders, requiredProviders.InfrastructureProviders) - if err != nil { - return err - } - - errs := collectErrors(missingProviders, "one or more required %s providers are not deployed yet: %v") - errs = append(errs, collectErrors(wrongVersionProviders, "one or more required %s providers does not satisfy constraints: %v")...) - if len(errs) > 0 { - sort.Slice(errs, func(i, j int) bool { - return errs[i].Error() < errs[j].Error() - }) - - return errors.Join(errs...) 
- } - return nil } - -func collectErrors(m map[string][]string, msgFormat string) (errs []error) { - for providerType, missing := range m { - if len(missing) > 0 { - slices.Sort(missing) - errs = append(errs, fmt.Errorf(msgFormat, providerType, missing)) - } - } - - return errs -} - -func getMissingProvidersWithWrongVersions(exposed, required []hmcv1alpha1.ProviderTuple) (missing, nonSatisfying []string, _ error) { - exposedSet := make(map[string]hmcv1alpha1.ProviderTuple, len(exposed)) - for _, v := range exposed { - exposedSet[v.Name] = v - } - - var merr error - for _, reqWithConstraint := range required { - exposedWithExactVer, ok := exposedSet[reqWithConstraint.Name] - if !ok { - missing = append(missing, reqWithConstraint.Name) - continue - } - - if exposedWithExactVer.VersionOrConstraint == "" || reqWithConstraint.VersionOrConstraint == "" { - continue - } - - exactVer, err := semver.NewVersion(exposedWithExactVer.VersionOrConstraint) - if err != nil { - merr = errors.Join(merr, fmt.Errorf("failed to parse version %s of the provider %s: %w", exposedWithExactVer.VersionOrConstraint, exposedWithExactVer.Name, err)) - continue - } - - requiredC, err := semver.NewConstraint(reqWithConstraint.VersionOrConstraint) - if err != nil { - merr = errors.Join(merr, fmt.Errorf("failed to parse constraint %s of the provider %s: %w", exposedWithExactVer.VersionOrConstraint, exposedWithExactVer.Name, err)) - continue - } - - if !requiredC.Check(exactVer) { - nonSatisfying = append(nonSatisfying, fmt.Sprintf("%s %s !~ %s", reqWithConstraint.Name, exposedWithExactVer.VersionOrConstraint, reqWithConstraint.VersionOrConstraint)) - } - } - - return missing, nonSatisfying, merr -} diff --git a/internal/webhook/managedcluster_webhook_test.go b/internal/webhook/managedcluster_webhook_test.go index 82c833dca..b1157f1c3 100644 --- a/internal/webhook/managedcluster_webhook_test.go +++ b/internal/webhook/managedcluster_webhook_test.go @@ -83,28 +83,6 @@ var ( }, err: "the ManagedCluster is invalid: the template is not valid: validation error example", }, - { - name: "should fail if one or more requested providers are not available yet", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), - existingObjects: []runtime.Object{ - management.NewManagement( - management.WithAvailableProviders(v1alpha1.ProvidersTupled{ - InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, - BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, - }), - ), - template.NewClusterTemplate( - template.WithName(testTemplateName), - template.WithProvidersStatus(v1alpha1.ProvidersTupled{ - InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "azure"}}, - BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, - ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, - }), - template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), - ), - }, - err: "the ManagedCluster is invalid: failed to verify providers: one or more required control plane providers are not deployed yet: [k0s]\none or more required infrastructure providers are not deployed yet: [azure]", - }, { name: "should succeed", managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), @@ -112,37 +90,9 @@ var ( mgmt, template.NewClusterTemplate( template.WithName(testTemplateName), - template.WithProvidersStatus(v1alpha1.ProvidersTupled{ - InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, - BootstrapProviders: 
[]v1alpha1.ProviderTuple{{Name: "k0s"}}, - ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, - }), - template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), - ), - }, - }, - { - name: "provider template versions does not satisfy cluster template constraints", - managedCluster: managedcluster.NewManagedCluster(managedcluster.WithClusterTemplate(testTemplateName)), - existingObjects: []runtime.Object{ - management.NewManagement(management.WithAvailableProviders(v1alpha1.ProvidersTupled{ - InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: "v1.0.0"}}, - BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, - ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: "v1.0.0"}}, - })), - template.NewClusterTemplate( - template.WithName(testTemplateName), - template.WithProvidersStatus(v1alpha1.ProvidersTupled{ - InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws", VersionOrConstraint: ">=999.0.0"}}, - BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: ">=999.0.0"}}, - ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s", VersionOrConstraint: ">=999.0.0"}}, - }), template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), ), }, - err: `the ManagedCluster is invalid: failed to verify providers: one or more required bootstrap providers does not satisfy constraints: [k0s v1.0.0 !~ >=999.0.0] -one or more required control plane providers does not satisfy constraints: [k0s v1.0.0 !~ >=999.0.0] -one or more required infrastructure providers does not satisfy constraints: [aws v1.0.0 !~ >=999.0.0]`, }, { name: "cluster template k8s version does not satisfy service template constraints", @@ -158,21 +108,11 @@ one or more required infrastructure providers does not satisfy constraints: [aws })), template.NewClusterTemplate( template.WithName(testTemplateName), - template.WithProvidersStatus(v1alpha1.ProvidersTupled{ - InfrastructureProviders: []v1alpha1.ProviderTuple{{Name: "aws"}}, - BootstrapProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, - ControlPlaneProviders: []v1alpha1.ProviderTuple{{Name: "k0s"}}, - }), template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), template.WithClusterStatusK8sVersion("v1.30.0"), ), template.NewServiceTemplate( template.WithName(testTemplateName), - template.WithProvidersStatus(v1alpha1.Providers{ - InfrastructureProviders: []string{"aws"}, - BootstrapProviders: []string{"k0s"}, - ControlPlaneProviders: []string{"k0s"}, - }), template.WithServiceK8sConstraint("<1.30"), template.WithValidationStatus(v1alpha1.TemplateValidationStatus{Valid: true}), ), From 3f9a2addc4b5f9feda254a7d94afa594a2e9ae7b Mon Sep 17 00:00:00 2001 From: Andrei Pavlov Date: Wed, 16 Oct 2024 17:31:09 +0700 Subject: [PATCH 12/29] Publish Release object to gh release assets Signed-off-by: Andrei Pavlov --- .github/workflows/release.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a98e439e3..ec328e3d3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -50,5 +50,7 @@ jobs: with: generate_release_notes: true prerelease: ${{ contains(github.ref_name, '-') }} - files: dist/install.yaml + files: | + dist/install.yaml + templates/provider/hmc-templates/files/release.yaml draft: true From b71966233577255ed0cc8424bd37a505509c9104 Mon Sep 17 00:00:00 2001 From: Michael 
Schmid Date: Wed, 16 Oct 2024 14:36:11 -0400 Subject: [PATCH 13/29] Upgrade k0smotron to v1.1.2 --- templates/provider/k0smotron/templates/providers.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/provider/k0smotron/templates/providers.yaml b/templates/provider/k0smotron/templates/providers.yaml index addf50193..bf3051054 100644 --- a/templates/provider/k0smotron/templates/providers.yaml +++ b/templates/provider/k0smotron/templates/providers.yaml @@ -3,7 +3,7 @@ kind: InfrastructureProvider metadata: name: k0smotron spec: - version: v1.0.4 + version: v1.1.2 fetchConfig: url: https://github.com/k0sproject/k0smotron/releases/{{ .Values.version }}/infrastructure-components.yaml {{- if .Values.configSecret.name }} @@ -17,7 +17,7 @@ kind: BootstrapProvider metadata: name: k0smotron spec: - version: v1.0.4 + version: v1.1.2 fetchConfig: url: https://github.com/k0sproject/k0smotron/releases/{{ .Values.version }}/bootstrap-components.yaml {{- if .Values.configSecret.name }} @@ -31,7 +31,7 @@ kind: ControlPlaneProvider metadata: name: k0smotron spec: - version: v1.0.4 + version: v1.1.2 fetchConfig: url: https://github.com/k0sproject/k0smotron/releases/{{ .Values.version }}/control-plane-components.yaml {{- if .Values.configSecret.name }} From 87f195d1241b9d0ddaecd4d41e3edc3fbe1f66d6 Mon Sep 17 00:00:00 2001 From: Michael Schmid Date: Wed, 16 Oct 2024 14:37:41 -0400 Subject: [PATCH 14/29] use the official `k0sproject-k0smotron` provider --- .../provider/k0smotron/templates/providers.yaml | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/templates/provider/k0smotron/templates/providers.yaml b/templates/provider/k0smotron/templates/providers.yaml index bf3051054..0b7178684 100644 --- a/templates/provider/k0smotron/templates/providers.yaml +++ b/templates/provider/k0smotron/templates/providers.yaml @@ -1,11 +1,9 @@ apiVersion: operator.cluster.x-k8s.io/v1alpha2 kind: InfrastructureProvider metadata: - name: k0smotron + name: k0sproject-k0smotron spec: version: v1.1.2 - fetchConfig: - url: https://github.com/k0sproject/k0smotron/releases/{{ .Values.version }}/infrastructure-components.yaml {{- if .Values.configSecret.name }} configSecret: name: {{ .Values.configSecret.name }} @@ -15,11 +13,9 @@ spec: apiVersion: operator.cluster.x-k8s.io/v1alpha2 kind: BootstrapProvider metadata: - name: k0smotron + name: k0sproject-k0smotron spec: version: v1.1.2 - fetchConfig: - url: https://github.com/k0sproject/k0smotron/releases/{{ .Values.version }}/bootstrap-components.yaml {{- if .Values.configSecret.name }} configSecret: name: {{ .Values.configSecret.name }} @@ -29,11 +25,9 @@ spec: apiVersion: operator.cluster.x-k8s.io/v1alpha2 kind: ControlPlaneProvider metadata: - name: k0smotron + name: k0sproject-k0smotron spec: version: v1.1.2 - fetchConfig: - url: https://github.com/k0sproject/k0smotron/releases/{{ .Values.version }}/control-plane-components.yaml {{- if .Values.configSecret.name }} configSecret: name: {{ .Values.configSecret.name }} From 0c4a7bec1b78e4fc6f9612915b0404191dba9232 Mon Sep 17 00:00:00 2001 From: Michael Schmid Date: Wed, 16 Oct 2024 14:49:24 -0400 Subject: [PATCH 15/29] remove in-template BeachHeadServices --- templates/cluster/aws-standalone-cp/README.md | 11 ----- .../beachheadservices/cert-manager.yaml | 42 ------------------- .../beachheadservices/nginx-ingress.yaml | 36 ---------------- .../cluster/aws-standalone-cp/values.yaml | 6 +-- test/e2e/e2e_test.go | 1 - test/managedcluster/constants.go | 1 - 
.../resources/aws-standalone-cp.yaml.tpl | 1 - 7 files changed, 1 insertion(+), 97 deletions(-) delete mode 100644 templates/cluster/aws-standalone-cp/README.md delete mode 100644 templates/cluster/aws-standalone-cp/templates/beachheadservices/cert-manager.yaml delete mode 100644 templates/cluster/aws-standalone-cp/templates/beachheadservices/nginx-ingress.yaml diff --git a/templates/cluster/aws-standalone-cp/README.md b/templates/cluster/aws-standalone-cp/README.md deleted file mode 100644 index 268e26297..000000000 --- a/templates/cluster/aws-standalone-cp/README.md +++ /dev/null @@ -1,11 +0,0 @@ -## Install applications into Target Cluster - -To install applications into the target cluster created using Cluster API (CAPI) upon creation, a Flux `HelmRelease` object is to be made such that its `.spec.KubeConfig` references the kubeconfig of the target cluster. - -**Reference:** https://fluxcd.io/flux/components/helm/helmreleases/#remote-clusters--cluster-api - -This chart/template already defines the following applications under `templates/beachheadservices` which can be be installed into the target cluster by setting `.Values.installBeachHeadServices=true`: -1. cert-manager -2. nginx-ingress - -**Important:** The Flux objects added to `templates/beachheadservices` to install custom applications must have the `hmc.mirantis.com/managed: "true"` label to be reconciled by HMC. diff --git a/templates/cluster/aws-standalone-cp/templates/beachheadservices/cert-manager.yaml b/templates/cluster/aws-standalone-cp/templates/beachheadservices/cert-manager.yaml deleted file mode 100644 index 36c6b8f33..000000000 --- a/templates/cluster/aws-standalone-cp/templates/beachheadservices/cert-manager.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- if .Values.installBeachHeadServices }} -apiVersion: source.toolkit.fluxcd.io/v1 -kind: HelmRepository -metadata: - name: cert-manager - labels: - hmc.mirantis.com/managed: "true" -spec: - interval: 24h - url: https://charts.jetstack.io ---- -apiVersion: helm.toolkit.fluxcd.io/v2 -kind: HelmRelease -metadata: - name: {{ include "cluster.name" . }}-cert-manager - labels: - hmc.mirantis.com/managed: "true" -spec: - chart: - metadata: - labels: - hmc.mirantis.com/managed: "true" - spec: - chart: cert-manager - version: "v1.12.3" - sourceRef: - kind: HelmRepository - name: cert-manager - install: - createNamespace: true - remediation: - retries: -1 - interval: 10m - kubeConfig: - secretRef: - name: {{ include "cluster.name" . }}-kubeconfig - releaseName: cert-manager - targetNamespace: cert-manager - storageNamespace: cert-manager - values: - installCRDs: true -{{- end }} diff --git a/templates/cluster/aws-standalone-cp/templates/beachheadservices/nginx-ingress.yaml b/templates/cluster/aws-standalone-cp/templates/beachheadservices/nginx-ingress.yaml deleted file mode 100644 index 73c329159..000000000 --- a/templates/cluster/aws-standalone-cp/templates/beachheadservices/nginx-ingress.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if .Values.installBeachHeadServices }} -apiVersion: source.toolkit.fluxcd.io/v1beta2 -kind: OCIRepository -metadata: - name: nginx-ingress - labels: - hmc.mirantis.com/managed: "true" -spec: - interval: 24h - url: oci://ghcr.io/nginxinc/charts/nginx-ingress - ref: - semver: "1.3.2" ---- -apiVersion: helm.toolkit.fluxcd.io/v2 -kind: HelmRelease -metadata: - name: {{ include "cluster.name" . 
}}-nginx-ingress - labels: - hmc.mirantis.com/managed: "true" -spec: - targetNamespace: nginx-ingress - storageNamespace: nginx-ingress - kubeConfig: - secretRef: - name: {{ include "cluster.name" . }}-kubeconfig - interval: 10m - chartRef: - kind: OCIRepository - name: nginx-ingress - install: - createNamespace: true - remediation: - retries: -1 - values: - fullnameOverride: nginx-ingress -{{- end }} diff --git a/templates/cluster/aws-standalone-cp/values.yaml b/templates/cluster/aws-standalone-cp/values.yaml index 2d09cf5c8..fd0d52ae0 100644 --- a/templates/cluster/aws-standalone-cp/values.yaml +++ b/templates/cluster/aws-standalone-cp/values.yaml @@ -46,8 +46,4 @@ worker: # K0s parameters k0s: - version: v1.30.4+k0s.0 - -# Optionally install applications defined under -# templates/beachheadservices into target cluster -installBeachHeadServices: false + version: v1.30.4+k0s.0 \ No newline at end of file diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index e13282d0c..87b4ca584 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -132,7 +132,6 @@ var _ = Describe("controller", Ordered, func() { // Deploy standalone with an xlarge instance since it will also be // hosting the hosted cluster. GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") - GinkgoT().Setenv(managedcluster.EnvVarInstallBeachHeadServices, "false") templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) diff --git a/test/managedcluster/constants.go b/test/managedcluster/constants.go index cd43527fb..5badd6112 100644 --- a/test/managedcluster/constants.go +++ b/test/managedcluster/constants.go @@ -18,7 +18,6 @@ const ( // Common EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" EnvVarHostedManagedClusterName = "HOSTED_MANAGED_CLUSTER_NAME" - EnvVarInstallBeachHeadServices = "INSTALL_BEACH_HEAD_SERVICES" EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" EnvVarWorkerNumber = "WORKER_NUMBER" EnvVarNamespace = "NAMESPACE" diff --git a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl index e5ae88486..f81532fea 100644 --- a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -16,6 +16,5 @@ spec: instanceType: ${AWS_INSTANCE_TYPE:=t3.small} worker: instanceType: ${AWS_INSTANCE_TYPE:=t3.small} - installBeachHeadServices: ${INSTALL_BEACH_HEAD_SERVICES:=true} From 49c8c81ddc5736e5d0b95df1490f5a40a0765d79 Mon Sep 17 00:00:00 2001 From: Andrei Pavlov Date: Thu, 17 Oct 2024 13:47:53 +0700 Subject: [PATCH 16/29] Update provider versions to latest Signed-off-by: Andrei Pavlov --- .../provider/cluster-api-provider-azure/Chart.yaml | 2 +- .../cluster-api-provider-azure/templates/provider.yaml | 2 +- .../provider/cluster-api-provider-vsphere/Chart.yaml | 2 +- .../templates/provider.yaml | 2 +- templates/provider/cluster-api/Chart.yaml | 2 +- templates/provider/cluster-api/templates/provider.yaml | 2 +- .../hmc-templates/files/templates/projectsveltos.yaml | 4 ++-- templates/provider/hmc/Chart.lock | 10 +++++----- templates/provider/hmc/Chart.yaml | 6 +++--- templates/provider/projectsveltos/Chart.lock | 6 +++--- templates/provider/projectsveltos/Chart.yaml | 6 +++--- 11 files changed, 22 insertions(+), 22 deletions(-) diff --git a/templates/provider/cluster-api-provider-azure/Chart.yaml b/templates/provider/cluster-api-provider-azure/Chart.yaml index c7aba8223..1836ecc63 
100644 --- a/templates/provider/cluster-api-provider-azure/Chart.yaml +++ b/templates/provider/cluster-api-provider-azure/Chart.yaml @@ -18,6 +18,6 @@ version: 0.0.1 # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.16.0" +appVersion: "1.17.0" annotations: hmc.mirantis.com/infrastructure-providers: azure diff --git a/templates/provider/cluster-api-provider-azure/templates/provider.yaml b/templates/provider/cluster-api-provider-azure/templates/provider.yaml index e9015b3cc..bcabcefe6 100644 --- a/templates/provider/cluster-api-provider-azure/templates/provider.yaml +++ b/templates/provider/cluster-api-provider-azure/templates/provider.yaml @@ -3,7 +3,7 @@ kind: InfrastructureProvider metadata: name: azure spec: - version: v1.16.0 + version: v1.17.0 {{- if .Values.configSecret.name }} configSecret: name: {{ .Values.configSecret.name }} diff --git a/templates/provider/cluster-api-provider-vsphere/Chart.yaml b/templates/provider/cluster-api-provider-vsphere/Chart.yaml index 6112ceb2b..ec5260231 100644 --- a/templates/provider/cluster-api-provider-vsphere/Chart.yaml +++ b/templates/provider/cluster-api-provider-vsphere/Chart.yaml @@ -18,7 +18,7 @@ version: 0.0.1 # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.11.0" +appVersion: "1.11.1" annotations: hmc.mirantis.com/type: provider hmc.mirantis.com/infrastructure-providers: vsphere diff --git a/templates/provider/cluster-api-provider-vsphere/templates/provider.yaml b/templates/provider/cluster-api-provider-vsphere/templates/provider.yaml index 0fa57f809..01f3d8053 100644 --- a/templates/provider/cluster-api-provider-vsphere/templates/provider.yaml +++ b/templates/provider/cluster-api-provider-vsphere/templates/provider.yaml @@ -3,7 +3,7 @@ kind: InfrastructureProvider metadata: name: vsphere spec: - version: v1.11.0 + version: v1.11.1 {{- if .Values.configSecret.name }} configSecret: name: {{ .Values.configSecret.name }} diff --git a/templates/provider/cluster-api/Chart.yaml b/templates/provider/cluster-api/Chart.yaml index ec52541b1..c7083c864 100644 --- a/templates/provider/cluster-api/Chart.yaml +++ b/templates/provider/cluster-api/Chart.yaml @@ -18,4 +18,4 @@ version: 0.0.1 # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.7.2" +appVersion: "1.8.4" diff --git a/templates/provider/cluster-api/templates/provider.yaml b/templates/provider/cluster-api/templates/provider.yaml index 7817980fd..a8cbd4cb6 100644 --- a/templates/provider/cluster-api/templates/provider.yaml +++ b/templates/provider/cluster-api/templates/provider.yaml @@ -3,7 +3,7 @@ kind: CoreProvider metadata: name: cluster-api spec: - version: v1.7.2 + version: v1.8.4 {{- if .Values.configSecret.name }} configSecret: name: {{ .Values.configSecret.name }} diff --git a/templates/provider/hmc-templates/files/templates/projectsveltos.yaml b/templates/provider/hmc-templates/files/templates/projectsveltos.yaml index 5add09d01..9af1ca7ea 100644 --- a/templates/provider/hmc-templates/files/templates/projectsveltos.yaml +++ b/templates/provider/hmc-templates/files/templates/projectsveltos.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ProviderTemplate metadata: - name: projectsveltos-0-39-0 + name: projectsveltos-0-40-0 annotations: helm.sh/resource-policy: keep spec: helm: chartName: projectsveltos - chartVersion: 0.39.0 + chartVersion: 0.40.0 diff --git a/templates/provider/hmc/Chart.lock b/templates/provider/hmc/Chart.lock index 841bb2fda..9e1363578 100644 --- a/templates/provider/hmc/Chart.lock +++ b/templates/provider/hmc/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: flux2 repository: https://fluxcd-community.github.io/helm-charts - version: 2.13.0 + version: 2.14.0 - name: cert-manager repository: https://charts.jetstack.io - version: v1.15.1 + version: v1.16.1 - name: cluster-api-operator repository: https://kubernetes-sigs.github.io/cluster-api-operator - version: 0.12.0 -digest: sha256:f29162745d3dd876c3e35689b6771faa7b66d5f91c0ac532c28db580deadf218 -generated: "2024-08-21T19:47:21.00915+04:00" + version: 0.14.0 +digest: sha256:55fababfa5c61d84122f9a80d3e420a9cd29920fb344d017770109727457d993 +generated: "2024-10-17T14:14:48.728483+07:00" diff --git a/templates/provider/hmc/Chart.yaml b/templates/provider/hmc/Chart.yaml index 309ae2a54..741a762a8 100644 --- a/templates/provider/hmc/Chart.yaml +++ b/templates/provider/hmc/Chart.yaml @@ -22,14 +22,14 @@ appVersion: "0.1.0" dependencies: - name: flux2 - version: 2.13.0 + version: 2.14.0 repository: https://fluxcd-community.github.io/helm-charts condition: flux2.enabled - name: cert-manager - version: 1.15.1 + version: 1.16.1 repository: https://charts.jetstack.io condition: cert-manager.enabled - name: cluster-api-operator - version: 0.12.0 + version: 0.14.0 repository: https://kubernetes-sigs.github.io/cluster-api-operator condition: cluster-api-operator.enabled diff --git a/templates/provider/projectsveltos/Chart.lock b/templates/provider/projectsveltos/Chart.lock index e3f7be7fb..9f30f0b1f 100644 --- a/templates/provider/projectsveltos/Chart.lock +++ b/templates/provider/projectsveltos/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: projectsveltos repository: https://projectsveltos.github.io/helm-charts - version: 0.39.0 -digest: sha256:b7ba1c599c7e2a0fa2f76bffb82e4ab7b80dfde32b8633200428595a4589c067 -generated: "2024-10-08T10:02:11.69624-04:00" + version: 0.40.0 +digest: sha256:7f3e7d162343dc876eaf724169ba0f17f7e39728a2692c170e734c4474072174 +generated: "2024-10-17T13:45:56.574098+07:00" diff --git a/templates/provider/projectsveltos/Chart.yaml b/templates/provider/projectsveltos/Chart.yaml index 66d6d07c2..98f51d76a 100644 --- a/templates/provider/projectsveltos/Chart.yaml +++ b/templates/provider/projectsveltos/Chart.yaml @@ -2,9 +2,9 @@ apiVersion: v2 
 name: projectsveltos
 description: A Helm chart to refer the official projectsveltos helm chart
 type: application
-version: 0.39.0
-appVersion: "0.39.0"
+version: 0.40.0
+appVersion: "0.40.0"
 dependencies:
   - name: projectsveltos
-    version: 0.39.0
+    version: 0.40.0
     repository: https://projectsveltos.github.io/helm-charts

From 31cc845774ff296fb9d4e97f9a2ed75f7db4f3d4 Mon Sep 17 00:00:00 2001
From: Andrei Pavlov
Date: Thu, 17 Oct 2024 11:16:41 +0700
Subject: [PATCH 17/29] Strip leading v in VERSION

Signed-off-by: Andrei Pavlov
---
 Makefile                                            | 4 ++--
 templates/provider/hmc-templates/files/release.yaml | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/Makefile b/Makefile
index 51247bdd0..c914fafca 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 NAMESPACE ?= hmc-system
-VERSION ?= $(shell git describe --tags --always)
-FQDN_VERSION = $(patsubst v%,%,$(subst .,-, $(VERSION)))
+VERSION ?= $(patsubst v%,%,$(shell git describe --tags --always))
+FQDN_VERSION = $(subst .,-,$(VERSION))
 # Image URL to use all building/pushing image targets
 IMG ?= hmc/controller:latest
 IMG_REPO = $(shell echo $(IMG) | cut -d: -f1)

diff --git a/templates/provider/hmc-templates/files/release.yaml b/templates/provider/hmc-templates/files/release.yaml
index 563018933..19fd8e9bd 100644
--- a/templates/provider/hmc-templates/files/release.yaml
+++ b/templates/provider/hmc-templates/files/release.yaml
@@ -5,7 +5,7 @@ metadata:
   annotations:
     helm.sh/resource-policy: keep
 spec:
-  version: v0.0.2
+  version: 0.0.2
   hmc:
     template: hmc-0-0-1
   capi:

From 8557714f848edf00008485e7cb0ce99298b91512 Mon Sep 17 00:00:00 2001
From: zerospiel
Date: Thu, 17 Oct 2024 14:37:20 +0200
Subject: [PATCH 18/29] Fix wrong provider names in telemetry

---
 api/v1alpha1/common.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/api/v1alpha1/common.go b/api/v1alpha1/common.go
index 5ee4e32e0..c4f326b45 100644
--- a/api/v1alpha1/common.go
+++ b/api/v1alpha1/common.go
@@ -150,11 +150,11 @@ func (c ProvidersTupled) BootstrapProvidersNames() []string {
 }
 
 func (c ProvidersTupled) ControlPlaneProvidersNames() []string {
-	return c.names(bootstrapProvidersType)
+	return c.names(controlPlaneProvidersType)
 }
 
 func (c ProvidersTupled) InfrastructureProvidersNames() []string {
-	return c.names(bootstrapProvidersType)
+	return c.names(infrastructureProvidersType)
 }
 
 func (c ProvidersTupled) names(typ providersType) []string {

From bb2fb12c81485edea94d61f3372d14fca029f63d Mon Sep 17 00:00:00 2001
From: zerospiel
Date: Thu, 17 Oct 2024 17:52:13 +0200
Subject: [PATCH 19/29] Fix sveltos tpls version in the Release

Closes #510
---
 templates/provider/hmc-templates/files/release.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/templates/provider/hmc-templates/files/release.yaml b/templates/provider/hmc-templates/files/release.yaml
index 19fd8e9bd..e6de2946c 100644
--- a/templates/provider/hmc-templates/files/release.yaml
+++ b/templates/provider/hmc-templates/files/release.yaml
@@ -20,4 +20,4 @@ spec:
   - name: cluster-api-provider-aws
     template: cluster-api-provider-aws-0-0-1
   - name: projectsveltos
-    template: projectsveltos-0-39-0
+    template: projectsveltos-0-40-0

From 4ae38217a2788a889e7c9b264f6ace087d4778d8 Mon Sep 17 00:00:00 2001
From: Kyle Squizzato
Date: Fri, 20 Sep 2024 13:29:44 -0400
Subject: [PATCH 20/29] Add self-hosted runner, break E2E tests into labeled
 Specs

* Break provider tests into separate files with labels representing
  either onprem or cloud providers.
* Add new jobs to the CI workflow which dictate where tests will run:
  onprem provider tests like vSphere run on self-hosted runners, since
  they use internal resources to test; cloud provider tests use the
  existing workflow, since they can access their providers without
  internal network access and can take advantage of the much larger
  GitHub-hosted pool. Hosted and self-hosted tests can run concurrently.
* Make Cleanup depend on the cloud-e2etest only.
* Use new GINKGO_LABEL_FILTER to manage what tests run where (see the
  labeled-spec sketch below).
* Move controller validation into BeforeSuite since the controller needs
  to be up and ready for each provider test; this will also enable us to
  add controller-specific test cases later and make those run without
  the "test e2e" flag.
* Separate self-hosted and hosted test concurrency groups.
* Update docs with test filtering instructions.
* Ensure a Release exists for the custom build.version we deploy.
* Move all e2e related helpers into the e2e dir.
* Add new clusteridentity package for creating ClusterIdentity kinds and
  associated Secrets.
* Merge PR workflows together.
* Make sure VERSION gets passed across jobs.
* Ensure uniqueness among deployed ManagedClusters; simplify
  MANAGED_CLUSTER_NAME in CI to prevent the Azure availabilitySetName
  validation error.
* Default Azure test templates to westus2 to prevent issues with
  AvailabilityZone.
* Use the same concurrency-group across all jobs, except Cleanup, which
  intentionally does not belong to a concurrency-group.
* Use Setup Go across jobs for caching.
* Support patching other hosted clusters to status.ready with a common
  patching helper.
* Move the vSphere delete into AfterEach to serve as cleanup.
* Add support for cleaning Azure resources.
* Prevent ginkgo from timing out tests.
* Use the azure-disk CSI driver we deploy via templates.
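For illustration only (the Describe text and the "provider:aws" label
are hypothetical; the "provider:cloud" value is the one the workflow
actually filters on), a provider spec opts into a runner pool via
Ginkgo v2 labels and each CI job selects its slice with
GINKGO_LABEL_FILTER:

    // sketch of a labeled spec, e.g. in test/e2e/provider_aws_test.go
    var _ = Describe("AWS provider", Label("provider:cloud", "provider:aws"), func() {
        It("deploys and validates a ManagedCluster", func() {
            // runs only when the job's label filter matches, e.g.
            // GINKGO_LABEL_FILTER='provider:cloud' make test-e2e
        })
    })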
Signed-off-by: Kyle Squizzato --- .github/workflows/build.yml | 52 -- .github/workflows/build_test.yml | 240 ++++++++ .github/workflows/test.yml | 111 ---- .gitignore | 2 +- Makefile | 40 +- ..._nuke.yaml.tpl => aws-cloud-nuke.yaml.tpl} | 7 +- config/dev/azure-cloud-nuke.yaml.tpl | 26 + config/dev/vsphere-managedcluster.yaml | 4 +- docs/dev.md | 20 + .../templates/k0scontrolplane.yaml | 2 + test/e2e/controller.go | 86 --- test/e2e/controller_test.go | 29 + test/e2e/e2e_suite_test.go | 200 ++++++ test/e2e/e2e_test.go | 578 ------------------ test/{ => e2e}/kubeclient/kubeclient.go | 46 +- test/e2e/managedcluster/aws/aws.go | 73 +++ test/{ => e2e}/managedcluster/azure/azure.go | 89 ++- .../clusteridentity/clusteridentity.go | 253 ++++++++ test/e2e/managedcluster/common.go | 84 +++ test/{ => e2e}/managedcluster/constants.go | 26 +- .../managedcluster/managedcluster.go | 52 +- .../managedcluster/providervalidator.go | 4 +- .../resources/aws-hosted-cp.yaml.tpl | 7 +- .../resources/aws-standalone-cp.yaml.tpl | 7 +- .../resources/azure-hosted-cp.yaml.tpl | 9 +- .../resources/azure-standalone-cp.yaml.tpl | 10 +- .../resources/vsphere-hosted-cp.yaml.tpl | 5 +- .../resources/vsphere-standalone-cp.yaml.tpl | 5 +- .../managedcluster/validate_deleted.go | 2 +- .../managedcluster/validate_deployed.go | 25 +- test/e2e/managedcluster/vsphere/vsphere.go | 36 ++ test/e2e/provider_aws_test.go | 188 ++++++ test/e2e/provider_azure_test.go | 175 ++++++ test/e2e/provider_vsphere_test.go | 97 +++ test/managedcluster/aws/aws.go | 164 ----- test/managedcluster/vsphere/vsphere.go | 114 ---- test/utils/utils.go | 4 + 37 files changed, 1642 insertions(+), 1230 deletions(-) delete mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/build_test.yml delete mode 100644 .github/workflows/test.yml rename config/dev/{cloud_nuke.yaml.tpl => aws-cloud-nuke.yaml.tpl} (96%) create mode 100644 config/dev/azure-cloud-nuke.yaml.tpl delete mode 100644 test/e2e/controller.go create mode 100644 test/e2e/controller_test.go delete mode 100644 test/e2e/e2e_test.go rename test/{ => e2e}/kubeclient/kubeclient.go (85%) create mode 100644 test/e2e/managedcluster/aws/aws.go rename test/{ => e2e}/managedcluster/azure/azure.go (59%) create mode 100644 test/e2e/managedcluster/clusteridentity/clusteridentity.go create mode 100644 test/e2e/managedcluster/common.go rename test/{ => e2e}/managedcluster/constants.go (56%) rename test/{ => e2e}/managedcluster/managedcluster.go (81%) rename test/{ => e2e}/managedcluster/providervalidator.go (97%) rename test/{ => e2e}/managedcluster/resources/aws-hosted-cp.yaml.tpl (72%) rename test/{ => e2e}/managedcluster/resources/aws-standalone-cp.yaml.tpl (74%) rename test/{ => e2e}/managedcluster/resources/azure-hosted-cp.yaml.tpl (76%) rename test/{ => e2e}/managedcluster/resources/azure-standalone-cp.yaml.tpl (66%) rename test/{ => e2e}/managedcluster/resources/vsphere-hosted-cp.yaml.tpl (89%) rename test/{ => e2e}/managedcluster/resources/vsphere-standalone-cp.yaml.tpl (90%) rename test/{ => e2e}/managedcluster/validate_deleted.go (98%) rename test/{ => e2e}/managedcluster/validate_deployed.go (90%) create mode 100644 test/e2e/managedcluster/vsphere/vsphere.go create mode 100644 test/e2e/provider_aws_test.go create mode 100644 test/e2e/provider_azure_test.go create mode 100644 test/e2e/provider_vsphere_test.go delete mode 100644 test/managedcluster/aws/aws.go delete mode 100644 test/managedcluster/vsphere/vsphere.go diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml 
deleted file mode 100644 index 43566691e..000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Build and Unit Test - -concurrency: - group: test-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - push: - branches: - - main - - release-* - tags: - - '*' - paths-ignore: - - '**.md' - pull_request: - branches: - - main - - release-* - paths-ignore: - - 'config/**' - - '**.md' - -env: - GO_VERSION: '1.22' - -jobs: - build: - name: Build and Unit Test - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - - name: Lint - uses: golangci/golangci-lint-action@v6 - with: - args: --timeout 10m0s - - name: Verify all generated pieces are up-to-date - run: make generate-all && git add -N . && git diff --exit-code - - name: Unit tests - run: | - make test - - name: Build - run: | - make build - - name: Image build - run: | - make docker-build diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml new file mode 100644 index 000000000..e426a683c --- /dev/null +++ b/.github/workflows/build_test.yml @@ -0,0 +1,240 @@ +name: CI +on: + pull_request: + types: + - labeled + - opened + - synchronize + - reopened + branches: + - main + - release-* + paths-ignore: + - 'config/**' + - '**.md' +env: + GO_VERSION: '1.22' + REGISTRY_REPO: 'oci://ghcr.io/mirantis/hmc/charts-ci' + +jobs: + build: + concurrency: + group: build-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + name: Build and Unit Test + runs-on: ubuntu-latest + outputs: + version: ${{ steps.vars.outputs.version }} + clustername: ${{ steps.vars.outputs.clustername }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + cache: false + - name: Lint + uses: golangci/golangci-lint-action@v6 + with: + args: --timeout 10m0s + - name: Verify all generated pieces are up-to-date + run: make generate-all && git add -N . && git diff --exit-code + - name: Unit tests + run: | + make test + - name: Set up Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to GHCR + uses: docker/login-action@v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Get outputs + id: vars + run: | + echo "version=$(git describe --tags --always)" >> $GITHUB_OUTPUT + echo "clustername=ci-$(date +%s | cut -b6-10)" >> $GITHUB_OUTPUT + - name: Build and push HMC controller image + uses: docker/build-push-action@v6 + with: + build-args: | + LD_FLAGS=-s -w -X github.com/Mirantis/hmc/internal/build.Version=${{ steps.vars.outputs.version }} + context: . 
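+          # LD_FLAGS above: -s -w strip the symbol and DWARF tables to shrink
+          # the binary, and -X stamps the git tag into internal/build.Version
+          # so the controller reports the exact CI build it came from.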
+ platforms: linux/amd64 + tags: | + ghcr.io/mirantis/hmc/controller-ci:${{ steps.vars.outputs.version }} + push: true + cache-from: type=gha + cache-to: type=gha,mode=max + - name: Prepare and push HMC template charts + run: | + make hmc-chart-release + make helm-push + + controller-e2etest: + name: E2E Controller + runs-on: ubuntu-latest + needs: build + concurrency: + group: controller-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + outputs: + clustername: ${{ needs.build.outputs.clustername }} + version: ${{ needs.build.outputs.version }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup kubectl + uses: azure/setup-kubectl@v4 + - name: Run E2E tests + env: + GINKGO_LABEL_FILTER: 'controller' + MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} + IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' + VERSION: ${{ needs.build.outputs.version }} + run: | + make test-e2e + - name: Archive test results + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: cloud-e2etest-logs + path: | + test/e2e/*.log + + provider-cloud-e2etest: + name: E2E Cloud Providers + runs-on: ubuntu-latest + if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} + needs: build + concurrency: + group: cloud-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + outputs: + clustername: ${{ needs.build.outputs.clustername }} + version: ${{ needs.build.outputs.version }} + env: + AWS_REGION: us-west-2 + AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} + AZURE_REGION: westus2 + AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }} + AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup kubectl + uses: azure/setup-kubectl@v4 + - uses: actions/checkout@v4 + - name: Run E2E tests + env: + GINKGO_LABEL_FILTER: 'provider:cloud' + MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} + IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' + VERSION: ${{ needs.build.outputs.version }} + run: | + make test-e2e + - name: Archive test results + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: cloud-e2etest-logs + path: | + test/e2e/*.log + + provider-onprem-e2etest: + name: E2E On-Prem Providers + runs-on: self-hosted + if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} + needs: build + concurrency: + group: onprem-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + outputs: + clustername: ${{ needs.build.outputs.clustername }} + version: ${{ needs.build.outputs.version }} + env: + VSPHERE_USER: ${{ secrets.CI_VSPHERE_USER }} + VSPHERE_PASSWORD: ${{ secrets.CI_VSPHERE_PASSWORD }} + VSPHERE_SERVER: ${{ secrets.CI_VSPHERE_SERVER }} + VSPHERE_THUMBPRINT: ${{ secrets.CI_VSPHERE_THUMBPRINT }} + VSPHERE_DATACENTER: ${{ secrets.CI_VSPHERE_DATACENTER }} + VSPHERE_DATASTORE: ${{ secrets.CI_VSPHERE_DATASTORE }} + VSPHERE_RESOURCEPOOL: ${{ secrets.CI_VSPHERE_RESOURCEPOOL }} + VSPHERE_FOLDER: ${{ secrets.CI_VSPHERE_FOLDER }} + VSPHERE_CONTROL_PLANE_ENDPOINT: ${{ secrets.CI_VSPHERE_CONTROL_PLANE_ENDPOINT }} 
+ VSPHERE_VM_TEMPLATE: ${{ secrets.CI_VSPHERE_VM_TEMPLATE }} + VSPHERE_NETWORK: ${{ secrets.CI_VSPHERE_NETWORK }} + VSPHERE_SSH_KEY: ${{ secrets.CI_VSPHERE_SSH_KEY }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup kubectl + uses: azure/setup-kubectl@v4 + - name: Run E2E tests + env: + GINKGO_LABEL_FILTER: 'provider:onprem' + MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} + IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' + VERSION: ${{ needs.build.outputs.version }} + run: | + make test-e2e + - name: Archive test results + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: onprem-e2etest-logs + path: | + test/e2e/*.log + + cleanup: + name: Cleanup + needs: + - build + - provider-cloud-e2etest + runs-on: ubuntu-latest + if: ${{ always() && !contains(needs.provider-cloud-e2etest.result, 'skipped') && contains(needs.build.result, 'success') }} + timeout-minutes: 15 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: AWS Test Resources + env: + AWS_REGION: us-west-2 + AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} + AZURE_REGION: westus2 + AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }} + AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} + CLUSTER_NAME: '${{ needs.build.outputs.clustername }}' + run: | + make dev-aws-nuke + make dev-azure-nuke diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 681c985b3..000000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,111 +0,0 @@ -name: E2E Tests - -on: - pull_request: - types: - - labeled - - opened - - synchronize - - reopened - branches: - - main - - release-* - paths-ignore: - - 'config/**' - - '**.md' -env: - GO_VERSION: '1.22' - AWS_REGION: us-west-2 - AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} - AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }} - AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} - AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} - AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} - NAMESPACE: hmc-system - -jobs: - e2etest: - if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} - concurrency: - group: test-e2e-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - name: E2E Tests - runs-on: ubuntu-latest - outputs: - clustername: ${{ steps.vars.outputs.clustername }} - version: ${{ steps.vars.outputs.version }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - - name: Set up Buildx - uses: docker/setup-buildx-action@v3 - - name: Login to GHCR - uses: docker/login-action@v3.3.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Get outputs - id: vars - run: | - echo "version=$(git describe --tags --always)" >> $GITHUB_OUTPUT - echo "clustername=ci-$(date +%s)-e2e-test" >> $GITHUB_OUTPUT - - name: Build 
and push HMC controller image - uses: docker/build-push-action@v6 - with: - build-args: | - LD_FLAGS=-s -w -X github.com/Mirantis/hmc/internal/build.Version=${{ steps.vars.outputs.version }} - context: . - platforms: linux/amd64 - tags: | - ghcr.io/mirantis/hmc/controller-ci:${{ steps.vars.outputs.version }} - push: true - cache-from: type=gha - cache-to: type=gha,mode=max - - name: Prepare and push HMC template charts - run: | - make hmc-chart-release - REGISTRY_REPO="oci://ghcr.io/mirantis/hmc/charts-ci" make helm-push - - name: Setup kubectl - uses: azure/setup-kubectl@v4 - - name: Run E2E tests - env: - MANAGED_CLUSTER_NAME: ${{ steps.vars.outputs.clustername }} - REGISTRY_REPO: 'oci://ghcr.io/mirantis/hmc/charts-ci' - IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ steps.vars.outputs.version }}' - run: | - make test-e2e - - name: Archive test results - if: ${{ failure() }} - uses: actions/upload-artifact@v4 - with: - name: test-logs - path: | - test/e2e/*.log - cleanup: - name: Cleanup - needs: e2etest - runs-on: ubuntu-latest - if: ${{ always() && !contains(needs.*.result, 'skipped') }} - timeout-minutes: 15 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - - name: AWS Test Resources - env: - CLUSTER_NAME: '${{ needs.e2etest.outputs.clustername }}' - run: | - make dev-aws-nuke diff --git a/.gitignore b/.gitignore index fe4586097..be8549cc9 100644 --- a/.gitignore +++ b/.gitignore @@ -15,7 +15,7 @@ go.work go.work.sum # cloud-nuke config -*cloud_nuke.yaml +*cloud-nuke.yaml # Test artifacts test/e2e/*.log diff --git a/Makefile b/Makefile index c914fafca..6025e5650 100644 --- a/Makefile +++ b/Makefile @@ -113,7 +113,10 @@ test: generate-all fmt vet envtest tidy external-crd ## Run tests. # compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests using a Kind k8s instance as the management cluster. test-e2e: cli-install - KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=3h + @if [ "$$GINKGO_LABEL_FILTER" ]; then \ + ginkgo_label_flag="-ginkgo.label-filter=$$GINKGO_LABEL_FILTER"; \ + fi; \ + KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -ginkgo.timeout=3h -timeout=3h $$ginkgo_label_flag .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint @@ -204,8 +207,6 @@ REGISTRY_PORT ?= 5001 REGISTRY_REPO ?= oci://127.0.0.1:$(REGISTRY_PORT)/charts DEV_PROVIDER ?= aws REGISTRY_IS_OCI = $(shell echo $(REGISTRY_REPO) | grep -q oci && echo true || echo false) -CLUSTER_NAME ?= $(shell $(YQ) '.metadata.name' ./config/dev/deployment.yaml) - AWS_CREDENTIALS=${AWS_B64ENCODED_CREDENTIALS} ifndef ignore-not-found @@ -301,7 +302,7 @@ dev-push: docker-build helm-push .PHONY: dev-templates dev-templates: templates-generate - $(KUBECTL) -n $(NAMESPACE) apply -f $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/templates + $(KUBECTL) -n $(NAMESPACE) apply --force -f $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/templates .PHONY: dev-release dev-release: @@ -324,6 +325,9 @@ dev-eks-creds: dev-aws-creds .PHONY: dev-apply ## Apply the development environment by deploying the kind cluster, local registry and the HMC helm chart. 
dev-apply: kind-deploy registry-deploy dev-push dev-deploy dev-templates dev-release +.PHONY: test-apply +test-apply: set-hmc-version helm-package dev-deploy dev-templates dev-release + .PHONY: dev-destroy dev-destroy: kind-undeploy registry-undeploy ## Destroy the development environment by deleting the kind cluster and local registry. @@ -339,13 +343,23 @@ dev-mcluster-delete: envsubst dev-creds-apply: dev-$(DEV_PROVIDER)-creds .PHONY: dev-aws-nuke -dev-aws-nuke: envsubst awscli yq cloud-nuke ## Warning: Destructive! Nuke all AWS resources deployed by 'DEV_PROVIDER=aws dev-provider-apply', prefix with CLUSTER_NAME to nuke a specific cluster. +dev-aws-nuke: envsubst awscli yq cloud-nuke ## Warning: Destructive! Nuke all AWS resources deployed by 'DEV_PROVIDER=aws dev-mcluster-apply' @CLUSTER_NAME=$(CLUSTER_NAME) YQ=$(YQ) AWSCLI=$(AWSCLI) bash -c "./scripts/aws-nuke-ccm.sh elb" - @CLUSTER_NAME=$(CLUSTER_NAME) $(ENVSUBST) < config/dev/cloud_nuke.yaml.tpl > config/dev/cloud_nuke.yaml - DISABLE_TELEMETRY=true $(CLOUDNUKE) aws --region $$AWS_REGION --force --config config/dev/cloud_nuke.yaml --resource-type vpc,eip,nat-gateway,ec2,ec2-subnet,elb,elbv2,ebs,internet-gateway,network-interface,security-group - @rm config/dev/cloud_nuke.yaml + @CLUSTER_NAME=$(CLUSTER_NAME) $(ENVSUBST) < config/dev/aws-cloud-nuke.yaml.tpl > config/dev/aws-cloud-nuke.yaml + DISABLE_TELEMETRY=true $(CLOUDNUKE) aws --region $$AWS_REGION --force --config config/dev/aws-cloud-nuke.yaml --resource-type vpc,eip,nat-gateway,ec2,ec2-subnet,elb,elbv2,ebs,internet-gateway,network-interface,security-group + @rm config/dev/aws-cloud-nuke.yaml @CLUSTER_NAME=$(CLUSTER_NAME) YQ=$(YQ) AWSCLI=$(AWSCLI) bash -c "./scripts/aws-nuke-ccm.sh ebs" +.PHONY: dev-azure-nuke +dev-azure-nuke: envsubst azure-nuke ## Warning: Destructive! Nuke all Azure resources deployed by 'DEV_PROVIDER=azure dev-mcluster-apply' + @if [ "$(CLUSTER_NAME)" == "" ] || [ "$(AZURE_TENANT_ID)" == "" ] || [ "$(AZURE_REGION)" == "" ]; then \ + echo "CLUSTER_NAME, AZURE_TENANT_ID and AZURE_REGION must be set"; \ + exit 1; \ + fi + @CLUSTER_NAME=$(CLUSTER_NAME) $(ENVSUBST) < config/dev/azure-cloud-nuke.yaml.tpl > config/dev/azure-cloud-nuke.yaml + $(AZURENUKE) run --config config/dev/azure-cloud-nuke.yaml --force --no-dry-run + @rm config/dev/azure-cloud-nuke.yaml + .PHONY: cli-install cli-install: clusterawsadm clusterctl cloud-nuke envsubst yq awscli ## Install the necessary CLI tools for deployment, development and testing. @@ -377,6 +391,7 @@ YQ ?= $(LOCALBIN)/yq-$(YQ_VERSION) CLUSTERAWSADM ?= $(LOCALBIN)/clusterawsadm CLUSTERCTL ?= $(LOCALBIN)/clusterctl CLOUDNUKE ?= $(LOCALBIN)/cloud-nuke +AZURENUKE ?= $(LOCALBIN)/azure-nuke ADDLICENSE ?= $(LOCALBIN)/addlicense-$(ADDLICENSE_VERSION) ENVSUBST ?= $(LOCALBIN)/envsubst-$(ENVSUBST_VERSION) AWSCLI ?= $(LOCALBIN)/aws @@ -389,6 +404,7 @@ HELM_VERSION ?= v3.15.1 KIND_VERSION ?= v0.23.0 YQ_VERSION ?= v4.44.2 CLOUDNUKE_VERSION = v0.37.1 +AZURENUKE_VERSION = v1.1.0 CLUSTERAWSADM_VERSION ?= v2.5.2 CLUSTERCTL_VERSION ?= v1.7.3 ADDLICENSE_VERSION ?= v1.1.1 @@ -448,6 +464,12 @@ $(CLOUDNUKE): | $(LOCALBIN) curl -sL https://github.com/gruntwork-io/cloud-nuke/releases/download/$(CLOUDNUKE_VERSION)/cloud-nuke_$(OS)_$(ARCH) -o $(CLOUDNUKE) chmod +x $(CLOUDNUKE) +.PHONY: azure-nuke +azure-nuke: $(AZURENUKE) ## Download azure-nuke locally if necessary. 
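+# Unlike cloud-nuke above, which is published as a bare binary, azure-nuke is
+# distributed as a tarball; the recipe below extracts just the azure-nuke
+# binary into LOCALBIN.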
+$(AZURENUKE): | $(LOCALBIN) + curl -sL https://github.com/ekristen/azure-nuke/releases/download/$(AZURENUKE_VERSION)/azure-nuke-$(AZURENUKE_VERSION)-$(OS)-$(ARCH).tar.gz -o /tmp/azure-nuke.tar.gz + tar xvf /tmp/azure-nuke.tar.gz -C $(LOCALBIN) azure-nuke + .PHONY: clusterawsadm clusterawsadm: $(CLUSTERAWSADM) ## Download clusterawsadm locally if necessary. $(CLUSTERAWSADM): | $(LOCALBIN) @@ -474,7 +496,7 @@ awscli: $(AWSCLI) $(AWSCLI): | $(LOCALBIN) @if [ $(OS) == "linux" ]; then \ curl "https://awscli.amazonaws.com/awscli-exe-linux-$(shell uname -m)-$(AWSCLI_VERSION).zip" -o "/tmp/awscliv2.zip"; \ - unzip -qq /tmp/awscliv2.zip -d /tmp; \ + unzip -oqq /tmp/awscliv2.zip -d /tmp; \ /tmp/aws/install -i $(LOCALBIN)/aws-cli -b $(LOCALBIN) --update; \ fi; \ if [ $(OS) == "darwin" ]; then \ diff --git a/config/dev/cloud_nuke.yaml.tpl b/config/dev/aws-cloud-nuke.yaml.tpl similarity index 96% rename from config/dev/cloud_nuke.yaml.tpl rename to config/dev/aws-cloud-nuke.yaml.tpl index 1888fe965..378bf20c5 100644 --- a/config/dev/cloud_nuke.yaml.tpl +++ b/config/dev/aws-cloud-nuke.yaml.tpl @@ -1,14 +1,13 @@ # This config file is used by cloud-nuke to clean up named resources associated # with a specific managed cluster across an AWS account. CLUSTER_NAME is -# typically the metadata.name of the Deployment. +# typically the metadata.name of the ManagedCluster. # The resources listed here are ALL of the potential resources that can be # filtered by cloud-nuke, except for IAM resources since we'll never touch those. # See: https://github.com/gruntwork-io/cloud-nuke?tab=readme-ov-file#whats-supported # # Usage: -# - 'make aws-dev-nuke' will nuke resources affiliated with config/dev/deployment.yaml -# - 'CLUSTER_NAME=foo make aws-dev-nuke' will nuke resources affiliated with an AWS cluster named 'foo' -# Check cluster names with 'kubectl get clusters -n hmc-system' +# - 'CLUSTER_NAME=foo make dev-aws-nuke' will nuke resources affiliated with an AWS cluster named 'foo' +# Check cluster names with 'kubectl get managedcluster.hmc.mirantis.com -n hmc-system' ACM: include: diff --git a/config/dev/azure-cloud-nuke.yaml.tpl b/config/dev/azure-cloud-nuke.yaml.tpl new file mode 100644 index 000000000..be98b051f --- /dev/null +++ b/config/dev/azure-cloud-nuke.yaml.tpl @@ -0,0 +1,26 @@ +# This config file is used by azure-nuke to clean up named resources associated +# with a specific managed cluster across an Azure account. CLUSTER_NAME is +# typically the metadata.name of the ManagedCluster. +# This will nuke the ResourceGroup affiliated with the ManagedCluster. 
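+# azure-nuke filters normally exclude matching resources from deletion; with
+# invert: true the glob below should exclude everything EXCEPT
+# "${CLUSTER_NAME}*", leaving only the cluster's own resource group to be nuked.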
+#
+# Usage:
+# 'CLUSTER_NAME=foo AZURE_REGION=westus3 AZURE_TENANT_ID=12345 make dev-azure-nuke'
+#
+# Check cluster names with 'kubectl get managedcluster.hmc.mirantis.com -n hmc-system'
+
+regions:
+  - global
+  - ${AZURE_REGION}
+
+resource-types:
+  includes:
+    - ResourceGroup
+
+accounts:
+  ${AZURE_TENANT_ID}:
+    filters:
+      __global__:
+        - ResourceGroup:
+          type: "glob"
+          value: "${CLUSTER_NAME}*"
+          invert: true
diff --git a/config/dev/vsphere-managedcluster.yaml b/config/dev/vsphere-managedcluster.yaml
index dbb7eb0bf..c9daaf311 100644
--- a/config/dev/vsphere-managedcluster.yaml
+++ b/config/dev/vsphere-managedcluster.yaml
@@ -13,7 +13,7 @@ spec:
       name: vsphere-cluster-identity
     vsphere:
       server: ${VSPHERE_SERVER}
-      thumbprint: ${VSPHERE_THUMBPRINT} 
+      thumbprint: ${VSPHERE_THUMBPRINT}
       datacenter: ${VSPHERE_DATACENTER}
       datastore: ${VSPHERE_DATASTORE}
       resourcePool: ${VSPHERE_RESOURCEPOOL}
@@ -21,7 +21,7 @@ spec:
       username: ${VSPHERE_USER}
       password: ${VSPHERE_PASSWORD}
   controlPlaneEndpointIP: ${VSPHERE_CONTROL_PLANE_ENDPOINT}
-  
+
   controlPlane:
     ssh:
       user: ubuntu
diff --git a/docs/dev.md b/docs/dev.md
index 395fc2e6e..108ec81d4 100644
--- a/docs/dev.md
+++ b/docs/dev.md
@@ -144,6 +144,26 @@ tests that run in CI use names such as `ci-1234567890-e2e-test`. You can always
 pass `MANAGED_CLUSTER_NAME=` from the get-go to customize the name used by the
 test.
 
+### Filtering test runs
+Provider tests are broken into two types, `onprem` and `cloud`. For CI,
+`provider:onprem` tests run on self-hosted runners provided by Mirantis.
+`provider:cloud` tests run on GitHub Actions runners and interact with cloud
+infrastructure providers such as AWS or Azure.
+
+Each specific provider test also has its own label; for example, `provider:aws`
+can be used to run only the AWS tests. To use these filters with the
+`make test-e2e` target, pass the `GINKGO_LABEL_FILTER` env var, for example:
+
+```
+GINKGO_LABEL_FILTER="provider:cloud" make test-e2e
+```
+
+would run all cloud provider tests. To see a list of all available labels, run:
+
+```
+ginkgo labels ./test/e2e
+```
+
 ### Nuke created resources
 In CI we run `make dev-aws-nuke` to clean up test resources; you can do so
 manually with:
diff --git a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml
index 038fb1a93..708ce8094 100644
--- a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml
+++ b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml
@@ -114,8 +114,10 @@ spec:
               images:
                 driver:
                   tag: v3.1.2
+                  repo: "registry.k8s.io/csi-vsphere/driver"
                 syncer:
                   tag: v3.1.2
+                  repo: "registry.k8s.io/csi-vsphere/syncer"
       machineTemplate:
         infrastructureRef:
           apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
diff --git a/test/e2e/controller.go b/test/e2e/controller.go
deleted file mode 100644
index 8b8bdf244..000000000
--- a/test/e2e/controller.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2024
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- 
-package e2e
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/Mirantis/hmc/test/kubeclient"
-	"github.com/Mirantis/hmc/test/managedcluster"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
-	hmcControllerLabel = "app.kubernetes.io/name=hmc"
-)
-
-// verifyControllersUp validates that controllers for the given providers list
-// are running and ready. Optionally specify providers to check for rather than
-// waiting for all providers to be ready.
-func verifyControllersUp(kc *kubeclient.KubeClient, providers ...managedcluster.ProviderType) error {
-	if err := validateController(kc, hmcControllerLabel, "hmc-controller-manager"); err != nil {
-		return err
-	}
-
-	if providers == nil {
-		providers = []managedcluster.ProviderType{
-			managedcluster.ProviderCAPI,
-			managedcluster.ProviderAWS,
-			managedcluster.ProviderAzure,
-		}
-	}
-
-	for _, provider := range providers {
-		// Ensure only one controller pod is running.
-		if err := validateController(kc, managedcluster.GetProviderLabel(provider), string(provider)); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func validateController(kc *kubeclient.KubeClient, labelSelector string, name string) error {
-	deployList, err := kc.Client.AppsV1().Deployments(kc.Namespace).List(context.Background(), metav1.ListOptions{
-		LabelSelector: labelSelector,
-	})
-	if err != nil {
-		return fmt.Errorf("failed to list %s controller deployments: %w", name, err)
-	}
-
-	if len(deployList.Items) < 1 {
-		return fmt.Errorf("expected at least 1 %s controller deployment, got %d",
-			name, len(deployList.Items))
-	}
-
-	deployment := deployList.Items[0]
-
-	// Ensure the deployment is not being deleted.
-	if deployment.DeletionTimestamp != nil {
-		return fmt.Errorf("controller pod: %s deletion timestamp should be nil, got: %v",
-			deployment.Name, deployment.DeletionTimestamp)
-	}
-	// Ensure the deployment is running and has the expected name.
-	if !strings.Contains(deployment.Name, "controller-manager") {
-		return fmt.Errorf("controller deployment name %s does not contain 'controller-manager'", deployment.Name)
-	}
-	if deployment.Status.ReadyReplicas < 1 {
-		return fmt.Errorf("controller deployment: %s does not yet have any ReadyReplicas", deployment.Name)
-	}
-
-	return nil
-}
diff --git a/test/e2e/controller_test.go b/test/e2e/controller_test.go
new file mode 100644
index 000000000..7b3897a0b
--- /dev/null
+++ b/test/e2e/controller_test.go
@@ -0,0 +1,29 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+	. "github.com/onsi/ginkgo/v2"
+)
+
+var _ = Describe("Controller", Label("controller"), Ordered, func() {
+	// Right now we have no Controller-specific tests, but our Before/AfterSuite
+	// deploys and validates the controller, so we need a dummy test here to
+	// ensure that Before/AfterSuite are run. With this we can make sure we
+	// at least smoke test the controller outside of the larger provider e2e
+	// tests. When the controller actually has more specific tests, this should
+	// be removed.
+	It("dummy It so that BeforeSuite/AfterSuite are run", func() {})
+})
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index bccb11223..eef3a1445 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -15,11 +15,26 @@ package e2e
 
 import (
+	"bufio"
+	"context"
 	"fmt"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
 	"testing"
+	"time"
 
+	internalutils "github.com/Mirantis/hmc/internal/utils"
+	"github.com/Mirantis/hmc/test/e2e/kubeclient"
+	"github.com/Mirantis/hmc/test/e2e/managedcluster"
+	"github.com/Mirantis/hmc/test/utils"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
 )
 
 // Run e2e tests using the Ginkgo runner.
@@ -28,3 +43,188 @@ func TestE2E(t *testing.T) {
 	_, _ = fmt.Fprintf(GinkgoWriter, "Starting hmc suite\n")
 	RunSpecs(t, "e2e suite")
 }
+
+var _ = BeforeSuite(func() {
+	GinkgoT().Setenv(managedcluster.EnvVarNamespace, internalutils.DefaultSystemNamespace)
+
+	By("building and deploying the controller-manager")
+	cmd := exec.Command("make", "kind-deploy")
+	_, err := utils.Run(cmd)
+	Expect(err).NotTo(HaveOccurred())
+	cmd = exec.Command("make", "test-apply")
+	_, err = utils.Run(cmd)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("validating that the hmc-controller and CAPI provider controllers are running and ready")
+	kc := kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace)
+	Eventually(func() error {
+		err = verifyControllersUp(kc)
+		if err != nil {
+			_, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err)
+			return err
+		}
+		return nil
+	}).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+})
+
+var _ = AfterSuite(func() {
+	if !noCleanup() {
+		By("collecting logs from local controllers")
+		kc := kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace)
+		collectLogArtifacts(kc, "")
+
+		By("removing the controller-manager")
+		cmd := exec.Command("make", "dev-destroy")
+		_, err := utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred())
+	}
+})
+
+// verifyControllersUp validates that controllers for all providers are running
+// and ready.
+func verifyControllersUp(kc *kubeclient.KubeClient) error {
+	if err := validateController(kc, utils.HMCControllerLabel, "hmc-controller-manager"); err != nil {
+		return err
+	}
+
+	providers := []managedcluster.ProviderType{
+		managedcluster.ProviderCAPI,
+		managedcluster.ProviderAWS,
+		managedcluster.ProviderAzure,
+		managedcluster.ProviderVSphere,
+	}
+
+	for _, provider := range providers {
+		// Ensure only one controller pod is running.
+		if err := validateController(kc, managedcluster.GetProviderLabel(provider), string(provider)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func validateController(kc *kubeclient.KubeClient, labelSelector string, name string) error {
+	deployList, err := kc.Client.AppsV1().Deployments(kc.Namespace).List(context.Background(), metav1.ListOptions{
+		LabelSelector: labelSelector,
+		Limit:         1,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to list %s controller deployments: %w", name, err)
+	}
+
+	if len(deployList.Items) < 1 {
+		return fmt.Errorf("expected at least 1 %s controller deployment", name)
+	}
+
+	deployment := deployList.Items[0]
+
+	// Ensure the deployment is not being deleted.
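+	// In Kubernetes a non-nil deletionTimestamp means the object is already
+	// terminating, so a controller mid-teardown must not count as ready.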
+ if deployment.DeletionTimestamp != nil { + return fmt.Errorf("controller pod: %s deletion timestamp should be nil, got: %v", + deployment.Name, deployment.DeletionTimestamp) + } + // Ensure the deployment is running and has the expected name. + if !strings.Contains(deployment.Name, "controller-manager") { + return fmt.Errorf("controller deployment name %s does not contain 'controller-manager'", deployment.Name) + } + if deployment.Status.ReadyReplicas < 1 { + return fmt.Errorf("controller deployment: %s does not yet have any ReadyReplicas", deployment.Name) + } + + return nil +} + +// templateBy wraps a Ginkgo By with a block describing the template being +// tested. +func templateBy(t managedcluster.Template, description string) { + GinkgoHelper() + By(fmt.Sprintf("[%s] %s", t, description)) +} + +// collectLogArtifacts collects log output from each the HMC controller, +// CAPI controller and the provider controller(s) as well as output from clusterctl +// and stores them in the test/e2e directory as artifacts. clusterName can be +// optionally provided, passing an empty string will prevent clusterctl output +// from being fetched. If collectLogArtifacts fails it produces a warning +// message to the GinkgoWriter, but does not fail the test. +func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, providerTypes ...managedcluster.ProviderType) { + GinkgoHelper() + + filterLabels := []string{utils.HMCControllerLabel} + + var host string + hostURL, err := url.Parse(kc.Config.Host) + if err != nil { + utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err)) + } else { + host = strings.ReplaceAll(hostURL.Host, ":", "_") + } + + if providerTypes == nil { + filterLabels = managedcluster.FilterAllProviders() + } else { + for _, providerType := range providerTypes { + filterLabels = append(filterLabels, managedcluster.GetProviderLabel(providerType)) + } + } + + for _, label := range filterLabels { + pods, _ := kc.Client.CoreV1().Pods(kc.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: label, + }) + + for _, pod := range pods.Items { + req := kc.Client.CoreV1().Pods(kc.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ + TailLines: ptr.To(int64(1000)), + }) + podLogs, err := req.Stream(context.Background()) + if err != nil { + utils.WarnError(fmt.Errorf("failed to get log stream for pod %s: %w", pod.Name, err)) + continue + } + + output, err := os.Create(fmt.Sprintf("./test/e2e/%s.log", host+"-"+pod.Name)) + if err != nil { + utils.WarnError(fmt.Errorf("failed to create log file for pod %s: %w", pod.Name, err)) + continue + } + + r := bufio.NewReader(podLogs) + _, err = r.WriteTo(output) + if err != nil { + utils.WarnError(fmt.Errorf("failed to write log file for pod %s: %w", pod.Name, err)) + } + + if err = podLogs.Close(); err != nil { + utils.WarnError(fmt.Errorf("failed to close log stream for pod %s: %w", pod.Name, err)) + } + if err = output.Close(); err != nil { + utils.WarnError(fmt.Errorf("failed to close log file for pod %s: %w", pod.Name, err)) + } + } + } + + if clusterName != "" { + cmd := exec.Command("./bin/clusterctl", + "describe", "cluster", clusterName, "--namespace", internalutils.DefaultSystemNamespace, "--show-conditions=all") + output, err := utils.Run(cmd) + if err != nil { + utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err)) + return + } + err = os.WriteFile(filepath.Join("test/e2e", host+"-"+"clusterctl.log"), output, 0o644) + if err != nil { + utils.WarnError(fmt.Errorf("failed to write 
clusterctl log: %w", err)) + } + } +} + +func noCleanup() bool { + noCleanup := os.Getenv(managedcluster.EnvVarNoCleanup) + if noCleanup != "" { + By(fmt.Sprintf("skipping After node as %s is set", managedcluster.EnvVarNoCleanup)) + } + + return noCleanup != "" +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go deleted file mode 100644 index 87b4ca584..000000000 --- a/test/e2e/e2e_test.go +++ /dev/null @@ -1,578 +0,0 @@ -// Copyright 2024 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "bufio" - "context" - "fmt" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/Mirantis/hmc/test/kubeclient" - "github.com/Mirantis/hmc/test/managedcluster" - "github.com/Mirantis/hmc/test/managedcluster/aws" - "github.com/Mirantis/hmc/test/managedcluster/azure" - "github.com/Mirantis/hmc/test/managedcluster/vsphere" - "github.com/Mirantis/hmc/test/utils" -) - -const ( - namespace = "hmc-system" -) - -var _ = Describe("controller", Ordered, func() { - BeforeAll(func() { - By("building and deploying the controller-manager") - cmd := exec.Command("make", "dev-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterAll(func() { - if !noCleanup() { - By("removing the controller-manager") - cmd := exec.Command("make", "dev-destroy") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - } - }) - - Context("Operator", func() { - It("should run successfully", func() { - kc := kubeclient.NewFromLocal(namespace) - - By("validating that the hmc-controller and capi provider controllers are running") - Eventually(func() error { - err := verifyControllersUp(kc) - if err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) - return err - } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - GinkgoT().Setenv("NAMESPACE", namespace) - cmd := exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - // aws.CreateCredentialSecret(context.Background(), kc) - }) - }) - - Describe("AWS Templates", func() { - var ( - kc *kubeclient.KubeClient - standaloneClient *kubeclient.KubeClient - standaloneDeleteFunc func() error - hostedDeleteFunc func() error - kubecfgDeleteFunc func() error - clusterName string - ) - - BeforeAll(func() { - By("ensuring AWS credentials are set") - kc = kubeclient.NewFromLocal(namespace) - // aws.CreateCredentialSecret(context.Background(), kc) - GinkgoT().Setenv("NAMESPACE", namespace) - cmd := exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - // If we failed collect logs from each of the affiliated controllers - // as well as the output of clusterctl to store as artifacts. 
- if CurrentSpecReport().Failed() && !noCleanup() { - By("collecting failure logs from controllers") - if kc != nil { - collectLogArtifacts(kc, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) - } - if standaloneClient != nil { - collectLogArtifacts(standaloneClient, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) - } - - By("deleting resources after failure") - for _, deleteFunc := range []func() error{ - kubecfgDeleteFunc, - hostedDeleteFunc, - standaloneDeleteFunc, - } { - if deleteFunc != nil { - err := deleteFunc() - Expect(err).NotTo(HaveOccurred()) - } - } - } - }) - - It("should work with an AWS provider", func() { - // Deploy a standalone cluster and verify it is running/ready. - // Deploy standalone with an xlarge instance since it will also be - // hosting the hosted cluster. - GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") - - templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) - clusterName = sd.GetName() - - standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) - - templateBy(managedcluster.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, - clusterName, - managedcluster.ValidationActionDeploy, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - templateBy(managedcluster.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") - - // Download the KUBECONFIG for the standalone cluster and load it - // so we can call Make targets against this cluster. - // TODO: Ideally we shouldn't use Make here and should just convert - // these Make targets into Go code, but this will require a - // helmclient. - var kubeCfgPath string - kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), clusterName) - - GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) - cmd := exec.Command("make", "dev-deploy") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - cmd = exec.Command("make", "dev-templates") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - GinkgoT().Setenv("NAMESPACE", namespace) - cmd = exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) - - // Ensure AWS credentials are set in the standalone cluster. - standaloneClient = kc.NewFromCluster(context.Background(), namespace, clusterName) - // aws.CreateCredentialSecret(context.Background(), standaloneClient) - - templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") - Eventually(func() error { - err := verifyControllersUp(standaloneClient, managedcluster.ProviderCAPI, managedcluster.ProviderAWS) - if err != nil { - _, _ = fmt.Fprintf( - GinkgoWriter, "[%s] controller validation failed: %v\n", - string(managedcluster.TemplateAWSHostedCP), err) - return err - } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Populate the environment variables required for the hosted - // cluster. 
- aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) - - templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") - hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) - hdName := hd.GetName() - - // Deploy the hosted cluster on top of the standalone cluster. - hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) - - // Patch the AWSCluster resource as Ready, see: - // https://docs.k0smotron.io/stable/capi-aws/#prepare-the-aws-infra-provider - // Use Eventually as the AWSCluster might not be available - // immediately. - templateBy(managedcluster.TemplateAWSHostedCP, "Patching AWSCluster to ready") - Eventually(func() error { - if err := aws.PatchAWSClusterReady(context.Background(), standaloneClient, hd.GetName()); err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "failed to patch AWSCluster to ready: %v, retrying...\n", err) - return err - } - _, _ = fmt.Fprintf(GinkgoWriter, "Patch succeeded\n") - return nil - }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) - - // Verify the hosted cluster is running/ready. - templateBy(managedcluster.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully") - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAWSHostedCP, - hdName, - managedcluster.ValidationActionDeploy, - ) - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Delete the hosted ManagedCluster and verify it is removed. - templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster") - err = hostedDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deletionValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSHostedCP, - hdName, - managedcluster.ValidationActionDelete, - ) - Eventually(func() error { - return deletionValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Now delete the standalone ManagedCluster and verify it is - // removed, it is deleted last since it is the basis for the hosted - // cluster. - /* - FIXME(#339): This is currently disabled as the deletion of the - standalone cluster is failing due to outstanding issues. 
- templateBy(managedcluster.TemplateAWSStandaloneCP, "deleting the ManagedCluster") - err = standaloneDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deletionValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, - clusterName, - managedcluster.ValidationActionDelete, - ) - Eventually(func() error { - return deletionValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * - time.Second).Should(Succeed()) - */ - }) - }) - - Context("vSphere templates", func() { - var ( - kc *kubeclient.KubeClient - deleteFunc func() error - clusterName string - err error - ) - - BeforeAll(func() { - // Set here to skip CI runs for now - _, testVsphere := os.LookupEnv("TEST_VSPHERE") - if !testVsphere { - Skip("Skipping vSphere tests") - } - - By("ensuring that env vars are set correctly") - vsphere.CheckEnv() - By("creating kube client") - kc := kubeclient.NewFromLocal(namespace) - By("providing cluster identity") - credSecretName := "vsphere-cluster-identity-secret-e2e" - clusterIdentityName := "vsphere-cluster-identity-e2e" - Expect(vsphere.CreateSecret(kc, credSecretName)).Should(Succeed()) - Expect(vsphere.CreateClusterIdentity(kc, credSecretName, clusterIdentityName)).Should(Succeed()) - By("setting VSPHERE_CLUSTER_IDENTITY env variable") - Expect(os.Setenv("VSPHERE_CLUSTER_IDENTITY", clusterIdentityName)).Should(Succeed()) - }) - - AfterEach(func() { - // If we failed collect logs from each of the affiliated controllers - // as well as the output of clusterctl to store as artifacts. - if CurrentSpecReport().Failed() { - By("collecting failure logs from controllers") - collectLogArtifacts(kc, clusterName, managedcluster.ProviderVSphere, managedcluster.ProviderCAPI) - } - - if deleteFunc != nil { - By("deleting the deployment") - err = deleteFunc() - Expect(err).NotTo(HaveOccurred()) - } - }) - - It("should deploy standalone managed cluster", func() { - By("creating a managed cluster") - d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP) - clusterName = d.GetName() - - deleteFunc := kc.CreateManagedCluster(context.Background(), d) - - By("waiting for infrastructure providers to deploy successfully") - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateVSphereStandaloneCP, - clusterName, - managedcluster.ValidationActionDeploy, - ) - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - deletionValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateVSphereStandaloneCP, - clusterName, - managedcluster.ValidationActionDelete, - ) - By("verify the deployment deletes successfully") - err = deleteFunc() - Expect(err).NotTo(HaveOccurred()) - Eventually(func() error { - return deletionValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - }) - }) - - Describe("Azure Templates", Label("provider"), func() { - var ( - kc *kubeclient.KubeClient - standaloneClient *kubeclient.KubeClient - standaloneDeleteFunc func() error - hostedDeleteFunc func() error - kubecfgDeleteFunc func() error - sdName string - ) - - BeforeAll(func() { - By("ensuring Azure credentials are set") - kc = kubeclient.NewFromLocal(namespace) - azure.CreateCredentialSecret(context.Background(), kc) - }) - - AfterEach(func() { - // If we failed collect logs from each of the affiliated 
controllers - // as well as the output of clusterctl to store as artifacts. - if CurrentSpecReport().Failed() && !noCleanup() { - By("collecting failure logs from controllers") - if kc != nil { - collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) - } - if standaloneClient != nil { - collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) - } - - By("deleting resources after failure") - for _, deleteFunc := range []func() error{ - kubecfgDeleteFunc, - hostedDeleteFunc, - standaloneDeleteFunc, - } { - if deleteFunc != nil { - err := deleteFunc() - Expect(err).NotTo(HaveOccurred()) - } - } - } - }) - - It("should work with an Azure provider", func() { - templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP) - sdName = sd.GetName() - - standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd) - - // verify the standalone cluster is deployed correctly - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - sdName, - managedcluster.ValidationActionDeploy, - ) - - templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // setup environment variables for deploying the hosted template (subnet name, etc) - azure.SetAzureEnvironmentVariables(sdName, kc) - - hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP) - hdName := hd.GetName() - - var kubeCfgPath string - kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName) - - By("Deploy onto standalone cluster") - deployOnAzureCluster(kubeCfgPath) - - templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster") - standaloneClient = kc.NewFromCluster(context.Background(), namespace, sdName) - // verify the cluster is ready prior to creating credentials - Eventually(func() error { - err := verifyControllersUp(standaloneClient, managedcluster.ProviderAzure) - if err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) - return err - } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - By("Create azure credential secret") - azure.CreateCredentialSecret(context.Background(), standaloneClient) - - templateBy(managedcluster.TemplateAzureHostedCP, - fmt.Sprintf("creating a Deployment using template %s", managedcluster.TemplateAzureHostedCP)) - hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) - - templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully") - - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - hdName, - managedcluster.ValidationActionDeploy, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - By("verify the deployment deletes successfully") - err := hostedDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - err = standaloneDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deploymentValidator = managedcluster.NewProviderValidator( - 
managedcluster.TemplateAzureHostedCP, - hdName, - managedcluster.ValidationActionDelete, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - hdName, - managedcluster.ValidationActionDelete, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - }) - }) -}) - -func deployOnAzureCluster(kubeCfgPath string) { - GinkgoT().Helper() - GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) - cmd := exec.Command("kubectl", "create", "-f", - "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/"+ - "storageclass-azuredisk-csi.yaml") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - - cmd = exec.Command("kubectl", "patch", "storageclass", "managed-csi", "-p", - "{\"metadata\": {\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"}}}") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - - cmd = exec.Command("make", "dev-deploy") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - - cmd = exec.Command("make", "dev-templates") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) -} - -// templateBy wraps a Ginkgo By with a block describing the template being -// tested. -func templateBy(t managedcluster.Template, description string) { - GinkgoHelper() - By(fmt.Sprintf("[%s] %s", t, description)) -} - -// collectLogArtfiacts collects log output from each the HMC controller, -// CAPI controller and the provider controller(s) as well as output from clusterctl -// and stores them in the test/e2e directory as artifacts. If it fails it -// produces a warning message to the GinkgoWriter, but does not fail the test. 
-func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, providerTypes ...managedcluster.ProviderType) { - GinkgoHelper() - - filterLabels := []string{hmcControllerLabel} - - var host string - hostURL, err := url.Parse(kc.Config.Host) - if err != nil { - utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err)) - } else { - host = strings.ReplaceAll(hostURL.Host, ":", "_") - } - - for _, providerType := range providerTypes { - filterLabels = append(filterLabels, managedcluster.GetProviderLabel(providerType)) - } - - for _, label := range filterLabels { - pods, _ := kc.Client.CoreV1().Pods(kc.Namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: label, - }) - - for _, pod := range pods.Items { - req := kc.Client.CoreV1().Pods(kc.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ - TailLines: ptr.To(int64(1000)), - }) - podLogs, err := req.Stream(context.Background()) - if err != nil { - utils.WarnError(fmt.Errorf("failed to get log stream for pod %s: %w", pod.Name, err)) - continue - } - - output, err := os.Create(fmt.Sprintf("./test/e2e/%s.log", host+"-"+pod.Name)) - if err != nil { - utils.WarnError(fmt.Errorf("failed to create log file for pod %s: %w", pod.Name, err)) - _ = podLogs.Close() - continue - } - - r := bufio.NewReader(podLogs) - if _, err := r.WriteTo(output); err != nil { - utils.WarnError(fmt.Errorf("failed to write log file for pod %s: %w", pod.Name, err)) - } - - _ = podLogs.Close() - _ = output.Close() - } - } - - cmd := exec.Command("./bin/clusterctl", - "describe", "cluster", clusterName, "--namespace", namespace, "--show-conditions=all") - output, err := utils.Run(cmd) - if err != nil { - utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err)) - return - } - - err = os.WriteFile(filepath.Join("test/e2e", host+"-"+"clusterctl.log"), output, 0o644) - if err != nil { - utils.WarnError(fmt.Errorf("failed to write clusterctl log: %w", err)) - } -} - -func noCleanup() bool { - noCleanup := os.Getenv(managedcluster.EnvVarNoCleanup) - if noCleanup != "" { - By(fmt.Sprintf("skipping After nodes as %s is set", managedcluster.EnvVarNoCleanup)) - } - - return noCleanup != "" -} diff --git a/test/kubeclient/kubeclient.go b/test/e2e/kubeclient/kubeclient.go similarity index 85% rename from test/kubeclient/kubeclient.go rename to test/e2e/kubeclient/kubeclient.go index 34edc413c..c5b8fe811 100644 --- a/test/kubeclient/kubeclient.go +++ b/test/e2e/kubeclient/kubeclient.go @@ -16,10 +16,12 @@ package kubeclient import ( "context" + "errors" "fmt" "os" "path/filepath" + "github.com/Mirantis/hmc/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -74,7 +76,13 @@ func (kc *KubeClient) WriteKubeconfig(ctx context.Context, clusterName string) ( To(Succeed()) deleteFunc := func() error { - return os.Remove(filepath.Join(dir, path)) + if err = os.Remove(filepath.Join(dir, path)); err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + return nil } return path, deleteFunc @@ -135,15 +143,41 @@ func newKubeClient(configBytes []byte, namespace string) *KubeClient { } // GetDynamicClient returns a dynamic client for the given GroupVersionResource. 
-func (kc *KubeClient) GetDynamicClient(gvr schema.GroupVersionResource) dynamic.ResourceInterface { +// +//nolint:revive +func (kc *KubeClient) GetDynamicClient(gvr schema.GroupVersionResource, namespaced bool) dynamic.ResourceInterface { GinkgoHelper() client, err := dynamic.NewForConfig(kc.Config) - Expect(err).NotTo(HaveOccurred(), "failed to create dynamic client") + Expect(err).NotTo(HaveOccurred(), "failed to create dynamic client for resource: %s", gvr.String()) + + if !namespaced { + return client.Resource(gvr) + } return client.Resource(gvr).Namespace(kc.Namespace) } +func (kc *KubeClient) CreateOrUpdateUnstructuredObject(gvr schema.GroupVersionResource, obj *unstructured.Unstructured, namespaced bool) { + GinkgoHelper() + + client := kc.GetDynamicClient(gvr, namespaced) + + kind, name := utils.ObjKindName(obj) + + resp, err := client.Get(context.Background(), name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, err = client.Create(context.Background(), obj, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to create %s: %s", kind, name) + } else { + Expect(err).NotTo(HaveOccurred(), "failed to get existing %s: %s", kind, name) + + obj.SetResourceVersion(resp.GetResourceVersion()) + _, err = client.Update(context.Background(), obj, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to update existing %s: %s", kind, name) + } +} + // CreateManagedCluster creates a managedcluster.hmc.mirantis.com in the given // namespace and returns a DeleteFunc to clean up the deployment. // The DeleteFunc is a no-op if the deployment has already been deleted. @@ -159,7 +193,7 @@ func (kc *KubeClient) CreateManagedCluster( Group: "hmc.mirantis.com", Version: "v1alpha1", Resource: "managedclusters", - }) + }, true) _, err := client.Create(ctx, managedcluster, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { @@ -183,7 +217,7 @@ func (kc *KubeClient) GetCluster(ctx context.Context, clusterName string) (*unst Resource: "clusters", } - client := kc.GetDynamicClient(gvr) + client := kc.GetDynamicClient(gvr, true) cluster, err := client.Get(ctx, clusterName, metav1.GetOptions{}) if err != nil { @@ -198,7 +232,7 @@ func (kc *KubeClient) GetCluster(ctx context.Context, clusterName string) (*unst func (kc *KubeClient) listResource( ctx context.Context, gvr schema.GroupVersionResource, clusterName string, ) ([]unstructured.Unstructured, error) { - client := kc.GetDynamicClient(gvr) + client := kc.GetDynamicClient(gvr, true) resources, err := client.List(ctx, metav1.ListOptions{ LabelSelector: "cluster.x-k8s.io/cluster-name=" + clusterName, diff --git a/test/e2e/managedcluster/aws/aws.go b/test/e2e/managedcluster/aws/aws.go new file mode 100644 index 000000000..441cfc499 --- /dev/null +++ b/test/e2e/managedcluster/aws/aws.go @@ -0,0 +1,73 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package aws contains specific helpers for testing a managed cluster +// that uses the AWS infrastructure provider. 
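+//
+// The helpers read networking details (VPC ID, subnet and security group)
+// from a deployed AWSCluster object and export them as the environment
+// variables consumed by the hosted control plane template.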
+package aws + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" +) + +// PopulateHostedTemplateVars populates the environment variables required for +// the AWS hosted CP template by querying the standalone CP cluster with the +// given kubeclient. +func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) { + GinkgoHelper() + + c := kc.GetDynamicClient(schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: "v1beta2", + Resource: "awsclusters", + }, true) + + awsCluster, err := c.Get(ctx, clusterName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster") + + vpcID, found, err := unstructured.NestedString(awsCluster.Object, "spec", "network", "vpc", "id") + Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster VPC ID") + Expect(found).To(BeTrue(), "AWS cluster has no VPC ID") + + subnets, found, err := unstructured.NestedSlice(awsCluster.Object, "spec", "network", "subnets") + Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster subnets") + Expect(found).To(BeTrue(), "AWS cluster has no subnets") + + subnet, ok := subnets[0].(map[string]any) + Expect(ok).To(BeTrue(), "failed to cast subnet to map") + + subnetID, ok := subnet["resourceID"].(string) + Expect(ok).To(BeTrue(), "failed to cast subnet ID to string") + + subnetAZ, ok := subnet["availabilityZone"].(string) + Expect(ok).To(BeTrue(), "failed to cast subnet availability zone to string") + + securityGroupID, found, err := unstructured.NestedString( + awsCluster.Object, "status", "networkStatus", "securityGroups", "node", "id") + Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster security group ID") + Expect(found).To(BeTrue(), "AWS cluster has no security group ID") + + GinkgoT().Setenv(managedcluster.EnvVarAWSVPCID, vpcID) + GinkgoT().Setenv(managedcluster.EnvVarAWSSubnetID, subnetID) + GinkgoT().Setenv(managedcluster.EnvVarAWSSubnetAvailabilityZone, subnetAZ) + GinkgoT().Setenv(managedcluster.EnvVarAWSSecurityGroupID, securityGroupID) +} diff --git a/test/managedcluster/azure/azure.go b/test/e2e/managedcluster/azure/azure.go similarity index 59% rename from test/managedcluster/azure/azure.go rename to test/e2e/managedcluster/azure/azure.go index fcf262080..2880badcf 100644 --- a/test/managedcluster/azure/azure.go +++ b/test/e2e/managedcluster/azure/azure.go @@ -15,29 +15,22 @@ package azure import ( - "bufio" - "bytes" "context" - "errors" "fmt" - "io" - "os" - "github.com/a8m/envsubst" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/yaml" - yamlutil "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/discovery" - "k8s.io/client-go/restmapper" + "k8s.io/utils/ptr" hmc "github.com/Mirantis/hmc/api/v1alpha1" - "github.com/Mirantis/hmc/test/kubeclient" + "github.com/Mirantis/hmc/test/e2e/kubeclient" ) func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) map[string]any { @@ -48,7 +41,7 @@ func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) m Resource: "azureclusters", } - dc := kc.GetDynamicClient(resourceID) + dc := kc.GetDynamicClient(resourceID, true) list, err := dc.List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: name}).String(), }) @@ -101,52 +94,38 @@ func SetAzureEnvironmentVariables(clusterName string, kc *kubeclient.KubeClient) GinkgoT().Setenv("AZURE_ROUTE_TABLE", fmt.Sprintf("%s", routeTableName)) } -func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { +// CreateDefaultStorageClass configures the default storage class for Azure +// based on the azure-disk CSI driver that we deploy as part of our templates. +func CreateDefaultStorageClass(kc *kubeclient.KubeClient) { GinkgoHelper() - serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) - yamlFile, err := os.ReadFile("config/dev/azure-credentials.yaml") - Expect(err).NotTo(HaveOccurred()) - - yamlFile, err = envsubst.Bytes(yamlFile) - Expect(err).NotTo(HaveOccurred()) - - c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) - groupResources, err := restmapper.GetAPIGroupResources(c) - Expect(err).NotTo(HaveOccurred()) - - yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) - for { - yamlDoc, err := yamlReader.Read() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - Expect(err).NotTo(HaveOccurred(), "failed to read yaml file") - } - - credentialResource := &unstructured.Unstructured{} - _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) - Expect(err).NotTo(HaveOccurred(), "failed to parse credential resource") - - mapper := restmapper.NewDiscoveryRESTMapper(groupResources) - mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) - Expect(err).NotTo(HaveOccurred(), "failed to get rest mapping") - dc := kc.GetDynamicClient(schema.GroupVersionResource{ - Group: credentialResource.GroupVersionKind().Group, - Version: credentialResource.GroupVersionKind().Version, - Resource: mapping.Resource.Resource, - }) - - exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - Expect(err).NotTo(HaveOccurred(), "failed to get azure credential secret") - } + ctx := context.Background() + + azureDiskSC := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "azure-disk", + Annotations: map[string]string{ + "storageclass.kubernetes.io/is-default-class": "true", + }, + }, + Provisioner: "disk.csi.azure.com", + ReclaimPolicy: ptr.To(corev1.PersistentVolumeReclaimDelete), + VolumeBindingMode: ptr.To(storagev1.VolumeBindingWaitForFirstConsumer), + AllowVolumeExpansion: ptr.To(true), + Parameters: 
map[string]string{ + "skuName": "StandardSSD_LRS", + }, + } - if exists == nil { - if _, createErr := dc.Create(ctx, credentialResource, metav1.CreateOptions{}); err != nil { - Expect(createErr).NotTo(HaveOccurred(), "failed to create azure credential secret") - } + sc, err := kc.Client.StorageV1().StorageClasses().Get(ctx, "azure-disk", metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + _, err := kc.Client.StorageV1().StorageClasses().Create(ctx, azureDiskSC, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) } + } else { + azureDiskSC.SetResourceVersion(sc.GetResourceVersion()) + _, err = kc.Client.StorageV1().StorageClasses().Update(ctx, azureDiskSC, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) } } diff --git a/test/e2e/managedcluster/clusteridentity/clusteridentity.go b/test/e2e/managedcluster/clusteridentity/clusteridentity.go new file mode 100644 index 000000000..91e69ea54 --- /dev/null +++ b/test/e2e/managedcluster/clusteridentity/clusteridentity.go @@ -0,0 +1,253 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clusteridentity + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type ClusterIdentity struct { + GroupVersionResource schema.GroupVersionResource + Kind string + SecretName string + IdentityName string + SecretData map[string]string + Spec map[string]any + Namespaced bool +} + +// New creates a ClusterIdentity resource, credential and associated secret for +// the given provider using the provided KubeClient and returns details about +// the created ClusterIdentity. 
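+// A minimal usage sketch, as the provider specs call it (assumes the
+// provider's credential env vars, e.g. AWS_ACCESS_KEY_ID and
+// AWS_SECRET_ACCESS_KEY for AWS, are already exported):
+//
+//	kc := kubeclient.NewFromLocal("hmc-system")
+//	ci := New(kc, managedcluster.ProviderAWS)
+//	GinkgoT().Setenv(managedcluster.EnvVarAWSClusterIdentity, ci.IdentityName)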
+func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType) *ClusterIdentity { + GinkgoHelper() + + var ( + resource string + kind string + version string + secretStringData map[string]string + spec map[string]any + namespaced bool + ) + + secretName := fmt.Sprintf("%s-cluster-identity-secret", provider) + identityName := fmt.Sprintf("%s-cluster-identity", provider) + + switch provider { + case managedcluster.ProviderAWS: + resource = "awsclusterstaticidentities" + kind = "AWSClusterStaticIdentity" + version = "v1beta2" + secretStringData = map[string]string{ + "AccessKeyID": os.Getenv(managedcluster.EnvVarAWSAccessKeyID), + "SecretAccessKey": os.Getenv(managedcluster.EnvVarAWSSecretAccessKey), + } + spec = map[string]any{ + "secretRef": secretName, + "allowedNamespaces": map[string]any{ + "selector": map[string]any{ + "matchLabels": map[string]any{}, + }, + }, + } + case managedcluster.ProviderAzure: + resource = "azureclusteridentities" + kind = "AzureClusterIdentity" + version = "v1beta1" + secretStringData = map[string]string{ + "clientSecret": os.Getenv(managedcluster.EnvVarAzureClientSecret), + } + spec = map[string]any{ + "allowedNamespaces": map[string]any{}, + "clientID": os.Getenv(managedcluster.EnvVarAzureClientID), + "clientSecret": map[string]any{ + "name": secretName, + "namespace": kc.Namespace, + }, + "tenantID": os.Getenv(managedcluster.EnvVarAzureTenantID), + "type": "ServicePrincipal", + } + namespaced = true + case managedcluster.ProviderVSphere: + resource = "vsphereclusteridentities" + kind = "VSphereClusterIdentity" + version = "v1beta1" + secretStringData = map[string]string{ + "username": os.Getenv(managedcluster.EnvVarVSphereUser), + "password": os.Getenv(managedcluster.EnvVarVSpherePassword), + } + spec = map[string]any{ + "secretName": secretName, + "allowedNamespaces": map[string]any{ + "selector": map[string]any{ + "matchLabels": map[string]any{}, + }, + }, + } + default: + Fail(fmt.Sprintf("Unsupported provider: %s", provider)) + } + + ci := ClusterIdentity{ + GroupVersionResource: schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: version, + Resource: resource, + }, + Kind: kind, + SecretName: secretName, + IdentityName: identityName, + SecretData: secretStringData, + Spec: spec, + Namespaced: namespaced, + } + + validateSecretDataPopulated(secretStringData) + ci.waitForResourceCRD(kc) + ci.createSecret(kc) + ci.createClusterIdentity(kc) + ci.createCredential(kc) + + return &ci +} + +func validateSecretDataPopulated(secretData map[string]string) { + for key, value := range secretData { + Expect(value).ToNot(BeEmpty(), fmt.Sprintf("Secret data key %s should not be empty", key)) + } +} + +// waitForResourceCRD ensures the CRD for the given resource is present by +// trying to list the resources of the given type until it succeeds. 
+func (ci *ClusterIdentity) waitForResourceCRD(kc *kubeclient.KubeClient) { + GinkgoHelper() + + By(fmt.Sprintf("waiting for %s CRD to be present", ci.Kind)) + + ctx := context.Background() + + Eventually(func() error { + crds, err := kc.ExtendedClient.ApiextensionsV1().CustomResourceDefinitions().List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list CRDs: %w", err) + } + + for _, crd := range crds.Items { + if crd.Spec.Names.Kind == ci.Kind { + return nil + } + } + + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to find CRD, retrying...\n") + return fmt.Errorf("failed to find CRD for resource: %s", ci.GroupVersionResource.String()) + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) +} + +// createSecret creates a secret affiliated with a ClusterIdentity. +func (ci *ClusterIdentity) createSecret(kc *kubeclient.KubeClient) { + GinkgoHelper() + + By(fmt.Sprintf("creating ClusterIdentity secret: %s", ci.SecretName)) + + ctx := context.Background() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: ci.SecretName, + Namespace: kc.Namespace, + }, + StringData: ci.SecretData, + Type: corev1.SecretTypeOpaque, + } + + _, err := kc.Client.CoreV1().Secrets(kc.Namespace).Create(ctx, secret, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + resp, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get(ctx, ci.SecretName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to get existing secret") + + secret.SetResourceVersion(resp.GetResourceVersion()) + _, err = kc.Client.CoreV1().Secrets(kc.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to update existing secret") + } else { + Expect(err).NotTo(HaveOccurred(), "failed to create secret") + } +} + +func (ci *ClusterIdentity) createCredential(kc *kubeclient.KubeClient) { + GinkgoHelper() + + credName := fmt.Sprintf("%s-cred", ci.IdentityName) + By(fmt.Sprintf("creating Credential: %s", credName)) + + cred := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "hmc.mirantis.com/v1alpha1", + "kind": "Credential", + "metadata": map[string]any{ + "name": credName, + "namespace": kc.Namespace, + }, + "spec": map[string]any{ + "identityRef": map[string]any{ + "apiVersion": ci.GroupVersionResource.Group + "/" + ci.GroupVersionResource.Version, + "kind": ci.Kind, + "name": ci.IdentityName, + "namespace": kc.Namespace, + }, + }, + }, + } + + kc.CreateOrUpdateUnstructuredObject(schema.GroupVersionResource{ + Group: "hmc.mirantis.com", + Version: "v1alpha1", + Resource: "credentials", + }, cred, true) +} + +// createClusterIdentity creates a ClusterIdentity resource. 
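+// For ProviderAWS the unstructured object assembled below corresponds to
+// roughly this manifest (a sketch derived from the spec built in New;
+// AWSClusterStaticIdentity is cluster-scoped, so Namespaced stays false):
+//
+//	apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
+//	kind: AWSClusterStaticIdentity
+//	metadata:
+//	  name: aws-cluster-identity
+//	spec:
+//	  secretRef: aws-cluster-identity-secret
+//	  allowedNamespaces:
+//	    selector:
+//	      matchLabels: {}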
+func (ci *ClusterIdentity) createClusterIdentity(kc *kubeclient.KubeClient) {
+	GinkgoHelper()
+
+	By(fmt.Sprintf("creating ClusterIdentity: %s", ci.IdentityName))
+
+	id := &unstructured.Unstructured{
+		Object: map[string]any{
+			"apiVersion": ci.GroupVersionResource.Group + "/" + ci.GroupVersionResource.Version,
+			"kind":       ci.Kind,
+			"metadata": map[string]any{
+				"name":      ci.IdentityName,
+				"namespace": kc.Namespace,
+			},
+			"spec": ci.Spec,
+		},
+	}
+
+	kc.CreateOrUpdateUnstructuredObject(ci.GroupVersionResource, id, ci.Namespaced)
+}
diff --git a/test/e2e/managedcluster/common.go b/test/e2e/managedcluster/common.go
new file mode 100644
index 000000000..2e300f2cb
--- /dev/null
+++ b/test/e2e/managedcluster/common.go
@@ -0,0 +1,84 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package managedcluster
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/Mirantis/hmc/test/e2e/kubeclient"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// PatchHostedClusterReady patches a hosted cluster's infrastructure resource
+// as Ready for the given provider.
+// See: https://docs.k0smotron.io/stable/capi-aws/#prepare-the-aws-infra-provider
+// Use Eventually as the resource might not be available immediately following
+// a ManagedCluster creation.
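+// The status merge patch applied below is roughly equivalent to (a sketch;
+// assumes a kubectl new enough to support --subresource):
+//
+//	kubectl -n <namespace> patch awscluster <cluster-name> \
+//	  --type=merge --subresource=status -p '{"status":{"ready":true}}'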
+func PatchHostedClusterReady(kc *kubeclient.KubeClient, provider ProviderType, clusterName string) { + GinkgoHelper() + + ctx := context.Background() + + var ( + version string + resource string + ) + + switch provider { + case ProviderAWS: + version = "v1beta2" + resource = "awsclusters" + case ProviderAzure: + version = "v1beta1" + resource = "azureclusters" + case ProviderVSphere: + version = "v1beta1" + resource = "vsphereclusters" + default: + Fail(fmt.Sprintf("unsupported provider: %s", provider)) + } + + c := kc.GetDynamicClient(schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: version, + Resource: resource, + }, true) + + trueStatus := map[string]any{ + "status": map[string]any{ + "ready": true, + }, + } + + patchBytes, err := json.Marshal(trueStatus) + Expect(err).NotTo(HaveOccurred(), "failed to marshal patch bytes") + + Eventually(func() error { + _, err = c.Patch(ctx, clusterName, types.MergePatchType, + patchBytes, metav1.PatchOptions{}, "status") + if err != nil { + return err + } + _, _ = fmt.Fprintf(GinkgoWriter, "Patch succeeded\n") + return nil + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) +} diff --git a/test/managedcluster/constants.go b/test/e2e/managedcluster/constants.go similarity index 56% rename from test/managedcluster/constants.go rename to test/e2e/managedcluster/constants.go index 5badd6112..4f18a7832 100644 --- a/test/managedcluster/constants.go +++ b/test/e2e/managedcluster/constants.go @@ -16,21 +16,35 @@ package managedcluster const ( // Common - EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" - EnvVarHostedManagedClusterName = "HOSTED_MANAGED_CLUSTER_NAME" - EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" - EnvVarWorkerNumber = "WORKER_NUMBER" - EnvVarNamespace = "NAMESPACE" + EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" + EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" + EnvVarWorkerNumber = "WORKER_NUMBER" + EnvVarNamespace = "NAMESPACE" // EnvVarNoCleanup disables After* cleanup in provider specs to allow for // debugging of test failures. EnvVarNoCleanup = "NO_CLEANUP" // AWS + EnvVarAWSAccessKeyID = "AWS_ACCESS_KEY_ID" + EnvVarAWSSecretAccessKey = "AWS_SECRET_ACCESS_KEY" EnvVarAWSVPCID = "AWS_VPC_ID" EnvVarAWSSubnetID = "AWS_SUBNET_ID" EnvVarAWSSubnetAvailabilityZone = "AWS_SUBNET_AVAILABILITY_ZONE" EnvVarAWSInstanceType = "AWS_INSTANCE_TYPE" EnvVarAWSSecurityGroupID = "AWS_SG_ID" + EnvVarAWSClusterIdentity = "AWS_CLUSTER_IDENTITY" EnvVarPublicIP = "AWS_PUBLIC_IP" - AWSCredentialsSecretName = "aws-variables" + + // VSphere + EnvVarVSphereUser = "VSPHERE_USER" + EnvVarVSpherePassword = "VSPHERE_PASSWORD" + EnvVarVSphereClusterIdentity = "VSPHERE_CLUSTER_IDENTITY" + + // Azure + EnvVarAzureClientSecret = "AZURE_CLIENT_SECRET" + EnvVarAzureClientID = "AZURE_CLIENT_ID" + EnvVarAzureTenantID = "AZURE_TENANT_ID" + EnvVarAzureSubscription = "AZURE_SUBSCRIPTION" + EnvVarAzureClusterIdentity = "AZURE_CLUSTER_IDENTITY" + EnvVarAzureRegion = "AZURE_REGION" ) diff --git a/test/managedcluster/managedcluster.go b/test/e2e/managedcluster/managedcluster.go similarity index 81% rename from test/managedcluster/managedcluster.go rename to test/e2e/managedcluster/managedcluster.go index 6c1edca02..2b8495719 100644 --- a/test/managedcluster/managedcluster.go +++ b/test/e2e/managedcluster/managedcluster.go @@ -20,14 +20,13 @@ import ( "os" "strings" + "github.com/Mirantis/hmc/test/utils" "github.com/a8m/envsubst" "github.com/google/uuid" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - "github.com/Mirantis/hmc/internal/utils" ) type ProviderType string @@ -70,30 +69,46 @@ var vsphereStandaloneCPManagedClusterTemplateBytes []byte //go:embed resources/vsphere-hosted-cp.yaml.tpl var vsphereHostedCPManagedClusterTemplateBytes []byte +func FilterAllProviders() []string { + return []string{ + utils.HMCControllerLabel, + GetProviderLabel(ProviderAWS), + GetProviderLabel(ProviderAzure), + GetProviderLabel(ProviderCAPI), + GetProviderLabel(ProviderVSphere), + } +} + func GetProviderLabel(provider ProviderType) string { return fmt.Sprintf("%s=%s", providerLabel, provider) } +func setClusterName(templateName Template) { + var generatedName string + + mcName := os.Getenv(EnvVarManagedClusterName) + if mcName == "" { + mcName = "e2e-test-" + uuid.New().String()[:8] + } + + providerName := strings.Split(string(templateName), "-")[0] + + // Append the provider name to the cluster name to ensure uniqueness between + // different deployed ManagedClusters. + generatedName = fmt.Sprintf("%s-%s", mcName, providerName) + if strings.Contains(string(templateName), "hosted") { + generatedName = fmt.Sprintf("%s-%s", mcName, "hosted") + } + + GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) +} + // GetUnstructured returns an unstructured ManagedCluster object based on the // provider and template. func GetUnstructured(templateName Template) *unstructured.Unstructured { GinkgoHelper() - generatedName := os.Getenv(EnvVarManagedClusterName) - if generatedName == "" { - generatedName = "e2e-test-" + uuid.New().String()[:8] - _, _ = fmt.Fprintf(GinkgoWriter, "Generated cluster name: %q\n", generatedName) - GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Using configured cluster name: %q\n", generatedName) - } - - var hostedName string - if strings.Contains(string(templateName), "-hosted") { - hostedName = generatedName + "-hosted" - GinkgoT().Setenv(EnvVarHostedManagedClusterName, hostedName) - _, _ = fmt.Fprintf(GinkgoWriter, "Creating hosted ManagedCluster with name: %q\n", hostedName) - } + setClusterName(templateName) var managedClusterTemplateBytes []byte switch templateName { @@ -119,10 +134,9 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { case TemplateAzureStandaloneCP: managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes default: - Fail(fmt.Sprintf("unsupported template: %s", templateName)) + Fail(fmt.Sprintf("Unsupported template: %s", templateName)) } - Expect(os.Setenv("NAMESPACE", utils.DefaultSystemNamespace)).NotTo(HaveOccurred()) managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables") diff --git a/test/managedcluster/providervalidator.go b/test/e2e/managedcluster/providervalidator.go similarity index 97% rename from test/managedcluster/providervalidator.go rename to test/e2e/managedcluster/providervalidator.go index 2deae8ff7..a28dd7cf0 100644 --- a/test/managedcluster/providervalidator.go +++ b/test/e2e/managedcluster/providervalidator.go @@ -18,7 +18,7 @@ import ( "context" "fmt" - "github.com/Mirantis/hmc/test/kubeclient" + "github.com/Mirantis/hmc/test/e2e/kubeclient" . 
"github.com/onsi/ginkgo/v2" ) @@ -64,7 +64,7 @@ func NewProviderValidator(template Template, clusterName string, action Validati case TemplateAWSStandaloneCP, TemplateAWSHostedCP: resourcesToValidate["ccm"] = validateCCM resourceOrder = append(resourceOrder, "ccm") - case TemplateAzureStandaloneCP, TemplateVSphereHostedCP: + case TemplateAzureStandaloneCP, TemplateVSphereStandaloneCP: delete(resourcesToValidate, "csi-driver") } } else { diff --git a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl similarity index 72% rename from test/managedcluster/resources/aws-hosted-cp.yaml.tpl rename to test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl index 64e46a0b1..072ab3abd 100644 --- a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl @@ -1,12 +1,13 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${HOSTED_MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-hosted-cp + template: aws-hosted-cp-0-0-1 + credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: - name: aws-cluster-identity + name: ${AWS_CLUSTER_IDENTITY} namespace: ${NAMESPACE} vpcID: ${AWS_VPC_ID} region: ${AWS_REGION} diff --git a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl similarity index 74% rename from test/managedcluster/resources/aws-standalone-cp.yaml.tpl rename to test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl index f81532fea..f2081b755 100644 --- a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -1,12 +1,13 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-aws + name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-standalone-cp + template: aws-standalone-cp-0-0-1 + credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: - name: aws-cluster-identity + name: ${AWS_CLUSTER_IDENTITY} namespace: ${NAMESPACE} region: ${AWS_REGION} publicIP: ${AWS_PUBLIC_IP:=true} diff --git a/test/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl similarity index 76% rename from test/managedcluster/resources/azure-hosted-cp.yaml.tpl rename to test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl index 6b8f7ad97..c28f703da 100644 --- a/test/managedcluster/resources/azure-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -1,16 +1,17 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-azure + name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: - template: azure-hosted-cp + template: azure-hosted-cp-0-0-1 + credential: ${AZURE_CLUSTER_IDENTITY}-cred config: - location: "westus" + location: "${AZURE_REGION}" subscriptionID: "${AZURE_SUBSCRIPTION_ID}" vmSize: Standard_A4_v2 clusterIdentity: - name: azure-cluster-identity + name: ${AZURE_CLUSTER_IDENTITY} namespace: hmc-system resourceGroup: "${AZURE_RESOURCE_GROUP}" network: diff --git a/test/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl similarity index 66% rename from test/managedcluster/resources/azure-standalone-cp.yaml.tpl rename to test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl index 44d5abf60..5906d4f2b 100644 --- 
a/test/managedcluster/resources/azure-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -1,21 +1,23 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-azure + name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: - template: azure-standalone-cp + template: azure-standalone-cp-0-0-1 + credential: ${AZURE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: 1 workersNumber: 1 - location: "westus" + location: "${AZURE_REGION}" subscriptionID: "${AZURE_SUBSCRIPTION_ID}" controlPlane: vmSize: Standard_A4_v2 worker: vmSize: Standard_A4_v2 + credential: ${AZURE_CLUSTER_IDENTITY}-cred clusterIdentity: - name: azure-cluster-identity + name: ${AZURE_CLUSTER_IDENTITY} namespace: ${NAMESPACE} tenantID: "${AZURE_TENANT_ID}" clientID: "${AZURE_CLIENT_ID}" diff --git a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl similarity index 89% rename from test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl rename to test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl index a4c328b77..c524f010b 100644 --- a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl @@ -1,9 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-vsphere + name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-hosted-cp + template: vsphere-hosted-cp-0-0-1 + credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} workersNumber: ${WORKERS_NUMBER:=1} diff --git a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl similarity index 90% rename from test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl rename to test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl index 81eb8edf3..1981fe8e8 100644 --- a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl @@ -1,9 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-vsphere + name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-standalone-cp + template: vsphere-standalone-cp-0-0-1 + credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} workersNumber: ${WORKERS_NUMBER:=1} diff --git a/test/managedcluster/validate_deleted.go b/test/e2e/managedcluster/validate_deleted.go similarity index 98% rename from test/managedcluster/validate_deleted.go rename to test/e2e/managedcluster/validate_deleted.go index 11c801417..7ceeb61ae 100644 --- a/test/managedcluster/validate_deleted.go +++ b/test/e2e/managedcluster/validate_deleted.go @@ -19,7 +19,7 @@ import ( "errors" "fmt" - "github.com/Mirantis/hmc/test/kubeclient" + "github.com/Mirantis/hmc/test/e2e/kubeclient" "github.com/Mirantis/hmc/test/utils" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" diff --git a/test/managedcluster/validate_deployed.go b/test/e2e/managedcluster/validate_deployed.go similarity index 90% rename from test/managedcluster/validate_deployed.go rename to test/e2e/managedcluster/validate_deployed.go index 89c8ca694..b8224b4e2 100644 --- a/test/managedcluster/validate_deployed.go +++ b/test/e2e/managedcluster/validate_deployed.go @@ -19,7 +19,7 @@ import ( 
"fmt" "strings" - "github.com/Mirantis/hmc/test/kubeclient" + "github.com/Mirantis/hmc/test/e2e/kubeclient" "github.com/Mirantis/hmc/test/utils" . "github.com/onsi/ginkgo/v2" corev1 "k8s.io/api/core/v1" @@ -62,6 +62,27 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam return err } + if len(machines) == 0 { + // No machines have been created yet, check for MachineDeployments to + // provide some debug information as to why no machines are present. + md, err := kc.ListMachineDeployments(ctx, clusterName) + if err != nil { + return fmt.Errorf("failed to list machine deployments: %w", err) + } + + for _, md := range md { + _, _ = fmt.Fprintf(GinkgoWriter, "No machines found, validating MachineDeployment %s\n", md.GetName()) + + if err := utils.ValidateObjectNamePrefix(&md, clusterName); err != nil { + Fail(err.Error()) + } + + if err := utils.ValidateConditionsTrue(&md); err != nil { + return err + } + } + } + for _, machine := range machines { if err := utils.ValidateObjectNamePrefix(&machine, clusterName); err != nil { Fail(err.Error()) @@ -191,7 +212,7 @@ func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, clusterNa return fmt.Errorf("failed to get test PVC: %w", err) } - if !strings.Contains(*pvc.Spec.StorageClassName, "csi") { + if pvc.Spec.StorageClassName != nil && !strings.Contains(*pvc.Spec.StorageClassName, "csi") { Fail(fmt.Sprintf("%s PersistentVolumeClaim does not have a CSI driver storageClass", pvcName)) } diff --git a/test/e2e/managedcluster/vsphere/vsphere.go b/test/e2e/managedcluster/vsphere/vsphere.go new file mode 100644 index 000000000..0d5db9ca0 --- /dev/null +++ b/test/e2e/managedcluster/vsphere/vsphere.go @@ -0,0 +1,36 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vsphere + +import ( + "github.com/Mirantis/hmc/test/e2e/managedcluster" +) + +func CheckEnv() { + managedcluster.ValidateDeploymentVars([]string{ + "VSPHERE_USER", + "VSPHERE_PASSWORD", + "VSPHERE_SERVER", + "VSPHERE_THUMBPRINT", + "VSPHERE_DATACENTER", + "VSPHERE_DATASTORE", + "VSPHERE_RESOURCEPOOL", + "VSPHERE_FOLDER", + "VSPHERE_CONTROL_PLANE_ENDPOINT", + "VSPHERE_VM_TEMPLATE", + "VSPHERE_NETWORK", + "VSPHERE_SSH_KEY", + }) +} diff --git a/test/e2e/provider_aws_test.go b/test/e2e/provider_aws_test.go new file mode 100644 index 000000000..552f53eca --- /dev/null +++ b/test/e2e/provider_aws_test.go @@ -0,0 +1,188 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package e2e + +import ( + "context" + "fmt" + "os" + "os/exec" + "time" + + internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/e2e/managedcluster/aws" + "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" + "github.com/Mirantis/hmc/test/utils" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Ordered, func() { + var ( + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + standaloneDeleteFunc func() error + hostedDeleteFunc func() error + kubecfgDeleteFunc func() error + clusterName string + ) + + BeforeAll(func() { + By("providing cluster identity") + kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) + ci := clusteridentity.New(kc, managedcluster.ProviderAWS) + Expect(os.Setenv(managedcluster.EnvVarAWSClusterIdentity, ci.IdentityName)).Should(Succeed()) + }) + + AfterAll(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. + if CurrentSpecReport().Failed() && !noCleanup() { + if standaloneClient != nil { + By("collecting failure logs from hosted controllers") + collectLogArtifacts(standaloneClient, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) + } + } + + By("deleting resources") + for _, deleteFunc := range []func() error{ + kubecfgDeleteFunc, + hostedDeleteFunc, + standaloneDeleteFunc, + } { + if deleteFunc != nil { + err := deleteFunc() + Expect(err).NotTo(HaveOccurred()) + } + } + }) + + It("should work with an AWS provider", func() { + // Deploy a standalone cluster and verify it is running/ready. + // Deploy standalone with an xlarge instance since it will also be + // hosting the hosted cluster. + GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") + + templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") + sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) + clusterName = sd.GetName() + + standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + + templateBy(managedcluster.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") + deploymentValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateAWSStandaloneCP, + clusterName, + managedcluster.ValidationActionDeploy, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + templateBy(managedcluster.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") + + // Download the KUBECONFIG for the standalone cluster and load it + // so we can call Make targets against this cluster. + // TODO: Ideally we shouldn't use Make here and should just convert + // these Make targets into Go code, but this will require a + // helmclient. 
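+			// One possible Go-native shape for that TODO, using the
+			// helm.sh/helm/v3 action package (a hedged sketch, not wired up
+			// here; chartPath and vals are placeholders):
+			//
+			//	settings := cli.New()
+			//	actionConfig := new(action.Configuration)
+			//	err := actionConfig.Init(settings.RESTClientGetter(),
+			//		internalutils.DefaultSystemNamespace, "secret", log.Printf)
+			//	Expect(err).NotTo(HaveOccurred())
+			//	install := action.NewInstall(actionConfig)
+			//	install.ReleaseName = "hmc"
+			//	install.Namespace = internalutils.DefaultSystemNamespace
+			//	chart, err := loader.Load(chartPath)
+			//	Expect(err).NotTo(HaveOccurred())
+			//	_, err = install.Run(chart, vals)
+			//	Expect(err).NotTo(HaveOccurred())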
+ var kubeCfgPath string + kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), clusterName) + + GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) + cmd := exec.Command("make", "test-apply") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) + + templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") + standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, clusterName) + Eventually(func() error { + err := verifyControllersUp(standaloneClient) + if err != nil { + _, _ = fmt.Fprintf( + GinkgoWriter, "[%s] controller validation failed: %v\n", + string(managedcluster.TemplateAWSHostedCP), err) + return err + } + return nil + }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // Ensure AWS credentials are set in the standalone cluster. + clusteridentity.New(standaloneClient, managedcluster.ProviderAWS) + + // Populate the environment variables required for the hosted + // cluster. + aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) + + templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") + hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) + hdName := hd.GetName() + + // Deploy the hosted cluster on top of the standalone cluster. + hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) + + templateBy(managedcluster.TemplateAWSHostedCP, "Patching AWSCluster to ready") + managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAWS, hdName) + + // Verify the hosted cluster is running/ready. + templateBy(managedcluster.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully") + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAWSHostedCP, + hdName, + managedcluster.ValidationActionDeploy, + ) + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // Delete the hosted ManagedCluster and verify it is removed. + templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster") + err = hostedDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deletionValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateAWSHostedCP, + hdName, + managedcluster.ValidationActionDelete, + ) + Eventually(func() error { + return deletionValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + // Now delete the standalone ManagedCluster and verify it is + // removed, it is deleted last since it is the basis for the hosted + // cluster. + /* + FIXME(#339): This is currently disabled as the deletion of the + standalone cluster is failing due to outstanding issues. 
+ templateBy(managedcluster.TemplateAWSStandaloneCP, "deleting the ManagedCluster") + err = standaloneDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deletionValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAWSStandaloneCP, + clusterName, + managedcluster.ValidationActionDelete, + ) + Eventually(func() error { + return deletionValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * + time.Second).Should(Succeed()) + */ + }) +}) diff --git a/test/e2e/provider_azure_test.go b/test/e2e/provider_azure_test.go new file mode 100644 index 000000000..6681668f3 --- /dev/null +++ b/test/e2e/provider_azure_test.go @@ -0,0 +1,175 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "context" + "fmt" + "os" + "os/exec" + "time" + + internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/e2e/managedcluster/azure" + "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" + "github.com/Mirantis/hmc/test/utils" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() { + var ( + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + standaloneDeleteFunc func() error + hostedDeleteFunc func() error + kubecfgDeleteFunc func() error + hostedKubecfgDeleteFunc func() error + sdName string + ) + + BeforeAll(func() { + By("ensuring Azure credentials are set") + kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) + ci := clusteridentity.New(kc, managedcluster.ProviderAzure) + Expect(os.Setenv(managedcluster.EnvVarAzureClusterIdentity, ci.IdentityName)).Should(Succeed()) + }) + + AfterEach(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. 
+		if CurrentSpecReport().Failed() && !noCleanup() {
+			By("collecting failure logs from controllers")
+			if kc != nil {
+				collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI)
+			}
+			if standaloneClient != nil {
+				collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI)
+			}
+		}
+
+		By("deleting resources")
+		for _, deleteFunc := range []func() error{
+			hostedKubecfgDeleteFunc,
+			kubecfgDeleteFunc,
+			hostedDeleteFunc,
+			standaloneDeleteFunc,
+		} {
+			if deleteFunc != nil {
+				err := deleteFunc()
+				Expect(err).NotTo(HaveOccurred())
+			}
+		}
+	})
+
+	It("should work with an Azure provider", func() {
+		templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster")
+		sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP)
+		sdName = sd.GetName()
+
+		// Assign (not declare) so the AfterEach cleanup sees this delete func.
+		standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd)
+
+		// verify the standalone cluster is deployed correctly
+		deploymentValidator := managedcluster.NewProviderValidator(
+			managedcluster.TemplateAzureStandaloneCP,
+			sdName,
+			managedcluster.ValidationActionDeploy,
+		)
+
+		templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully")
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), kc)
+		}).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		// set up environment variables for deploying the hosted template (subnet name, etc)
+		azure.SetAzureEnvironmentVariables(sdName, kc)
+
+		hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP)
+		hdName := hd.GetName()
+
+		var kubeCfgPath string
+		kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName)
+
+		By("Deploy onto standalone cluster")
+		GinkgoT().Setenv("KUBECONFIG", kubeCfgPath)
+		cmd := exec.Command("make", "test-apply")
+		_, err := utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(os.Unsetenv("KUBECONFIG")).To(Succeed())
+
+		standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, sdName)
+		// verify the cluster is ready prior to creating credentials
+		Eventually(func() error {
+			err := verifyControllersUp(standaloneClient)
+			if err != nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err)
+				return err
+			}
+			return nil
+		}).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		By("Create azure credential secret")
+		clusteridentity.New(standaloneClient, managedcluster.ProviderAzure)
+
+		By("Create default storage class for azure-disk CSI driver")
+		azure.CreateDefaultStorageClass(standaloneClient)
+
+		templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster")
+		hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd)
+
+		templateBy(managedcluster.TemplateAzureHostedCP, "Patching AzureCluster to ready")
+		managedcluster.PatchHostedClusterReady(standaloneClient, managedcluster.ProviderAzure, hdName)
+
+		templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully")
+		// The hosted cluster's objects live on the standalone (management)
+		// cluster, so validation runs against standaloneClient rather than kc.
+		deploymentValidator = managedcluster.NewProviderValidator(
+			managedcluster.TemplateAzureHostedCP,
+			hdName,
+			managedcluster.ValidationActionDeploy,
+		)
+
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), standaloneClient)
+		}).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
By("verify the deployment deletes successfully") + err = hostedDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + err = standaloneDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureHostedCP, + hdName, + managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + hdName, + managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }) +}) diff --git a/test/e2e/provider_vsphere_test.go b/test/e2e/provider_vsphere_test.go new file mode 100644 index 000000000..613ab1cfe --- /dev/null +++ b/test/e2e/provider_vsphere_test.go @@ -0,0 +1,97 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "context" + "os" + "time" + + internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" + "github.com/Mirantis/hmc/test/e2e/managedcluster/vsphere" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() { + var ( + kc *kubeclient.KubeClient + deleteFunc func() error + clusterName string + err error + ) + + BeforeAll(func() { + By("ensuring that env vars are set correctly") + vsphere.CheckEnv() + By("creating kube client") + kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) + By("providing cluster identity") + ci := clusteridentity.New(kc, managedcluster.ProviderVSphere) + By("setting VSPHERE_CLUSTER_IDENTITY env variable") + Expect(os.Setenv(managedcluster.EnvVarVSphereClusterIdentity, ci.IdentityName)).Should(Succeed()) + }) + + AfterEach(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. + if CurrentSpecReport().Failed() { + By("collecting failure logs from controllers") + collectLogArtifacts(kc, clusterName, managedcluster.ProviderVSphere, managedcluster.ProviderCAPI) + } + + // Run the deletion as part of the cleanup and validate it here. + // VSphere doesn't have any form of cleanup outside of reconciling a + // cluster deletion so we need to keep the test active while we wait + // for CAPV to clean up the resources. + // TODO: Add an exterior cleanup mechanism for VSphere like + // 'dev-aws-nuke' to clean up resources in the event that the test + // fails to do so. 
+ if deleteFunc != nil && !noCleanup() { + deletionValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateVSphereStandaloneCP, + clusterName, + managedcluster.ValidationActionDelete, + ) + + err = deleteFunc() + Expect(err).NotTo(HaveOccurred()) + Eventually(func() error { + return deletionValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + } + }) + + It("should deploy standalone managed cluster", func() { + By("creating a managed cluster") + d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP) + clusterName = d.GetName() + + deleteFunc = kc.CreateManagedCluster(context.Background(), d) + + By("waiting for infrastructure providers to deploy successfully") + deploymentValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateVSphereStandaloneCP, + clusterName, + managedcluster.ValidationActionDeploy, + ) + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + }) +}) diff --git a/test/managedcluster/aws/aws.go b/test/managedcluster/aws/aws.go deleted file mode 100644 index 0bdccafa3..000000000 --- a/test/managedcluster/aws/aws.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2024 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package aws contains specific helpers for testing a managed cluster -// that uses the AWS infrastructure provider. -package aws - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "io" - "os" - - "github.com/a8m/envsubst" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/yaml" - "k8s.io/apimachinery/pkg/types" - yamlutil "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/restmapper" - - "github.com/Mirantis/hmc/test/kubeclient" - "github.com/Mirantis/hmc/test/managedcluster" -) - -func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { - GinkgoHelper() - serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) - yamlFile, err := os.ReadFile("config/dev/aws-credentials.yaml") - Expect(err).NotTo(HaveOccurred()) - - yamlFile, err = envsubst.Bytes(yamlFile) - Expect(err).NotTo(HaveOccurred()) - - c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) - groupResources, err := restmapper.GetAPIGroupResources(c) - Expect(err).NotTo(HaveOccurred()) - - yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) - for { - yamlDoc, err := yamlReader.Read() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - Expect(err).NotTo(HaveOccurred(), "failed to read yaml file") - } - - credentialResource := &unstructured.Unstructured{} - _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) - Expect(err).NotTo(HaveOccurred(), "failed to parse credential resource") - - mapper := restmapper.NewDiscoveryRESTMapper(groupResources) - mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) - Expect(err).NotTo(HaveOccurred(), "failed to get rest mapping") - - dc := kc.GetDynamicClient(schema.GroupVersionResource{ - Group: credentialResource.GroupVersionKind().Group, - Version: credentialResource.GroupVersionKind().Version, - Resource: mapping.Resource.Resource, - }) - - exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - Expect(err).NotTo(HaveOccurred(), "failed to get azure credential secret") - } - - if exists == nil { - if _, err := dc.Create(ctx, credentialResource, metav1.CreateOptions{}); err != nil { - Expect(err).NotTo(HaveOccurred(), "failed to create azure credential secret") - } - } - } -} - -// PopulateHostedTemplateVars populates the environment variables required for -// the AWS hosted CP template by querying the standalone CP cluster with the -// given kubeclient. 
-func PopulateHostedTemplateVars(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) { - GinkgoHelper() - - c := getAWSClusterClient(kc) - awsCluster, err := c.Get(ctx, clusterName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster") - - vpcID, found, err := unstructured.NestedString(awsCluster.Object, "spec", "network", "vpc", "id") - Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster VPC ID") - Expect(found).To(BeTrue(), "AWS cluster has no VPC ID") - - subnets, found, err := unstructured.NestedSlice(awsCluster.Object, "spec", "network", "subnets") - Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster subnets") - Expect(found).To(BeTrue(), "AWS cluster has no subnets") - - subnet, ok := subnets[0].(map[string]any) - Expect(ok).To(BeTrue(), "failed to cast subnet to map") - - subnetID, ok := subnet["resourceID"].(string) - Expect(ok).To(BeTrue(), "failed to cast subnet ID to string") - - subnetAZ, ok := subnet["availabilityZone"].(string) - Expect(ok).To(BeTrue(), "failed to cast subnet availability zone to string") - - securityGroupID, found, err := unstructured.NestedString( - awsCluster.Object, "status", "networkStatus", "securityGroups", "node", "id") - Expect(err).NotTo(HaveOccurred(), "failed to get AWS cluster security group ID") - Expect(found).To(BeTrue(), "AWS cluster has no security group ID") - - GinkgoT().Setenv(managedcluster.EnvVarAWSVPCID, vpcID) - GinkgoT().Setenv(managedcluster.EnvVarAWSSubnetID, subnetID) - GinkgoT().Setenv(managedcluster.EnvVarAWSSubnetAvailabilityZone, subnetAZ) - GinkgoT().Setenv(managedcluster.EnvVarAWSSecurityGroupID, securityGroupID) -} - -func PatchAWSClusterReady(ctx context.Context, kc *kubeclient.KubeClient, clusterName string) error { - GinkgoHelper() - - c := getAWSClusterClient(kc) - - trueStatus := map[string]any{ - "status": map[string]any{ - "ready": true, - }, - } - - patchBytes, err := json.Marshal(trueStatus) - Expect(err).NotTo(HaveOccurred(), "failed to marshal patch bytes") - - _, err = c.Patch(ctx, clusterName, types.MergePatchType, - patchBytes, metav1.PatchOptions{}, "status") - if err != nil { - return err - } - - return nil -} - -func getAWSClusterClient(kc *kubeclient.KubeClient) dynamic.ResourceInterface { - return kc.GetDynamicClient(schema.GroupVersionResource{ - Group: "infrastructure.cluster.x-k8s.io", - Version: "v1beta2", - Resource: "awsclusters", - }) -} diff --git a/test/managedcluster/vsphere/vsphere.go b/test/managedcluster/vsphere/vsphere.go deleted file mode 100644 index 620a42cfb..000000000 --- a/test/managedcluster/vsphere/vsphere.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2024 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package vsphere - -import ( - "context" - "fmt" - "os" - - "github.com/Mirantis/hmc/test/kubeclient" - "github.com/Mirantis/hmc/test/managedcluster" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" -) - -func CreateSecret(kc *kubeclient.KubeClient, secretName string) error { - ctx := context.Background() - _, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get(ctx, secretName, metav1.GetOptions{}) - - if !apierrors.IsNotFound(err) { - return nil - } - username := os.Getenv("VSPHERE_USER") - password := os.Getenv("VSPHERE_PASSWORD") - - _, err = kc.Client.CoreV1().Secrets(kc.Namespace).Create(ctx, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - }, - StringData: map[string]string{ - "username": username, - "password": password, - }, - Type: corev1.SecretTypeOpaque, - }, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to create vSphere credentials secret: %w", err) - } - - return nil -} - -func CreateClusterIdentity(kc *kubeclient.KubeClient, secretName string, identityName string) error { - ctx := context.Background() - client, err := dynamic.NewForConfig(kc.Config) - if err != nil { - return fmt.Errorf("failed to create dynamic client: %w", err) - } - - gvr := schema.GroupVersionResource{ - Group: "infrastructure.cluster.x-k8s.io", - Version: "v1beta1", - Resource: "vsphereclusteridentities", - } - - clusterIdentity := &unstructured.Unstructured{ - Object: map[string]any{ - "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", - "kind": "VSphereClusterIdentity", - "metadata": map[string]any{ - "name": identityName, - }, - "spec": map[string]any{ - "secretName": secretName, - "allowedNamespaces": map[string]any{ - "selector": map[string]any{ - "matchLabels": map[string]any{}, - }, - }, - }, - }, - } - - result, err := client.Resource(gvr).Create(ctx, clusterIdentity, metav1.CreateOptions{}) - if err != nil { - fmt.Printf("%+v", result) - return fmt.Errorf("failed to create vsphereclusteridentity: %w", err) - } - - return nil -} - -func CheckEnv() { - managedcluster.ValidateDeploymentVars([]string{ - "VSPHERE_USER", - "VSPHERE_PASSWORD", - "VSPHERE_SERVER", - "VSPHERE_THUMBPRINT", - "VSPHERE_DATACENTER", - "VSPHERE_DATASTORE", - "VSPHERE_RESOURCEPOOL", - "VSPHERE_FOLDER", - "VSPHERE_CONTROL_PLANE_ENDPOINT", - "VSPHERE_VM_TEMPLATE", - "VSPHERE_NETWORK", - "VSPHERE_SSH_KEY", - }) -} diff --git a/test/utils/utils.go b/test/utils/utils.go index 6c45e714e..8c64a729a 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -27,6 +27,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const ( + HMCControllerLabel = "app.kubernetes.io/name=hmc" +) + // Run executes the provided command within this context and returns it's // output. Run does not wait for the command to finish, use Wait instead. 
func Run(cmd *exec.Cmd) ([]byte, error) { From ba1d43a31304f1cd4bab0ee061fa786237a9a731 Mon Sep 17 00:00:00 2001 From: Kyle Squizzato Date: Thu, 10 Oct 2024 12:28:09 -0700 Subject: [PATCH 21/29] Update workflow to use pull_request_target Signed-off-by: Kyle Squizzato --- .github/workflows/build_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index e426a683c..c08236bb8 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -1,6 +1,6 @@ name: CI on: - pull_request: + pull_request_target: types: - labeled - opened From c34be1b29581b4d78a379bfc0e35314fdecf04ef Mon Sep 17 00:00:00 2001 From: Kyle Squizzato Date: Thu, 10 Oct 2024 17:04:39 -0700 Subject: [PATCH 22/29] Update TODO comments, validate both Azure controllers Signed-off-by: Kyle Squizzato --- .github/workflows/build_test.yml | 4 ++++ .../templates/k0scontrolplane.yaml | 2 -- test/e2e/e2e_suite_test.go | 12 +++++++++--- test/e2e/managedcluster/common.go | 3 +-- test/e2e/managedcluster/validate_deleted.go | 2 +- test/e2e/provider_aws_test.go | 4 ++-- test/e2e/provider_vsphere_test.go | 2 +- 7 files changed, 18 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index c08236bb8..7e24c3767 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -12,6 +12,10 @@ on: paths-ignore: - 'config/**' - '**.md' + push: + tags: + - '*' + env: GO_VERSION: '1.22' REGISTRY_REPO: 'oci://ghcr.io/mirantis/hmc/charts-ci' diff --git a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml index 708ce8094..038fb1a93 100644 --- a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml +++ b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml @@ -114,10 +114,8 @@ spec: images: driver: tag: v3.1.2 - repo: "registry.k8s.io/csi-vsphere/driver" syncer: tag: v3.1.2 - repo: "registry.k8s.io/csi-vsphere/syncer" machineTemplate: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index eef3a1445..8f0c1ee3f 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -105,16 +105,22 @@ func verifyControllersUp(kc *kubeclient.KubeClient) error { } func validateController(kc *kubeclient.KubeClient, labelSelector string, name string) error { + controllerItems := 1 + if strings.Contains(labelSelector, managedcluster.GetProviderLabel(managedcluster.ProviderAzure)) { + // Azure provider has two controllers. 
+ controllerItems = 2 + } + deployList, err := kc.Client.AppsV1().Deployments(kc.Namespace).List(context.Background(), metav1.ListOptions{ LabelSelector: labelSelector, - Limit: 1, + Limit: int64(controllerItems), }) if err != nil { return fmt.Errorf("failed to list %s controller deployments: %w", name, err) } - if len(deployList.Items) < 1 { - return fmt.Errorf("expected at least 1 %s controller deployment", name) + if len(deployList.Items) < controllerItems { + return fmt.Errorf("expected at least %d %s controller deployments, got %d", controllerItems, name, len(deployList.Items)) } deployment := deployList.Items[0] diff --git a/test/e2e/managedcluster/common.go b/test/e2e/managedcluster/common.go index 2e300f2cb..f237f855f 100644 --- a/test/e2e/managedcluster/common.go +++ b/test/e2e/managedcluster/common.go @@ -51,8 +51,7 @@ func PatchHostedClusterReady(kc *kubeclient.KubeClient, provider ProviderType, c version = "v1beta1" resource = "azureclusters" case ProviderVSphere: - version = "v1beta1" - resource = "vsphereclusters" + return default: Fail(fmt.Sprintf("unsupported provider: %s", provider)) } diff --git a/test/e2e/managedcluster/validate_deleted.go b/test/e2e/managedcluster/validate_deleted.go index 7ceeb61ae..dce3211f6 100644 --- a/test/e2e/managedcluster/validate_deleted.go +++ b/test/e2e/managedcluster/validate_deleted.go @@ -36,7 +36,7 @@ func validateClusterDeleted(ctx context.Context, kc *kubeclient.KubeClient, clus if cluster != nil { phase, _, _ := unstructured.NestedString(cluster.Object, "status", "phase") if phase != "Deleting" { - // TODO: We should have a threshold error system for situations + // TODO(#474): We should have a threshold error system for situations // like this, we probably don't want to wait the full Eventually // for something like this, but we can't immediately fail the test // either. diff --git a/test/e2e/provider_aws_test.go b/test/e2e/provider_aws_test.go index 552f53eca..048eaed9d 100644 --- a/test/e2e/provider_aws_test.go +++ b/test/e2e/provider_aws_test.go @@ -98,8 +98,8 @@ var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Order // Download the KUBECONFIG for the standalone cluster and load it // so we can call Make targets against this cluster. - // TODO: Ideally we shouldn't use Make here and should just convert - // these Make targets into Go code, but this will require a + // TODO(#472): Ideally we shouldn't use Make here and should just + // convert these Make targets into Go code, but this will require a // helmclient. var kubeCfgPath string kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), clusterName) diff --git a/test/e2e/provider_vsphere_test.go b/test/e2e/provider_vsphere_test.go index 613ab1cfe..a6079665d 100644 --- a/test/e2e/provider_vsphere_test.go +++ b/test/e2e/provider_vsphere_test.go @@ -59,7 +59,7 @@ var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere" // VSphere doesn't have any form of cleanup outside of reconciling a // cluster deletion so we need to keep the test active while we wait // for CAPV to clean up the resources. - // TODO: Add an exterior cleanup mechanism for VSphere like + // TODO(#473) Add an exterior cleanup mechanism for VSphere like // 'dev-aws-nuke' to clean up resources in the event that the test // fails to do so. 
if deleteFunc != nil && !noCleanup() { From e2ffd2be698e778ea41ac788024e9e5ba486e92f Mon Sep 17 00:00:00 2001 From: Kyle Squizzato Date: Fri, 11 Oct 2024 10:18:56 -0700 Subject: [PATCH 23/29] Iterate over deployList objects Signed-off-by: Kyle Squizzato --- test/e2e/e2e_suite_test.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 8f0c1ee3f..3c61a50bf 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -123,19 +123,19 @@ func validateController(kc *kubeclient.KubeClient, labelSelector string, name st return fmt.Errorf("expected at least %d %s controller deployments, got %d", controllerItems, name, len(deployList.Items)) } - deployment := deployList.Items[0] - - // Ensure the deployment is not being deleted. - if deployment.DeletionTimestamp != nil { - return fmt.Errorf("controller pod: %s deletion timestamp should be nil, got: %v", - deployment.Name, deployment.DeletionTimestamp) - } - // Ensure the deployment is running and has the expected name. - if !strings.Contains(deployment.Name, "controller-manager") { - return fmt.Errorf("controller deployment name %s does not contain 'controller-manager'", deployment.Name) - } - if deployment.Status.ReadyReplicas < 1 { - return fmt.Errorf("controller deployment: %s does not yet have any ReadyReplicas", deployment.Name) + for _, deployment := range deployList.Items { + // Ensure the deployment is not being deleted. + if deployment.DeletionTimestamp != nil { + return fmt.Errorf("controller pod: %s deletion timestamp should be nil, got: %v", + deployment.Name, deployment.DeletionTimestamp) + } + // Ensure the deployment is running and has the expected name. + if !strings.Contains(deployment.Name, "controller-manager") { + return fmt.Errorf("controller deployment name %s does not contain 'controller-manager'", deployment.Name) + } + if deployment.Status.ReadyReplicas < 1 { + return fmt.Errorf("controller deployment: %s does not yet have any ReadyReplicas", deployment.Name) + } } return nil From 76f1b294b7c46cb6e02f1c3962272ae8b1657334 Mon Sep 17 00:00:00 2001 From: Kyle Squizzato Date: Thu, 17 Oct 2024 15:32:43 -0700 Subject: [PATCH 24/29] Strip leading v from VERSION in Get outputs job Signed-off-by: Kyle Squizzato --- .github/workflows/build_test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml index 7e24c3767..c524221d8 100644 --- a/.github/workflows/build_test.yml +++ b/.github/workflows/build_test.yml @@ -60,7 +60,8 @@ jobs: - name: Get outputs id: vars run: | - echo "version=$(git describe --tags --always)" >> $GITHUB_OUTPUT + GIT_VERSION=$(git describe --tags --always) + echo "version=${GIT_VERSION:1}" >> $GITHUB_OUTPUT echo "clustername=ci-$(date +%s | cut -b6-10)" >> $GITHUB_OUTPUT - name: Build and push HMC controller image uses: docker/build-push-action@v6 From 7f73bfb4a4dc254a9f318fd466fc913ca4f031c6 Mon Sep 17 00:00:00 2001 From: Randy Bias Date: Thu, 17 Oct 2024 10:25:47 +0800 Subject: [PATCH 25/29] Update README.md Cleaned up the introduction in the README and made the reference to 0x2A as the internal codename for HMC clearer.
Signed-off-by: Randy Bias --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 329d2f01d..0dcbba244 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,17 @@ -# Mirantis Hybrid Multi Cluster +# Mirantis Hybrid Multi Cluster (HMC), Codename: Project 0x2A ## Overview -Mirantis Hybrid Multi Cluster is part of Mirantis Project 2A which is focused on delivering a +Mirantis Hybrid Multi Cluster is part of Mirantis Project 0x2A which is focused on delivering an open source approach to providing an enterprise-grade multi-cluster Kubernetes management solution -based entirely on standard open source tooling. +based entirely on standard open source tooling that works across private or public clouds. + +We like to say that Project 0x2A (42) is the answer to life, the universe, and everything ... +Or, at least, the Kubernetes sprawl we find ourselves faced with in real life! ## Documentation -Detailed documentation is available in [Project 2A Docs](https://mirantis.github.io/project-2a-docs/) +Detailed documentation is available in [Project 0x2A Docs](https://mirantis.github.io/project-2a-docs/) ## Installation From c36f5c9354f5237bd5b922d6e2e5e0b87e5f5732 Mon Sep 17 00:00:00 2001 From: Slava Lysunkin Date: Tue, 15 Oct 2024 14:23:26 -0500 Subject: [PATCH 26/29] Fix an issue with EKS cluster removal --- .../controller/managedcluster_controller.go | 31 ++++++++++--------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index e29981f24..a55303346 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -62,10 +62,6 @@ type ManagedClusterReconciler struct { DynamicClient *dynamic.DynamicClient } -type providerSchema struct { - machine, cluster schema.GroupVersionKind -} - var ( gvkAWSCluster = schema.GroupVersionKind{ Group: "infrastructure.cluster.x-k8s.io", @@ -608,24 +604,31 @@ func (r *ManagedClusterReconciler) releaseCluster(ctx context.Context, namespace return err } + for _, provider := range providers.BootstrapProviders { + if provider.Name == "eks" { + // no need to do anything for EKS clusters + return nil + } + } + + providerGVKs := map[string]schema.GroupVersionKind{ + "aws": gvkAWSCluster, + "azure": gvkAzureCluster, } // Associate the provider with its GVK - for _, provider := range providers { + for _, provider := range providers.InfrastructureProviders { gvk, ok := providerGVKs[provider.Name] if !ok { continue } - cluster, err := r.getCluster(ctx, namespace, name, gvk.cluster) + cluster, err := r.getCluster(ctx, namespace, name, gvk) if err != nil { return err } - found, err := r.machinesAvailable(ctx, namespace, cluster.Name, gvk.machine) + found, err := r.objectsAvailable(ctx, namespace, cluster.Name, gvkMachine) if err != nil { return err } @@ -638,15 +641,15 @@ return nil } -func (r *ManagedClusterReconciler) getProviders(ctx context.Context, templateNamespace, templateName string) ([]hmc.ProviderTuple, error) { +func (r *ManagedClusterReconciler) getProviders(ctx context.Context, templateNamespace, templateName string) (hmc.ProvidersTupled, error) { template := &hmc.ClusterTemplate{} templateRef :=
client.ObjectKey{Name: templateName, Namespace: templateNamespace} if err := r.Get(ctx, templateRef, template); err != nil { ctrl.LoggerFrom(ctx).Error(err, "Failed to get ClusterTemplate", "template namespace", templateNamespace, "template name", templateName) - return nil, err + return hmc.ProvidersTupled{}, err } - return template.Status.Providers.InfrastructureProviders, nil + return template.Status.Providers, nil } func (r *ManagedClusterReconciler) getCluster(ctx context.Context, namespace, name string, gvk schema.GroupVersionKind) (*metav1.PartialObjectMetadata, error) { @@ -679,7 +682,7 @@ func (r *ManagedClusterReconciler) removeClusterFinalizer(ctx context.Context, c return nil } -func (r *ManagedClusterReconciler) machinesAvailable(ctx context.Context, namespace, clusterName string, gvk schema.GroupVersionKind) (bool, error) { +func (r *ManagedClusterReconciler) objectsAvailable(ctx context.Context, namespace, clusterName string, gvk schema.GroupVersionKind) (bool, error) { opts := &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{hmc.ClusterNameLabelKey: clusterName}), Namespace: namespace, From c47254db440188cbe823654fd48aa59be9ab69d0 Mon Sep 17 00:00:00 2001 From: Andrei Pavlov Date: Fri, 18 Oct 2024 13:10:19 +0700 Subject: [PATCH 27/29] Update template versions for 0.0.3 Signed-off-by: Andrei Pavlov --- .../provider/cluster-api-provider-aws/Chart.yaml | 2 +- .../cluster-api-provider-azure/Chart.yaml | 2 +- .../cluster-api-provider-vsphere/Chart.yaml | 3 +-- templates/provider/cluster-api/Chart.yaml | 2 +- templates/provider/hmc-templates/Chart.yaml | 7 +------ .../provider/hmc-templates/files/release.yaml | 16 ++++++++-------- .../templates/cluster-api-provider-aws.yaml | 4 ++-- .../templates/cluster-api-provider-azure.yaml | 4 ++-- .../templates/cluster-api-provider-vsphere.yaml | 4 ++-- .../files/templates/cluster-api.yaml | 4 ++-- .../hmc-templates/files/templates/hmc.yaml | 4 ++-- .../hmc-templates/files/templates/k0smotron.yaml | 4 ++-- templates/provider/hmc/Chart.yaml | 7 +------ templates/provider/k0smotron/Chart.yaml | 2 +- 14 files changed, 27 insertions(+), 38 deletions(-) diff --git a/templates/provider/cluster-api-provider-aws/Chart.yaml b/templates/provider/cluster-api-provider-aws/Chart.yaml index 1645247aa..b3b3ab54f 100644 --- a/templates/provider/cluster-api-provider-aws/Chart.yaml +++ b/templates/provider/cluster-api-provider-aws/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/templates/provider/cluster-api-provider-azure/Chart.yaml b/templates/provider/cluster-api-provider-azure/Chart.yaml index 1836ecc63..172b7eee3 100644 --- a/templates/provider/cluster-api-provider-azure/Chart.yaml +++ b/templates/provider/cluster-api-provider-azure/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/templates/provider/cluster-api-provider-vsphere/Chart.yaml b/templates/provider/cluster-api-provider-vsphere/Chart.yaml index ec5260231..4a4db32ab 100644 --- a/templates/provider/cluster-api-provider-vsphere/Chart.yaml +++ b/templates/provider/cluster-api-provider-vsphere/Chart.yaml @@ -13,12 +13,11 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. appVersion: "1.11.1" annotations: - hmc.mirantis.com/type: provider hmc.mirantis.com/infrastructure-providers: vsphere diff --git a/templates/provider/cluster-api/Chart.yaml b/templates/provider/cluster-api/Chart.yaml index c7083c864..e2fb4937d 100644 --- a/templates/provider/cluster-api/Chart.yaml +++ b/templates/provider/cluster-api/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. diff --git a/templates/provider/hmc-templates/Chart.yaml b/templates/provider/hmc-templates/Chart.yaml index e1c64c662..b98df9e22 100644 --- a/templates/provider/hmc-templates/Chart.yaml +++ b/templates/provider/hmc-templates/Chart.yaml @@ -13,9 +13,4 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: "0.1.0" +version: 0.0.3 diff --git a/templates/provider/hmc-templates/files/release.yaml b/templates/provider/hmc-templates/files/release.yaml index e6de2946c..10a1739da 100644 --- a/templates/provider/hmc-templates/files/release.yaml +++ b/templates/provider/hmc-templates/files/release.yaml @@ -1,23 +1,23 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: Release metadata: - name: hmc-0-0-2 + name: hmc-0-0-3 annotations: helm.sh/resource-policy: keep spec: - version: 0.0.2 + version: 0.0.3 hmc: - template: hmc-0-0-1 + template: hmc-0-0-3 capi: - template: cluster-api-0-0-1 + template: cluster-api-0-0-2 providers: - name: k0smotron - template: k0smotron-0-0-1 + template: k0smotron-0-0-2 - name: cluster-api-provider-azure - template: cluster-api-provider-azure-0-0-1 + template: cluster-api-provider-azure-0-0-2 - name: cluster-api-provider-vsphere - template: cluster-api-provider-vsphere-0-0-1 + template: cluster-api-provider-vsphere-0-0-2 - name: cluster-api-provider-aws - template: cluster-api-provider-aws-0-0-1 + template: cluster-api-provider-aws-0-0-2 - name: projectsveltos template: projectsveltos-0-40-0 diff --git a/templates/provider/hmc-templates/files/templates/cluster-api-provider-aws.yaml b/templates/provider/hmc-templates/files/templates/cluster-api-provider-aws.yaml index 5a5a61baf..57aa9e415 100644 --- a/templates/provider/hmc-templates/files/templates/cluster-api-provider-aws.yaml +++ b/templates/provider/hmc-templates/files/templates/cluster-api-provider-aws.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ProviderTemplate metadata: - name: cluster-api-provider-aws-0-0-1 + name: cluster-api-provider-aws-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: cluster-api-provider-aws - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/cluster-api-provider-azure.yaml b/templates/provider/hmc-templates/files/templates/cluster-api-provider-azure.yaml index d4b8b8a4d..797f36b8b 100644 --- a/templates/provider/hmc-templates/files/templates/cluster-api-provider-azure.yaml +++ b/templates/provider/hmc-templates/files/templates/cluster-api-provider-azure.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ProviderTemplate metadata: - name: cluster-api-provider-azure-0-0-1 + name: cluster-api-provider-azure-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: cluster-api-provider-azure - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/cluster-api-provider-vsphere.yaml b/templates/provider/hmc-templates/files/templates/cluster-api-provider-vsphere.yaml index 6c2b51337..981fd4799 100644 --- a/templates/provider/hmc-templates/files/templates/cluster-api-provider-vsphere.yaml +++ b/templates/provider/hmc-templates/files/templates/cluster-api-provider-vsphere.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ProviderTemplate metadata: - name: cluster-api-provider-vsphere-0-0-1 + name: cluster-api-provider-vsphere-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: cluster-api-provider-vsphere - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/cluster-api.yaml b/templates/provider/hmc-templates/files/templates/cluster-api.yaml index 8066887c1..cd9d10923 100644 --- a/templates/provider/hmc-templates/files/templates/cluster-api.yaml +++ b/templates/provider/hmc-templates/files/templates/cluster-api.yaml @@ -1,10 +1,10 
@@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ProviderTemplate metadata: - name: cluster-api-0-0-1 + name: cluster-api-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: cluster-api - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/hmc.yaml b/templates/provider/hmc-templates/files/templates/hmc.yaml index 0c2d923f2..5344bbf58 100644 --- a/templates/provider/hmc-templates/files/templates/hmc.yaml +++ b/templates/provider/hmc-templates/files/templates/hmc.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ProviderTemplate metadata: - name: hmc-0-0-1 + name: hmc-0-0-3 annotations: helm.sh/resource-policy: keep spec: helm: chartName: hmc - chartVersion: 0.0.1 + chartVersion: 0.0.3 diff --git a/templates/provider/hmc-templates/files/templates/k0smotron.yaml b/templates/provider/hmc-templates/files/templates/k0smotron.yaml index ee4a7d170..440c579a9 100644 --- a/templates/provider/hmc-templates/files/templates/k0smotron.yaml +++ b/templates/provider/hmc-templates/files/templates/k0smotron.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ProviderTemplate metadata: - name: k0smotron-0-0-1 + name: k0smotron-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: k0smotron - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc/Chart.yaml b/templates/provider/hmc/Chart.yaml index 741a762a8..f84afe240 100644 --- a/templates/provider/hmc/Chart.yaml +++ b/templates/provider/hmc/Chart.yaml @@ -13,12 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "0.1.0" +version: 0.0.3 dependencies: - name: flux2 diff --git a/templates/provider/k0smotron/Chart.yaml b/templates/provider/k0smotron/Chart.yaml index c453ca418..47ce14d17 100644 --- a/templates/provider/k0smotron/Chart.yaml +++ b/templates/provider/k0smotron/Chart.yaml @@ -13,7 +13,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
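The version bumps in the patch above (and the k0s bump in the next one) have to keep three things in step: each chart's Chart.yaml version, the ProviderTemplate or ClusterTemplate object named after it, and the Release object that ties the template names together; the naming convention throughout is <chartName>-<chartVersion with dots replaced by dashes>. The following is a minimal, hypothetical consistency check, not part of the patch series: it assumes the generated ProviderTemplateList type with Spec.Helm.ChartName/ChartVersion fields (names inferred from the YAML manifests above), the AddToScheme helper from the usual kubebuilder scaffold, and access to a management cluster.

package main

import (
	"context"
	"fmt"
	"strings"

	hmc "github.com/Mirantis/hmc/api/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

// checkTemplateNames reports any ProviderTemplate whose name has drifted from
// the "<chartName>-<chartVersion with dots as dashes>" naming used by the
// files under templates/provider/hmc-templates/files/templates.
func checkTemplateNames(ctx context.Context, c client.Client) error {
	list := &hmc.ProviderTemplateList{} // assumed generated List type
	if err := c.List(ctx, list); err != nil {
		return fmt.Errorf("failed to list ProviderTemplates: %w", err)
	}
	for _, tmpl := range list.Items {
		want := tmpl.Spec.Helm.ChartName + "-" + strings.ReplaceAll(tmpl.Spec.Helm.ChartVersion, ".", "-")
		if tmpl.Name != want {
			fmt.Printf("ProviderTemplate %q points at chart %s %s; expected name %q\n",
				tmpl.Name, tmpl.Spec.Helm.ChartName, tmpl.Spec.Helm.ChartVersion, want)
		}
	}
	return nil
}

func main() {
	scheme := runtime.NewScheme()
	if err := hmc.AddToScheme(scheme); err != nil { // assumed scaffold helper
		panic(err)
	}
	cfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}
	c, err := client.New(cfg, client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}
	if err := checkTemplateNames(context.Background(), c); err != nil {
		panic(err)
	}
}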
From a4c12b1d8463b63bdb27b2a3540f9e4a9a9cb728 Mon Sep 17 00:00:00 2001 From: Andrei Pavlov Date: Fri, 18 Oct 2024 15:31:29 +0700 Subject: [PATCH 28/29] Update k0s version to 1.31.1+k0s.1 Signed-off-by: Andrei Pavlov --- config/dev/aws-managedcluster.yaml | 2 +- config/dev/azure-managedcluster.yaml | 2 +- config/dev/eks-managedcluster.yaml | 2 +- config/dev/vsphere-managedcluster.yaml | 2 +- templates/cluster/aws-hosted-cp/Chart.yaml | 4 ++-- templates/cluster/aws-hosted-cp/values.yaml | 2 +- templates/cluster/aws-standalone-cp/Chart.yaml | 4 ++-- templates/cluster/aws-standalone-cp/values.yaml | 2 +- templates/cluster/azure-hosted-cp/Chart.yaml | 4 ++-- templates/cluster/azure-hosted-cp/values.yaml | 2 +- templates/cluster/azure-standalone-cp/Chart.yaml | 4 ++-- templates/cluster/azure-standalone-cp/values.yaml | 2 +- templates/cluster/vsphere-hosted-cp/Chart.yaml | 4 ++-- templates/cluster/vsphere-hosted-cp/values.yaml | 6 +----- templates/cluster/vsphere-standalone-cp/Chart.yaml | 4 ++-- templates/cluster/vsphere-standalone-cp/values.yaml | 2 +- .../{aws-hosted-cp-0-0-1.yaml => aws-hosted-cp-0-0-2.yaml} | 4 ++-- ...tandalone-cp-0-0-1.yaml => aws-standalone-cp-0-0-2.yaml} | 4 ++-- ...zure-hosted-cp-0-0-1.yaml => azure-hosted-cp-0-0-2.yaml} | 4 ++-- ...ndalone-cp-0-0-1.yaml => azure-standalone-cp-0-0-2.yaml} | 4 ++-- ...re-hosted-cp-0-0-1.yaml => vsphere-hosted-cp-0-0-2.yaml} | 4 ++-- ...alone-cp-0-0-1.yaml => vsphere-standalone-cp-0-0-2.yaml} | 4 ++-- test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl | 2 +- .../e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl | 4 +--- test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl | 2 +- .../managedcluster/resources/azure-standalone-cp.yaml.tpl | 2 +- .../e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl | 2 +- .../managedcluster/resources/vsphere-standalone-cp.yaml.tpl | 2 +- 28 files changed, 40 insertions(+), 46 deletions(-) rename templates/provider/hmc-templates/files/templates/{aws-hosted-cp-0-0-1.yaml => aws-hosted-cp-0-0-2.yaml} (75%) rename templates/provider/hmc-templates/files/templates/{aws-standalone-cp-0-0-1.yaml => aws-standalone-cp-0-0-2.yaml} (74%) rename templates/provider/hmc-templates/files/templates/{azure-hosted-cp-0-0-1.yaml => azure-hosted-cp-0-0-2.yaml} (75%) rename templates/provider/hmc-templates/files/templates/{azure-standalone-cp-0-0-1.yaml => azure-standalone-cp-0-0-2.yaml} (74%) rename templates/provider/hmc-templates/files/templates/{vsphere-hosted-cp-0-0-1.yaml => vsphere-hosted-cp-0-0-2.yaml} (74%) rename templates/provider/hmc-templates/files/templates/{vsphere-standalone-cp-0-0-1.yaml => vsphere-standalone-cp-0-0-2.yaml} (73%) diff --git a/config/dev/aws-managedcluster.yaml b/config/dev/aws-managedcluster.yaml index dd303141c..6400d55d3 100644 --- a/config/dev/aws-managedcluster.yaml +++ b/config/dev/aws-managedcluster.yaml @@ -18,7 +18,7 @@ spec: instanceType: t3.small workersNumber: 1 installBeachHeadServices: false - template: aws-standalone-cp-0-0-1 + template: aws-standalone-cp-0-0-2 services: - template: kyverno-3-2-6 name: kyverno diff --git a/config/dev/azure-managedcluster.yaml b/config/dev/azure-managedcluster.yaml index 4dfab5e69..d0e563797 100644 --- a/config/dev/azure-managedcluster.yaml +++ b/config/dev/azure-managedcluster.yaml @@ -4,7 +4,7 @@ metadata: name: azure-dev namespace: ${NAMESPACE} spec: - template: azure-standalone-cp-0-0-1 + template: azure-standalone-cp-0-0-2 credential: azure-cluster-identity-cred config: controlPlaneNumber: 1 diff --git 
a/config/dev/eks-managedcluster.yaml b/config/dev/eks-managedcluster.yaml index a6731163a..b4d8f995e 100644 --- a/config/dev/eks-managedcluster.yaml +++ b/config/dev/eks-managedcluster.yaml @@ -7,5 +7,5 @@ spec: config: region: us-east-2 workersNumber: 1 - template: aws-eks-0-0-1 + template: aws-eks-0-0-2 credential: "aws-cluster-identity-cred" diff --git a/config/dev/vsphere-managedcluster.yaml b/config/dev/vsphere-managedcluster.yaml index c9daaf311..2fbd81f37 100644 --- a/config/dev/vsphere-managedcluster.yaml +++ b/config/dev/vsphere-managedcluster.yaml @@ -4,7 +4,7 @@ metadata: name: vsphere-dev namespace: ${NAMESPACE} spec: - template: vsphere-standalone-cp-0-0-1 + template: vsphere-standalone-cp-0-0-2 credential: vsphere-cluster-identity-cred config: controlPlaneNumber: 1 diff --git a/templates/cluster/aws-hosted-cp/Chart.yaml b/templates/cluster/aws-hosted-cp/Chart.yaml index f6cc01a41..64f6d8f68 100644 --- a/templates/cluster/aws-hosted-cp/Chart.yaml +++ b/templates/cluster/aws-hosted-cp/Chart.yaml @@ -7,12 +7,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.30.4+k0s.0" +appVersion: "v1.31.1+k0s.1" annotations: hmc.mirantis.com/infrastructure-providers: aws hmc.mirantis.com/control-plane-providers: k0smotron diff --git a/templates/cluster/aws-hosted-cp/values.yaml b/templates/cluster/aws-hosted-cp/values.yaml index 4949d631b..c19e244b7 100644 --- a/templates/cluster/aws-hosted-cp/values.yaml +++ b/templates/cluster/aws-hosted-cp/values.yaml @@ -46,4 +46,4 @@ k0smotron: # K0s parameters k0s: - version: v1.30.4+k0s.0 + version: v1.31.1+k0s.1 diff --git a/templates/cluster/aws-standalone-cp/Chart.yaml b/templates/cluster/aws-standalone-cp/Chart.yaml index 0ec9d2967..01f0fcada 100644 --- a/templates/cluster/aws-standalone-cp/Chart.yaml +++ b/templates/cluster/aws-standalone-cp/Chart.yaml @@ -6,12 +6,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.30.4+k0s.0" +appVersion: "v1.31.1+k0s.1" annotations: hmc.mirantis.com/infrastructure-providers: aws hmc.mirantis.com/control-plane-providers: k0s diff --git a/templates/cluster/aws-standalone-cp/values.yaml b/templates/cluster/aws-standalone-cp/values.yaml index fd0d52ae0..13499710a 100644 --- a/templates/cluster/aws-standalone-cp/values.yaml +++ b/templates/cluster/aws-standalone-cp/values.yaml @@ -46,4 +46,4 @@ worker: # K0s parameters k0s: - version: v1.30.4+k0s.0 \ No newline at end of file + version: v1.31.1+k0s.1 diff --git a/templates/cluster/azure-hosted-cp/Chart.yaml b/templates/cluster/azure-hosted-cp/Chart.yaml index e12c1b4bb..1a614b992 100644 --- a/templates/cluster/azure-hosted-cp/Chart.yaml +++ b/templates/cluster/azure-hosted-cp/Chart.yaml @@ -7,12 +7,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.30.4+k0s.0" +appVersion: "v1.31.1+k0s.1" annotations: hmc.mirantis.com/infrastructure-providers: azure hmc.mirantis.com/control-plane-providers: k0s diff --git a/templates/cluster/azure-hosted-cp/values.yaml b/templates/cluster/azure-hosted-cp/values.yaml index 3440db477..79aa4929a 100644 --- a/templates/cluster/azure-hosted-cp/values.yaml +++ b/templates/cluster/azure-hosted-cp/values.yaml @@ -47,4 +47,4 @@ k0smotron: # K0s parameters k0s: - version: v1.30.4+k0s.0 + version: v1.31.1+k0s.1 diff --git a/templates/cluster/azure-standalone-cp/Chart.yaml b/templates/cluster/azure-standalone-cp/Chart.yaml index aaa1cc10e..16c716767 100644 --- a/templates/cluster/azure-standalone-cp/Chart.yaml +++ b/templates/cluster/azure-standalone-cp/Chart.yaml @@ -6,12 +6,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.30.4+k0s.0" +appVersion: "1.31.1+k0s.1" annotations: hmc.mirantis.com/infrastructure-providers: azure hmc.mirantis.com/control-plane-providers: k0s diff --git a/templates/cluster/azure-standalone-cp/values.yaml b/templates/cluster/azure-standalone-cp/values.yaml index 9b9bb0520..801889805 100644 --- a/templates/cluster/azure-standalone-cp/values.yaml +++ b/templates/cluster/azure-standalone-cp/values.yaml @@ -45,4 +45,4 @@ worker: # K0s parameters k0s: - version: v1.30.4+k0s.0 + version: v1.31.1+k0s.1 diff --git a/templates/cluster/vsphere-hosted-cp/Chart.yaml b/templates/cluster/vsphere-hosted-cp/Chart.yaml index 4619a40e9..63c943e5e 100644 --- a/templates/cluster/vsphere-hosted-cp/Chart.yaml +++ b/templates/cluster/vsphere-hosted-cp/Chart.yaml @@ -7,12 +7,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.30.4+k0s.0" +appVersion: "v1.31.1+k0s.1" annotations: hmc.mirantis.com/type: deployment hmc.mirantis.com/infrastructure-providers: vsphere diff --git a/templates/cluster/vsphere-hosted-cp/values.yaml b/templates/cluster/vsphere-hosted-cp/values.yaml index ca9334205..94c91bef4 100644 --- a/templates/cluster/vsphere-hosted-cp/values.yaml +++ b/templates/cluster/vsphere-hosted-cp/values.yaml @@ -34,10 +34,6 @@ memory: 4096 vmTemplate: "" network: "" -# K0s parameters -k0s: - version: v1.30.4+k0s.0 - # K0smotron parameters k0smotron: service: @@ -47,4 +43,4 @@ k0smotron: # K0s parameters k0s: - version: v1.30.4+k0s.0 + version: v1.31.1+k0s.1 diff --git a/templates/cluster/vsphere-standalone-cp/Chart.yaml b/templates/cluster/vsphere-standalone-cp/Chart.yaml index 23f270373..c96eadd20 100644 --- a/templates/cluster/vsphere-standalone-cp/Chart.yaml +++ b/templates/cluster/vsphere-standalone-cp/Chart.yaml @@ -6,12 +6,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 +version: 0.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.30.4+k0s.0" +appVersion: "v1.31.1+k0s.1" annotations: hmc.mirantis.com/type: deployment hmc.mirantis.com/infrastructure-providers: vsphere diff --git a/templates/cluster/vsphere-standalone-cp/values.yaml b/templates/cluster/vsphere-standalone-cp/values.yaml index 71415b7d1..0ee9c2d3d 100644 --- a/templates/cluster/vsphere-standalone-cp/values.yaml +++ b/templates/cluster/vsphere-standalone-cp/values.yaml @@ -47,4 +47,4 @@ worker: # K0s parameters k0s: - version: v1.30.4+k0s.0 + version: v1.31.1+k0s.1 diff --git a/templates/provider/hmc-templates/files/templates/aws-hosted-cp-0-0-1.yaml b/templates/provider/hmc-templates/files/templates/aws-hosted-cp-0-0-2.yaml similarity index 75% rename from templates/provider/hmc-templates/files/templates/aws-hosted-cp-0-0-1.yaml rename to templates/provider/hmc-templates/files/templates/aws-hosted-cp-0-0-2.yaml index 0e45d5f19..11503b2a1 100644 --- a/templates/provider/hmc-templates/files/templates/aws-hosted-cp-0-0-1.yaml +++ b/templates/provider/hmc-templates/files/templates/aws-hosted-cp-0-0-2.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ClusterTemplate metadata: - name: aws-hosted-cp-0-0-1 + name: aws-hosted-cp-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: aws-hosted-cp - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/aws-standalone-cp-0-0-1.yaml b/templates/provider/hmc-templates/files/templates/aws-standalone-cp-0-0-2.yaml similarity index 74% rename from templates/provider/hmc-templates/files/templates/aws-standalone-cp-0-0-1.yaml rename to templates/provider/hmc-templates/files/templates/aws-standalone-cp-0-0-2.yaml index 4b9cf71c9..116b2f4eb 100644 --- a/templates/provider/hmc-templates/files/templates/aws-standalone-cp-0-0-1.yaml +++ b/templates/provider/hmc-templates/files/templates/aws-standalone-cp-0-0-2.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ClusterTemplate metadata: - name: aws-standalone-cp-0-0-1 + name: aws-standalone-cp-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: aws-standalone-cp - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/azure-hosted-cp-0-0-1.yaml b/templates/provider/hmc-templates/files/templates/azure-hosted-cp-0-0-2.yaml similarity index 75% rename from templates/provider/hmc-templates/files/templates/azure-hosted-cp-0-0-1.yaml rename to templates/provider/hmc-templates/files/templates/azure-hosted-cp-0-0-2.yaml index 09444efb5..dd7570c10 100644 --- a/templates/provider/hmc-templates/files/templates/azure-hosted-cp-0-0-1.yaml +++ b/templates/provider/hmc-templates/files/templates/azure-hosted-cp-0-0-2.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ClusterTemplate metadata: - name: azure-hosted-cp-0-0-1 + name: azure-hosted-cp-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: azure-hosted-cp - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/azure-standalone-cp-0-0-1.yaml b/templates/provider/hmc-templates/files/templates/azure-standalone-cp-0-0-2.yaml similarity index 74% rename from templates/provider/hmc-templates/files/templates/azure-standalone-cp-0-0-1.yaml rename to templates/provider/hmc-templates/files/templates/azure-standalone-cp-0-0-2.yaml index a3400be17..d07a5d866 100644 --- a/templates/provider/hmc-templates/files/templates/azure-standalone-cp-0-0-1.yaml +++ 
b/templates/provider/hmc-templates/files/templates/azure-standalone-cp-0-0-2.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ClusterTemplate metadata: - name: azure-standalone-cp-0-0-1 + name: azure-standalone-cp-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: azure-standalone-cp - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/vsphere-hosted-cp-0-0-1.yaml b/templates/provider/hmc-templates/files/templates/vsphere-hosted-cp-0-0-2.yaml similarity index 74% rename from templates/provider/hmc-templates/files/templates/vsphere-hosted-cp-0-0-1.yaml rename to templates/provider/hmc-templates/files/templates/vsphere-hosted-cp-0-0-2.yaml index 65e39e5ce..327a7c2c5 100644 --- a/templates/provider/hmc-templates/files/templates/vsphere-hosted-cp-0-0-1.yaml +++ b/templates/provider/hmc-templates/files/templates/vsphere-hosted-cp-0-0-2.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ClusterTemplate metadata: - name: vsphere-hosted-cp-0-0-1 + name: vsphere-hosted-cp-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: vsphere-hosted-cp - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/templates/provider/hmc-templates/files/templates/vsphere-standalone-cp-0-0-1.yaml b/templates/provider/hmc-templates/files/templates/vsphere-standalone-cp-0-0-2.yaml similarity index 73% rename from templates/provider/hmc-templates/files/templates/vsphere-standalone-cp-0-0-1.yaml rename to templates/provider/hmc-templates/files/templates/vsphere-standalone-cp-0-0-2.yaml index 901cb2aab..0bb171972 100644 --- a/templates/provider/hmc-templates/files/templates/vsphere-standalone-cp-0-0-1.yaml +++ b/templates/provider/hmc-templates/files/templates/vsphere-standalone-cp-0-0-2.yaml @@ -1,10 +1,10 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ClusterTemplate metadata: - name: vsphere-standalone-cp-0-0-1 + name: vsphere-standalone-cp-0-0-2 annotations: helm.sh/resource-policy: keep spec: helm: chartName: vsphere-standalone-cp - chartVersion: 0.0.1 + chartVersion: 0.0.2 diff --git a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl index 072ab3abd..b8758c9ab 100644 --- a/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-hosted-cp-0-0-1 + template: aws-hosted-cp-0-0-2 credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: diff --git a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl index f2081b755..8d2ceab31 100644 --- a/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: aws-standalone-cp-0-0-1 + template: aws-standalone-cp-0-0-2 credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: @@ -17,5 +17,3 @@ spec: instanceType: ${AWS_INSTANCE_TYPE:=t3.small} worker: instanceType: ${AWS_INSTANCE_TYPE:=t3.small} - - diff --git a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl index c28f703da..7cbe61b71 100644 --- a/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl +++ 
b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: - template: azure-hosted-cp-0-0-1 + template: azure-hosted-cp-0-0-2 credential: ${AZURE_CLUSTER_IDENTITY}-cred config: location: "${AZURE_REGION}" diff --git a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl index 5906d4f2b..7f1083a83 100644 --- a/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -4,7 +4,7 @@ metadata: name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: - template: azure-standalone-cp-0-0-1 + template: azure-standalone-cp-0-0-2 credential: ${AZURE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: 1 diff --git a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl index c524f010b..03a201cce 100644 --- a/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-hosted-cp-0-0-1 + template: vsphere-hosted-cp-0-0-2 credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} diff --git a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl index 1981fe8e8..47c7dc538 100644 --- a/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: name: ${MANAGED_CLUSTER_NAME} spec: - template: vsphere-standalone-cp-0-0-1 + template: vsphere-standalone-cp-0-0-2 credential: ${VSPHERE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} From 72c29324037d75b0aa1eeaec64b9302113833a8d Mon Sep 17 00:00:00 2001 From: Wahab Ali Date: Wed, 16 Oct 2024 10:05:29 -0400 Subject: [PATCH 29/29] Reconcile MultiClusterService --- Makefile | 8 +- api/v1alpha1/managedcluster_types.go | 7 +- api/v1alpha1/multiclusterservice_types.go | 14 +- config/dev/aws-managedcluster.yaml | 1 + config/dev/multiclusterservice.yaml | 13 ++ .../controller/managedcluster_controller.go | 103 ++------- .../multiclusterservice_controller.go | 161 +++++++++++++- .../multiclusterservice_controller_test.go | 203 +++++++++++++++--- internal/sveltos/profile.go | 181 ++++++++++------ .../files/templates/ingress-nginx-4-11-3.yaml | 10 + .../hmc.mirantis.com_managedclusters.yaml | 23 +- ...hmc.mirantis.com_multiclusterservices.yaml | 23 +- .../hmc/templates/rbac/controller/roles.yaml | 1 + .../Chart.lock | 0 .../Chart.yaml | 0 .../service/ingress-nginx-4-11-3/Chart.lock | 6 + .../service/ingress-nginx-4-11-3/Chart.yaml | 10 + 17 files changed, 557 insertions(+), 207 deletions(-) create mode 100644 config/dev/multiclusterservice.yaml create mode 100644 templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml rename templates/service/{ingress-nginx => ingress-nginx-4-11-0}/Chart.lock (100%) rename templates/service/{ingress-nginx => ingress-nginx-4-11-0}/Chart.yaml (100%) create mode 100644 templates/service/ingress-nginx-4-11-3/Chart.lock create mode 100644 templates/service/ingress-nginx-4-11-3/Chart.yaml diff --git a/Makefile b/Makefile index 6025e5650..9222b7c1b 100644 --- a/Makefile +++ b/Makefile @@ 
-379,6 +379,8 @@ FLUX_SOURCE_REPO_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmrepositories-$(FLUX_SOURC FLUX_SOURCE_CHART_CRD ?= $(EXTERNAL_CRD_DIR)/source-helmchart-$(FLUX_SOURCE_VERSION).yaml FLUX_HELM_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/fluxcd/helm-controller/api") | .Version') FLUX_HELM_CRD ?= $(EXTERNAL_CRD_DIR)/helm-$(FLUX_HELM_VERSION).yaml +SVELTOS_VERSION ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/projectsveltos/libsveltos") | .Version') +SVELTOS_CRD ?= $(EXTERNAL_CRD_DIR)/sveltos-$(SVELTOS_VERSION).yaml ## Tool Binaries KUBECTL ?= kubectl @@ -445,8 +447,12 @@ $(FLUX_SOURCE_REPO_CRD): $(EXTERNAL_CRD_DIR) rm -f $(FLUX_SOURCE_REPO_CRD) curl -s https://raw.githubusercontent.com/fluxcd/source-controller/$(FLUX_SOURCE_VERSION)/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml > $(FLUX_SOURCE_REPO_CRD) +$(SVELTOS_CRD): $(EXTERNAL_CRD_DIR) + rm -f $(SVELTOS_CRD) + curl -s https://raw.githubusercontent.com/projectsveltos/sveltos/$(SVELTOS_VERSION)/manifest/crds/sveltos_crds.yaml > $(SVELTOS_CRD) + .PHONY: external-crd -external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) +external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) .PHONY: kind kind: $(KIND) ## Download kind locally if necessary. diff --git a/api/v1alpha1/managedcluster_types.go b/api/v1alpha1/managedcluster_types.go index 89a7f9bf9..779c25676 100644 --- a/api/v1alpha1/managedcluster_types.go +++ b/api/v1alpha1/managedcluster_types.go @@ -71,13 +71,16 @@ type ManagedClusterSpec struct { // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=2147483646 - // Priority sets the priority for the services defined in this spec. + // ServicesPriority sets the priority for the services defined in this spec. // Higher value means higher priority and lower means lower. // In case of conflict with another object managing the service, // the one with higher priority will get to deploy its services. - Priority int32 `json:"priority,omitempty"` + ServicesPriority int32 `json:"servicesPriority,omitempty"` // DryRun specifies whether the template should be applied after validation or only validated. DryRun bool `json:"dryRun,omitempty"` + + // +kubebuilder:default:=false + // StopOnConflict specifies what to do in case of a conflict. // E.g. If another object is already managing a service. // By default the remaining services will be deployed even if conflict is detected. diff --git a/api/v1alpha1/multiclusterservice_types.go b/api/v1alpha1/multiclusterservice_types.go index 74916e7ef..5e3633447 100644 --- a/api/v1alpha1/multiclusterservice_types.go +++ b/api/v1alpha1/multiclusterservice_types.go @@ -19,6 +19,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + // MultiClusterServiceFinalizer is the finalizer applied to MultiClusterService objects. + MultiClusterServiceFinalizer = "hmc.mirantis.com/multicluster-service" + // MultiClusterServiceKind is the string representation of the MultiClusterService kind. + MultiClusterServiceKind = "MultiClusterService" +) + // ServiceSpec represents a Service to be managed type ServiceSpec struct { // Values is the helm values to be passed to the template. @@ -52,11 +59,14 @@ type MultiClusterServiceSpec struct { // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=2147483646 - // Priority sets the priority for the services defined in this spec.
+ // ServicesPriority sets the priority for the services defined in this spec. // Higher value means higher priority and lower means lower. // In case of conflict with another object managing the service, // the one with higher priority will get to deploy its services. - Priority int32 `json:"priority,omitempty"` + ServicesPriority int32 `json:"servicesPriority,omitempty"` + + // +kubebuilder:default:=false + // StopOnConflict specifies what to do in case of a conflict. // E.g. If another object is already managing a service. // By default the remaining services will be deployed even if conflict is detected. diff --git a/config/dev/aws-managedcluster.yaml b/config/dev/aws-managedcluster.yaml index 6400d55d3..c2c17b3b2 100644 --- a/config/dev/aws-managedcluster.yaml +++ b/config/dev/aws-managedcluster.yaml @@ -19,6 +19,7 @@ spec: workersNumber: 1 installBeachHeadServices: false template: aws-standalone-cp-0-0-2 + servicesPriority: 100 services: - template: kyverno-3-2-6 name: kyverno diff --git a/config/dev/multiclusterservice.yaml b/config/dev/multiclusterservice.yaml new file mode 100644 index 000000000..afcf2c9b1 --- /dev/null +++ b/config/dev/multiclusterservice.yaml @@ -0,0 +1,13 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: MultiClusterService +metadata: + name: global-ingress +spec: + servicesPriority: 1000 + clusterSelector: + matchLabels: + app.kubernetes.io/managed-by: Helm + services: + - template: ingress-nginx-4-11-3 + name: ingress-nginx + namespace: ingress-nginx diff --git a/internal/controller/managedcluster_controller.go b/internal/controller/managedcluster_controller.go index a55303346..9924ee897 100644 --- a/internal/controller/managedcluster_controller.go +++ b/internal/controller/managedcluster_controller.go @@ -48,7 +48,6 @@ import ( hmc "github.com/Mirantis/hmc/api/v1alpha1" "github.com/Mirantis/hmc/internal/helm" "github.com/Mirantis/hmc/internal/telemetry" - "github.com/Mirantis/hmc/internal/utils" ) const ( @@ -377,65 +376,12 @@ func (r *ManagedClusterReconciler) Update(ctx context.Context, managedCluster *h // updateServices reconciles services provided in ManagedCluster.Spec.Services. // TODO(https://github.com/Mirantis/hmc/issues/361): Set status to ManagedCluster object at appropriate places. func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.ManagedCluster) (ctrl.Result, error) { - l := ctrl.LoggerFrom(ctx) - opts := []sveltos.HelmChartOpts{} - - // NOTE: The Profile object will be updated with no helm - // charts if len(mc.Spec.Services) == 0. This will result in the - // helm charts being uninstalled on matching clusters if - // Profile originally had len(m.Spec.Sevices) > 0. - for _, svc := range mc.Spec.Services { - if svc.Disable { - l.Info(fmt.Sprintf("Skip adding Template (%s) to Profile (%s) because Disable=true", svc.Template, mc.Name)) - continue - } - - tmpl := &hmc.ServiceTemplate{} - tmplRef := client.ObjectKey{Name: svc.Template, Namespace: mc.Namespace} - if err := r.Get(ctx, tmplRef, tmpl); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to get Template (%s): %w", tmplRef.String(), err) - } - - source, err := r.getServiceTemplateSource(ctx, tmpl) - if err != nil { - return ctrl.Result{}, fmt.Errorf("could not get repository url: %w", err) - } - - opts = append(opts, sveltos.HelmChartOpts{ - Values: svc.Values, - RepositoryURL: source.Spec.URL, - // We don't have repository name so chart name becomes repository name. 
-			RepositoryName: tmpl.Spec.Helm.ChartName,
-			ChartName: func() string {
-				if source.Spec.Type == utils.RegistryTypeOCI {
-					return tmpl.Spec.Helm.ChartName
-				}
-				// Sveltos accepts ChartName in <repository>/<chart> format for non-OCI.
-				// We don't have a repository name, so we can use <chart>/<chart> instead.
-				// See: https://projectsveltos.github.io/sveltos/addons/helm_charts/.
-				return fmt.Sprintf("%s/%s", tmpl.Spec.Helm.ChartName, tmpl.Spec.Helm.ChartName)
-			}(),
-			ChartVersion: tmpl.Spec.Helm.ChartVersion,
-			ReleaseName:  svc.Name,
-			ReleaseNamespace: func() string {
-				if svc.Namespace != "" {
-					return svc.Namespace
-				}
-				return svc.Name
-			}(),
-			// The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is because
-			// the source.Spec.Insecure field is meant to be used for connecting to repositories
-			// over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for.
-			// See: https://github.com/fluxcd/source-controller/pull/1288
-			PlainHTTP: source.Spec.Insecure,
-		})
+	opts, err := helmChartOpts(ctx, r.Client, mc.Namespace, mc.Spec.Services)
+	if err != nil {
+		return ctrl.Result{}, err
 	}
 
 	if _, err := sveltos.ReconcileProfile(ctx, r.Client, mc.Namespace, mc.Name,
-		map[string]string{
-			hmc.FluxHelmChartNamespaceKey: mc.Namespace,
-			hmc.FluxHelmChartNameKey:      mc.Name,
-		},
 		sveltos.ReconcileProfileOpts{
 			OwnerReference: &metav1.OwnerReference{
 				APIVersion: hmc.GroupVersion.String(),
@@ -443,8 +389,14 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M
 				Name:       mc.Name,
 				UID:        mc.UID,
 			},
+			LabelSelector: metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					hmc.FluxHelmChartNamespaceKey: mc.Namespace,
+					hmc.FluxHelmChartNameKey:      mc.Name,
+				},
+			},
 			HelmChartOpts:  opts,
-			Priority:       mc.Spec.Priority,
+			Priority:       mc.Spec.ServicesPriority,
 			StopOnConflict: mc.Spec.StopOnConflict,
 		}); err != nil {
 		return ctrl.Result{}, fmt.Errorf("failed to reconcile Profile: %w", err)
@@ -458,36 +410,6 @@ func (r *ManagedClusterReconciler) updateServices(ctx context.Context, mc *hmc.M
 	return ctrl.Result{RequeueAfter: DefaultRequeueInterval}, nil
 }
 
-// getServiceTemplateSource returns the source (HelmRepository) used by the ServiceTemplate.
-// It is fetched by querying for ServiceTemplate -> HelmChart -> HelmRepository.
-func (r *ManagedClusterReconciler) getServiceTemplateSource(ctx context.Context, tmpl *hmc.ServiceTemplate) (*sourcev1.HelmRepository, error) {
-	tmplRef := client.ObjectKey{Namespace: tmpl.Namespace, Name: tmpl.Name}
-
-	if tmpl.Status.ChartRef == nil {
-		return nil, fmt.Errorf("status for ServiceTemplate (%s) has not been updated yet", tmplRef.String())
-	}
-
-	hc := &sourcev1.HelmChart{}
-	if err := r.Get(ctx, client.ObjectKey{
-		Namespace: tmpl.Status.ChartRef.Namespace,
-		Name:      tmpl.Status.ChartRef.Name,
-	}, hc); err != nil {
-		return nil, fmt.Errorf("failed to get HelmChart (%s): %w", tmplRef.String(), err)
-	}
-
-	repo := &sourcev1.HelmRepository{}
-	if err := r.Get(ctx, client.ObjectKey{
-		// Using chart's namespace because it's source
-		// (helm repository in this case) should be within the same namespace.
-		Namespace: hc.Namespace,
-		Name:      hc.Spec.SourceRef.Name,
-	}, repo); err != nil {
-		return nil, fmt.Errorf("failed to get HelmRepository (%s): %w", tmplRef.String(), err)
-	}
-
-	return repo, nil
-}
-
 func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, managedCluster *hmc.ManagedCluster, hcChart *chart.Chart) error {
 	install := action.NewInstall(actionConfig)
 	install.DryRun = true
@@ -584,6 +506,11 @@ func (r *ManagedClusterReconciler) Delete(ctx context.Context, managedCluster *h
 		return ctrl.Result{}, err
 	}
 
+	// Without explicitly deleting the Profile object, we run into a race condition
+	// which prevents Sveltos objects from being removed from the management cluster.
+	// It is detailed in https://github.com/projectsveltos/addon-controller/issues/732.
+	// We may try to remove the explicit call to Delete once a fix for it has been merged.
+	// TODO(https://github.com/Mirantis/hmc/issues/526).
 	err = sveltos.DeleteProfile(ctx, r.Client, managedCluster.Namespace, managedCluster.Name)
 	if err != nil {
 		return ctrl.Result{}, err
diff --git a/internal/controller/multiclusterservice_controller.go b/internal/controller/multiclusterservice_controller.go
index 46e5ab497..06077127b 100644
--- a/internal/controller/multiclusterservice_controller.go
+++ b/internal/controller/multiclusterservice_controller.go
@@ -16,11 +16,19 @@ package controller
 
 import (
 	"context"
+	"fmt"
 
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
 	hmc "github.com/Mirantis/hmc/api/v1alpha1"
+	"github.com/Mirantis/hmc/internal/sveltos"
+	"github.com/Mirantis/hmc/internal/utils"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1"
 )
 
 // MultiClusterServiceReconciler reconciles a MultiClusterService object
@@ -29,10 +37,157 @@ type MultiClusterServiceReconciler struct {
 }
 
 // Reconcile reconciles a MultiClusterService object.
-func (*MultiClusterServiceReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
-	_ = ctrl.LoggerFrom(ctx)
+func (r *MultiClusterServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	l := ctrl.LoggerFrom(ctx)
+	l.Info("Reconciling MultiClusterService")
 
-	// TODO(https://github.com/Mirantis/hmc/issues/455): Implement me.
+	mcsvc := &hmc.MultiClusterService{}
+	err := r.Get(ctx, req.NamespacedName, mcsvc)
+	if apierrors.IsNotFound(err) {
+		l.Info("MultiClusterService not found, ignoring since object must be deleted")
+		return ctrl.Result{}, nil
+	}
+	if err != nil {
+		l.Error(err, "Failed to get MultiClusterService")
+		return ctrl.Result{}, err
+	}
+
+	if !mcsvc.DeletionTimestamp.IsZero() {
+		l.Info("Deleting MultiClusterService")
+		return r.reconcileDelete(ctx, mcsvc)
+	}
+
+	if controllerutil.AddFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer) {
+		if err := r.Client.Update(ctx, mcsvc); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to update MultiClusterService %s with finalizer %s: %w", mcsvc.Name, hmc.MultiClusterServiceFinalizer, err)
+		}
+		return ctrl.Result{}, nil
+	}
+
+	// By using DefaultSystemNamespace we are enforcing that MultiClusterService
+	// may only use ServiceTemplates that are present in the hmc-system namespace.
+	opts, err := helmChartOpts(ctx, r.Client, utils.DefaultSystemNamespace, mcsvc.Spec.Services)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if _, err := sveltos.ReconcileClusterProfile(ctx, r.Client, mcsvc.Name,
+		sveltos.ReconcileProfileOpts{
+			OwnerReference: &metav1.OwnerReference{
+				APIVersion: hmc.GroupVersion.String(),
+				Kind:       hmc.MultiClusterServiceKind,
+				Name:       mcsvc.Name,
+				UID:        mcsvc.UID,
+			},
+			LabelSelector:  mcsvc.Spec.ClusterSelector,
+			HelmChartOpts:  opts,
+			Priority:       mcsvc.Spec.ServicesPriority,
+			StopOnConflict: mcsvc.Spec.StopOnConflict,
+		}); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to reconcile ClusterProfile: %w", err)
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// helmChartOpts returns a slice of helm chart options to use with Sveltos.
+// namespace is the namespace of the templates referred to in the services slice.
+func helmChartOpts(ctx context.Context, c client.Client, namespace string, services []hmc.ServiceSpec) ([]sveltos.HelmChartOpts, error) {
+	l := ctrl.LoggerFrom(ctx)
+	opts := []sveltos.HelmChartOpts{}
+
+	// NOTE: The Profile/ClusterProfile object will be updated with
+	// no helm charts if len(services) == 0. This will result
+	// in the helm charts being uninstalled on matching clusters if
+	// the Profile/ClusterProfile originally had len(services) > 0.
+	for _, svc := range services {
+		if svc.Disable {
+			l.Info(fmt.Sprintf("Skip adding ServiceTemplate %s because Disable=true", svc.Template))
+			continue
+		}
+
+		tmpl := &hmc.ServiceTemplate{}
+		// We can use the same namespace for all services because the
+		// services slice is part of either:
+		// 1. ManagedCluster: Then the referred template must be in its own namespace.
+		// 2. MultiClusterService: Then the referred template must be in the hmc-system namespace.
+		tmplRef := types.NamespacedName{Name: svc.Template, Namespace: namespace}
+		if err := c.Get(ctx, tmplRef, tmpl); err != nil {
+			return nil, fmt.Errorf("failed to get ServiceTemplate %s: %w", tmplRef.String(), err)
+		}
+
+		if tmpl.GetCommonStatus() == nil || tmpl.GetCommonStatus().ChartRef == nil {
+			return nil, fmt.Errorf("status for ServiceTemplate %s/%s has not been updated yet", tmpl.Namespace, tmpl.Name)
+		}
+
+		chart := &sourcev1.HelmChart{}
+		chartRef := types.NamespacedName{
+			Namespace: tmpl.GetCommonStatus().ChartRef.Namespace,
+			Name:      tmpl.GetCommonStatus().ChartRef.Name,
+		}
+		if err := c.Get(ctx, chartRef, chart); err != nil {
+			return nil, fmt.Errorf("failed to get HelmChart %s referenced by ServiceTemplate %s: %w", chartRef.String(), tmplRef.String(), err)
+		}
+
+		repo := &sourcev1.HelmRepository{}
+		repoRef := types.NamespacedName{
+			// Using the chart's namespace because its source
+			// should be within the same namespace.
+			Namespace: chart.Namespace,
+			Name:      chart.Spec.SourceRef.Name,
+		}
+		if err := c.Get(ctx, repoRef, repo); err != nil {
+			return nil, fmt.Errorf("failed to get HelmRepository %s: %w", repoRef.String(), err)
+		}
+
+		chartName := tmpl.Spec.Helm.ChartName
+		if chartName == "" {
+			chartName = tmpl.Spec.Helm.ChartRef.Name
+		}
+
+		opts = append(opts, sveltos.HelmChartOpts{
+			Values:        svc.Values,
+			RepositoryURL: repo.Spec.URL,
+			// We don't have a repository name, so the chart name becomes the repository name.
+			RepositoryName: chartName,
+			ChartName: func() string {
+				if repo.Spec.Type == utils.RegistryTypeOCI {
+					return chartName
+				}
+				// Sveltos accepts ChartName in <repository>/<chart> format for non-OCI.
+				// We don't have a repository name, so we can use <chart>/<chart> instead.
+				// See: https://projectsveltos.github.io/sveltos/addons/helm_charts/.
+				return fmt.Sprintf("%s/%s", chartName, chartName)
+			}(),
+			ChartVersion: tmpl.Spec.Helm.ChartVersion,
+			ReleaseName:  svc.Name,
+			ReleaseNamespace: func() string {
+				if svc.Namespace != "" {
+					return svc.Namespace
+				}
+				return svc.Name
+			}(),
+			// The reason it is passed to PlainHTTP instead of InsecureSkipTLSVerify is that
+			// the source.Spec.Insecure field is meant to be used for connecting to repositories
+			// over plain HTTP, which is different than what InsecureSkipTLSVerify is meant for.
+			// See: https://github.com/fluxcd/source-controller/pull/1288
+			PlainHTTP: repo.Spec.Insecure,
+		})
+	}
+
+	return opts, nil
+}
+
+func (r *MultiClusterServiceReconciler) reconcileDelete(ctx context.Context, mcsvc *hmc.MultiClusterService) (ctrl.Result, error) {
+	if err := sveltos.DeleteClusterProfile(ctx, r.Client, mcsvc.Name); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	if controllerutil.RemoveFinalizer(mcsvc, hmc.MultiClusterServiceFinalizer) {
+		if err := r.Client.Update(ctx, mcsvc); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to remove finalizer %s from MultiClusterService %s: %w", hmc.MultiClusterServiceFinalizer, mcsvc.Name, err)
+		}
+	}
 
 	return ctrl.Result{}, nil
 }
diff --git a/internal/controller/multiclusterservice_controller_test.go b/internal/controller/multiclusterservice_controller_test.go
index e14ad3dff..b5fe5954f 100644
--- a/internal/controller/multiclusterservice_controller_test.go
+++ b/internal/controller/multiclusterservice_controller_test.go
@@ -16,66 +16,211 @@ package controller
 
 import (
 	"context"
+	"time"
 
+	helmcontrollerv2 "github.com/fluxcd/helm-controller/api/v2"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1"
 	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/errors" + "helm.sh/helm/v3/pkg/chart" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - hmcmirantiscomv1alpha1 "github.com/Mirantis/hmc/api/v1alpha1" + hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Mirantis/hmc/internal/utils" + sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1" ) var _ = Describe("MultiClusterService Controller", func() { Context("When reconciling a resource", func() { - const resourceName = "test-resource" + const ( + testNamespace = utils.DefaultSystemNamespace + serviceTemplateName = "test-service-0-1-0" + helmRepoName = "test-helmrepo" + helmChartName = "test-helmchart" + helmChartReleaseName = "test-helmchart-release" + helmChartVersion = "0.1.0" + helmChartURL = "http://source-controller.hmc-system.svc.cluster.local./helmchart/hmc-system/test-chart/0.1.0.tar.gz" + multiClusterServiceName = "test-multiclusterservice" + ) + + fakeDownloadHelmChartFunc := func(context.Context, *sourcev1.Artifact) (*chart.Chart, error) { + return &chart.Chart{ + Metadata: &chart.Metadata{ + APIVersion: "v2", + Version: helmChartVersion, + Name: helmChartName, + }, + }, nil + } ctx := context.Background() - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed - } - multiclusterservice := &hmcmirantiscomv1alpha1.MultiClusterService{} + namespace := &corev1.Namespace{} + helmChart := &sourcev1.HelmChart{} + helmRepo := &sourcev1.HelmRepository{} + serviceTemplate := &hmc.ServiceTemplate{} + multiClusterService := &hmc.MultiClusterService{} + clusterProfile := &sveltosv1beta1.ClusterProfile{} + + helmRepositoryRef := types.NamespacedName{Namespace: testNamespace, Name: helmRepoName} + helmChartRef := types.NamespacedName{Namespace: testNamespace, Name: helmChartName} + serviceTemplateRef := types.NamespacedName{Namespace: testNamespace, Name: serviceTemplateName} + multiClusterServiceRef := types.NamespacedName{Name: multiClusterServiceName} + clusterProfileRef := types.NamespacedName{Name: multiClusterServiceName} BeforeEach(func() { - By("creating the custom resource for the Kind MultiClusterService") - err := k8sClient.Get(ctx, typeNamespacedName, multiclusterservice) - if err != nil && errors.IsNotFound(err) { - resource := &hmcmirantiscomv1alpha1.MultiClusterService{ + By("creating Namespace") + err := k8sClient.Get(ctx, types.NamespacedName{Name: testNamespace}, namespace) + if err != nil && apierrors.IsNotFound(err) { + namespace = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + } + Expect(k8sClient.Create(ctx, namespace)).To(Succeed()) + } + + By("creating HelmRepository") + err = k8sClient.Get(ctx, types.NamespacedName{Name: helmRepoName, Namespace: testNamespace}, helmRepo) + if err != nil && apierrors.IsNotFound(err) { + helmRepo = &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: helmRepoName, + Namespace: testNamespace, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: "oci://test/helmrepo", + }, + } + Expect(k8sClient.Create(ctx, helmRepo)).To(Succeed()) + } + + By("creating HelmChart") + err = k8sClient.Get(ctx, types.NamespacedName{Name: helmChartName, Namespace: testNamespace}, helmChart) + if err != nil && apierrors.IsNotFound(err) { + helmChart = &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
+						Name:      helmChartName,
+						Namespace: testNamespace,
+					},
+					Spec: sourcev1.HelmChartSpec{
+						SourceRef: sourcev1.LocalHelmChartSourceReference{
+							Kind: sourcev1.HelmRepositoryKind,
+							Name: helmRepoName,
+						},
+					},
+				}
+				Expect(k8sClient.Create(ctx, helmChart)).To(Succeed())
+			}
+
+			By("updating HelmChart status with artifact URL")
+			helmChart.Status.URL = helmChartURL
+			helmChart.Status.Artifact = &sourcev1.Artifact{
+				URL:            helmChartURL,
+				LastUpdateTime: metav1.Now(),
+			}
+			Expect(k8sClient.Status().Update(ctx, helmChart)).Should(Succeed())
+
+			By("creating ServiceTemplate")
+			err = k8sClient.Get(ctx, serviceTemplateRef, serviceTemplate)
+			if err != nil && apierrors.IsNotFound(err) {
+				serviceTemplate = &hmc.ServiceTemplate{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      resourceName,
-						Namespace: "default",
+						Name:      serviceTemplateName,
+						Namespace: testNamespace,
+						Labels: map[string]string{
+							hmc.HMCManagedLabelKey: "true",
+						},
+					},
+					Spec: hmc.ServiceTemplateSpec{
+						Helm: hmc.HelmSpec{
+							ChartVersion: helmChartVersion,
+							ChartRef: &helmcontrollerv2.CrossNamespaceSourceReference{
+								Kind:      "HelmChart",
+								Name:      helmChartName,
+								Namespace: testNamespace,
+							},
+						},
 					},
-					// TODO(user): Specify other spec details if needed.
 				}
-				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+			}
+			Expect(k8sClient.Create(ctx, serviceTemplate)).To(Succeed())
+
+			By("creating MultiClusterService")
+			err = k8sClient.Get(ctx, multiClusterServiceRef, multiClusterService)
+			if err != nil && apierrors.IsNotFound(err) {
+				multiClusterService = &hmc.MultiClusterService{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: multiClusterServiceName,
+						Finalizers: []string{
+							// Reconcile attempts to add this finalizer and returns immediately
+							// if successful, so we add the finalizer here manually to avoid
+							// having to call Reconcile multiple times in this test.
+							hmc.MultiClusterServiceFinalizer,
+						},
+					},
+					Spec: hmc.MultiClusterServiceSpec{
+						Services: []hmc.ServiceSpec{
+							{
+								Template: serviceTemplateName,
+								Name:     helmChartReleaseName,
+							},
+						},
+					},
+				}
+				Expect(k8sClient.Create(ctx, multiClusterService)).To(Succeed())
+			}
 		})
 
 		AfterEach(func() {
-			// TODO(user): Cleanup logic after each test, like removing the resource instance.
-			resource := &hmcmirantiscomv1alpha1.MultiClusterService{}
-			err := k8sClient.Get(ctx, typeNamespacedName, resource)
+			By("cleaning up")
+			multiClusterServiceResource := &hmc.MultiClusterService{}
+			Expect(k8sClient.Get(ctx, multiClusterServiceRef, multiClusterServiceResource)).NotTo(HaveOccurred())
+
+			reconciler := &MultiClusterServiceReconciler{Client: k8sClient}
+			Expect(k8sClient.Delete(ctx, multiClusterService)).To(Succeed())
+			// Running reconcile to remove the finalizer and delete the MultiClusterService
+			_, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: multiClusterServiceRef})
 			Expect(err).NotTo(HaveOccurred())
+			Eventually(k8sClient.Get(ctx, multiClusterServiceRef, multiClusterService), 1*time.Minute, 5*time.Second).Should(HaveOccurred())
+
+			Expect(k8sClient.Get(ctx, clusterProfileRef, &sveltosv1beta1.ClusterProfile{})).To(HaveOccurred())
+
+			serviceTemplateResource := &hmc.ServiceTemplate{}
+			Expect(k8sClient.Get(ctx, serviceTemplateRef, serviceTemplateResource)).NotTo(HaveOccurred())
+			Expect(k8sClient.Delete(ctx, serviceTemplateResource)).To(Succeed())
+
+			helmChartResource := &sourcev1.HelmChart{}
+			Expect(k8sClient.Get(ctx, helmChartRef, helmChartResource)).NotTo(HaveOccurred())
+			Expect(k8sClient.Delete(ctx, helmChartResource)).To(Succeed())
 
-			By("Cleanup the specific resource instance MultiClusterService")
-			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
+			helmRepositoryResource := &sourcev1.HelmRepository{}
+			Expect(k8sClient.Get(ctx, helmRepositoryRef, helmRepositoryResource)).NotTo(HaveOccurred())
+			Expect(k8sClient.Delete(ctx, helmRepositoryResource)).To(Succeed())
 		})
+
 		It("should successfully reconcile the resource", func() {
-			By("Reconciling the created resource")
-			controllerReconciler := &MultiClusterServiceReconciler{
-				Client: k8sClient,
+			By("reconciling ServiceTemplate used by MultiClusterService")
+			templateReconciler := TemplateReconciler{
+				Client:                k8sClient,
+				downloadHelmChartFunc: fakeDownloadHelmChartFunc,
 			}
+			serviceTemplateReconciler := &ServiceTemplateReconciler{TemplateReconciler: templateReconciler}
+			_, err := serviceTemplateReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: serviceTemplateRef})
+			Expect(err).NotTo(HaveOccurred())
 
-			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
-				NamespacedName: typeNamespacedName,
-			})
+			By("reconciling MultiClusterService")
+			multiClusterServiceReconciler := &MultiClusterServiceReconciler{Client: k8sClient}
+
+			_, err = multiClusterServiceReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: multiClusterServiceRef})
 			Expect(err).NotTo(HaveOccurred())
-			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
-			// Example: If you expect a certain status condition after reconciliation, verify it here.
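+			// The reconciler names the ClusterProfile after the MultiClusterService
+			// (see clusterProfileRef above), so the assertion below waits for that
+			// ClusterProfile to appear as evidence that reconciliation succeeded.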
+
+			Eventually(k8sClient.Get(ctx, clusterProfileRef, clusterProfile), 1*time.Minute, 5*time.Second).ShouldNot(HaveOccurred())
 		})
 	})
 })
diff --git a/internal/sveltos/profile.go b/internal/sveltos/profile.go
index 7c4f60ccf..8bba498b8 100644
--- a/internal/sveltos/profile.go
+++ b/internal/sveltos/profile.go
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"math"
+	"unsafe"
 
 	hmc "github.com/Mirantis/hmc/api/v1alpha1"
 	sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1"
@@ -32,6 +33,7 @@ import (
 
 type ReconcileProfileOpts struct {
 	OwnerReference *metav1.OwnerReference
+	LabelSelector  metav1.LabelSelector
 	HelmChartOpts  []HelmChartOpts
 	Priority       int32
 	StopOnConflict bool
@@ -49,97 +51,145 @@ type HelmChartOpts struct {
 	InsecureSkipTLSVerify bool
 }
 
+// ReconcileClusterProfile reconciles a Sveltos ClusterProfile object.
+func ReconcileClusterProfile(
+	ctx context.Context,
+	cl client.Client,
+	name string,
+	opts ReconcileProfileOpts,
+) (*sveltosv1beta1.ClusterProfile, error) {
+	l := ctrl.LoggerFrom(ctx)
+	obj := objectMeta(opts.OwnerReference)
+	obj.SetName(name)
+
+	cp := &sveltosv1beta1.ClusterProfile{
+		ObjectMeta: obj,
+	}
+
+	operation, err := ctrl.CreateOrUpdate(ctx, cl, cp, func() error {
+		spec, err := Spec(&opts)
+		if err != nil {
+			return err
+		}
+		cp.Spec = *spec
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated {
+		l.Info(fmt.Sprintf("Successfully %s ClusterProfile %s", string(operation), cp.Name))
+	}
+
+	return cp, nil
+}
+
 // ReconcileProfile reconciles a Sveltos Profile object.
-func ReconcileProfile(ctx context.Context,
+func ReconcileProfile(
+	ctx context.Context,
 	cl client.Client,
 	namespace string,
 	name string,
-	matchLabels map[string]string,
 	opts ReconcileProfileOpts,
 ) (*sveltosv1beta1.Profile, error) {
 	l := ctrl.LoggerFrom(ctx)
+	obj := objectMeta(opts.OwnerReference)
+	obj.SetNamespace(namespace)
+	obj.SetName(name)
 
-	cp := &sveltosv1beta1.Profile{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      name,
-		},
+	p := &sveltosv1beta1.Profile{
+		ObjectMeta: obj,
 	}
 
-	tier, err := PriorityToTier(opts.Priority)
+	operation, err := ctrl.CreateOrUpdate(ctx, cl, p, func() error {
+		spec, err := Spec(&opts)
+		if err != nil {
+			return err
+		}
+		p.Spec = *spec
+
+		return nil
+	})
 	if err != nil {
 		return nil, err
 	}
 
-	operation, err := ctrl.CreateOrUpdate(ctx, cl, cp, func() error {
-		if cp.Labels == nil {
-			cp.Labels = make(map[string]string)
-		}
+	if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated {
+		l.Info(fmt.Sprintf("Successfully %s Profile %s", string(operation), p.Name))
+	}
 
-		cp.Labels[hmc.HMCManagedLabelKey] = hmc.HMCManagedLabelValue
-		if opts.OwnerReference != nil {
-			cp.OwnerReferences = []metav1.OwnerReference{*opts.OwnerReference}
-		}
+	return p, nil
+}
+
+// Spec returns a spec object to be used with
+// a Sveltos Profile or ClusterProfile object.
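+//
+// A minimal usage sketch (illustrative only; the surrounding caller is
+// hypothetical, but the fields match ReconcileProfileOpts as defined above):
+//
+//	spec, err := Spec(&ReconcileProfileOpts{
+//		LabelSelector:  metav1.LabelSelector{MatchLabels: map[string]string{"env": "dev"}},
+//		HelmChartOpts:  charts,
+//		Priority:       100,
+//		StopOnConflict: false,
+//	})
+//	if err != nil {
+//		return nil, err
+//	}
+//	profile.Spec = *spec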
+func Spec(opts *ReconcileProfileOpts) (*sveltosv1beta1.Spec, error) {
+	tier, err := PriorityToTier(opts.Priority)
+	if err != nil {
+		return nil, err
+	}
 
-	cp.Spec = sveltosv1beta1.Spec{
-		ClusterSelector: libsveltosv1beta1.Selector{
-			LabelSelector: metav1.LabelSelector{
-				MatchLabels: matchLabels,
-			},
+	spec := &sveltosv1beta1.Spec{
+		ClusterSelector: libsveltosv1beta1.Selector{
+			LabelSelector: opts.LabelSelector,
+		},
+		Tier:               tier,
+		ContinueOnConflict: !opts.StopOnConflict,
+		HelmCharts:         make([]sveltosv1beta1.HelmChart, 0, len(opts.HelmChartOpts)),
+	}
+
+	for _, hc := range opts.HelmChartOpts {
+		helmChart := sveltosv1beta1.HelmChart{
+			RepositoryURL:    hc.RepositoryURL,
+			RepositoryName:   hc.RepositoryName,
+			ChartName:        hc.ChartName,
+			ChartVersion:     hc.ChartVersion,
+			ReleaseName:      hc.ReleaseName,
+			ReleaseNamespace: hc.ReleaseNamespace,
+			HelmChartAction:  sveltosv1beta1.HelmChartActionInstall,
+			RegistryCredentialsConfig: &sveltosv1beta1.RegistryCredentialsConfig{
+				PlainHTTP:             hc.PlainHTTP,
+				InsecureSkipTLSVerify: hc.InsecureSkipTLSVerify,
+			},
-		Tier:               tier,
-		ContinueOnConflict: !opts.StopOnConflict,
 		}
 
-	for _, hc := range opts.HelmChartOpts {
-		helmChart := sveltosv1beta1.HelmChart{
-			RepositoryURL:    hc.RepositoryURL,
-			RepositoryName:   hc.RepositoryName,
-			ChartName:        hc.ChartName,
-			ChartVersion:     hc.ChartVersion,
-			ReleaseName:      hc.ReleaseName,
-			ReleaseNamespace: hc.ReleaseNamespace,
-			HelmChartAction:  sveltosv1beta1.HelmChartActionInstall,
-			RegistryCredentialsConfig: &sveltosv1beta1.RegistryCredentialsConfig{
-				PlainHTTP:             hc.PlainHTTP,
-				InsecureSkipTLSVerify: hc.InsecureSkipTLSVerify,
-			},
-		}
+		if hc.PlainHTTP {
+			// InsecureSkipTLSVerify is redundant in this case.
+			helmChart.RegistryCredentialsConfig.InsecureSkipTLSVerify = false
+		}
 
-		if hc.PlainHTTP {
-			// InsecureSkipTLSVerify is redundant in this case.
-			helmChart.RegistryCredentialsConfig.InsecureSkipTLSVerify = false
+		if hc.Values != nil && len(hc.Values.Raw) > 0 {
+			b, err := yaml.JSONToYAML(hc.Values.Raw)
+			if err != nil {
+				return nil, fmt.Errorf("failed to convert values from JSON to YAML for service %s: %w", hc.RepositoryName, err)
 			}
 
-		if hc.Values != nil {
-			b, err := hc.Values.MarshalJSON()
-			if err != nil {
-				return fmt.Errorf("failed to marshal values to JSON for service (%s) in ManagedCluster: %w", hc.RepositoryName, err)
-			}
+			helmChart.Values = unsafe.String(&b[0], len(b))
+		}
 
-			b, err = yaml.JSONToYAML(b)
-			if err != nil {
-				return fmt.Errorf("failed to convert values from JSON to YAML for service (%s) in ManagedCluster: %w", hc.RepositoryName, err)
-			}
+		spec.HelmCharts = append(spec.HelmCharts, helmChart)
+	}
 
-			helmChart.Values = string(b)
-		}
+	return spec, nil
+}
 
-		cp.Spec.HelmCharts = append(cp.Spec.HelmCharts, helmChart)
-	}
-		return nil
-	})
-	if err != nil {
-		return nil, err
+func objectMeta(owner *metav1.OwnerReference) metav1.ObjectMeta {
+	obj := metav1.ObjectMeta{
+		Labels: map[string]string{
+			hmc.HMCManagedLabelKey: hmc.HMCManagedLabelValue,
+		},
 	}
 
-	if operation == controllerutil.OperationResultCreated || operation == controllerutil.OperationResultUpdated {
-		l.Info(fmt.Sprintf("Successfully %s Profile (%s/%s)", string(operation), cp.Namespace, cp.Name))
+	if owner != nil {
+		obj.OwnerReferences = []metav1.OwnerReference{*owner}
 	}
 
-	return cp, nil
+	return obj
 }
 
+// DeleteProfile deletes a Sveltos Profile object.
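+// Deletion is idempotent: NotFound errors are swallowed via
++// client.IgnoreNotFound, so it is safe to call on every reconcile.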
 func DeleteProfile(ctx context.Context, cl client.Client, namespace string, name string) error {
 	err := cl.Delete(ctx, &sveltosv1beta1.Profile{
 		ObjectMeta: metav1.ObjectMeta{
@@ -151,6 +201,17 @@ func DeleteProfile(ctx context.Context, cl client.Client, namespace string, name
 	return client.IgnoreNotFound(err)
 }
 
+// DeleteClusterProfile deletes a Sveltos ClusterProfile object.
+func DeleteClusterProfile(ctx context.Context, cl client.Client, name string) error {
+	err := cl.Delete(ctx, &sveltosv1beta1.ClusterProfile{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+	})
+
+	return client.IgnoreNotFound(err)
+}
+
 // PriorityToTier converts priority value to Sveltos tier value.
 func PriorityToTier(priority int32) (int32, error) {
 	var mini int32 = 1
diff --git a/templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml b/templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml
new file mode 100644
index 000000000..6e33c5e85
--- /dev/null
+++ b/templates/provider/hmc-templates/files/templates/ingress-nginx-4-11-3.yaml
@@ -0,0 +1,10 @@
+apiVersion: hmc.mirantis.com/v1alpha1
+kind: ServiceTemplate
+metadata:
+  name: ingress-nginx-4-11-3
+  annotations:
+    helm.sh/resource-policy: keep
+spec:
+  helm:
+    chartName: ingress-nginx
+    chartVersion: 4.11.3
diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml
index 6c33ac0bb..d3ad6d5f5 100644
--- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml
+++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_managedclusters.yaml
@@ -69,17 +69,6 @@ spec:
               description: DryRun specifies whether the template should be applied
                 after validation or only validated.
               type: boolean
-            priority:
-              default: 100
-              description: |-
-                Priority sets the priority for the services defined in this spec.
-                Higher value means higher priority and lower means lower.
-                In case of conflict with another object managing the service,
-                the one with higher priority will get to deploy its services.
-              format: int32
-              maximum: 2147483646
-              minimum: 1
-              type: integer
             services:
               description: |-
                 Services is a list of services created via ServiceTemplates
@@ -113,7 +102,19 @@ spec:
                   - template
                   type: object
                 type: array
+            servicesPriority:
+              default: 100
+              description: |-
+                ServicesPriority sets the priority for the services defined in this spec.
+                Higher value means higher priority and lower means lower.
+                In case of conflict with another object managing the service,
+                the one with higher priority will get to deploy its services.
+              format: int32
+              maximum: 2147483646
+              minimum: 1
+              type: integer
             stopOnConflict:
+              default: false
               description: |-
                 StopOnConflict specifies what to do in case of a conflict.
                 E.g. If another object is already managing a service.
diff --git a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml
index 953f6b87c..dc2a7fa93 100644
--- a/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml
+++ b/templates/provider/hmc/templates/crds/hmc.mirantis.com_multiclusterservices.yaml
@@ -87,17 +87,6 @@ spec:
                 type: object
               type: object
               x-kubernetes-map-type: atomic
-            priority:
-              default: 100
-              description: |-
-                Priority sets the priority for the services defined in this spec.
-                Higher value means higher priority and lower means lower.
-                In case of conflict with another object managing the service,
-                the one with higher priority will get to deploy its services.
-              format: int32
-              maximum: 2147483646
-              minimum: 1
-              type: integer
             services:
               description: |-
                 Services is a list of services created via ServiceTemplates
@@ -131,7 +120,19 @@ spec:
                   - template
                   type: object
                 type: array
+            servicesPriority:
+              default: 100
+              description: |-
+                ServicesPriority sets the priority for the services defined in this spec.
+                Higher value means higher priority and lower means lower.
+                In case of conflict with another object managing the service,
+                the one with higher priority will get to deploy its services.
+              format: int32
+              maximum: 2147483646
+              minimum: 1
+              type: integer
             stopOnConflict:
+              default: false
               description: |-
                 StopOnConflict specifies what to do in case of a conflict.
                 E.g. If another object is already managing a service.
diff --git a/templates/provider/hmc/templates/rbac/controller/roles.yaml b/templates/provider/hmc/templates/rbac/controller/roles.yaml
index 739275821..48c52a7cd 100644
--- a/templates/provider/hmc/templates/rbac/controller/roles.yaml
+++ b/templates/provider/hmc/templates/rbac/controller/roles.yaml
@@ -171,6 +171,7 @@ rules:
   - config.projectsveltos.io
   resources:
   - profiles
+  - clusterprofiles
   verbs: {{ include "rbac.editorVerbs" . | nindent 4 }}
 - apiGroups:
   - hmc.mirantis.com
diff --git a/templates/service/ingress-nginx/Chart.lock b/templates/service/ingress-nginx-4-11-0/Chart.lock
similarity index 100%
rename from templates/service/ingress-nginx/Chart.lock
rename to templates/service/ingress-nginx-4-11-0/Chart.lock
diff --git a/templates/service/ingress-nginx/Chart.yaml b/templates/service/ingress-nginx-4-11-0/Chart.yaml
similarity index 100%
rename from templates/service/ingress-nginx/Chart.yaml
rename to templates/service/ingress-nginx-4-11-0/Chart.yaml
diff --git a/templates/service/ingress-nginx-4-11-3/Chart.lock b/templates/service/ingress-nginx-4-11-3/Chart.lock
new file mode 100644
index 000000000..51eb4dc3f
--- /dev/null
+++ b/templates/service/ingress-nginx-4-11-3/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: ingress-nginx
+  repository: https://kubernetes.github.io/ingress-nginx
+  version: 4.11.3
+digest: sha256:0963a4470e5fe0ce97023b16cfc9c3cde18b74707c6379947542e09afa6d5346
+generated: "2024-10-16T10:19:41.054555-04:00"
diff --git a/templates/service/ingress-nginx-4-11-3/Chart.yaml b/templates/service/ingress-nginx-4-11-3/Chart.yaml
new file mode 100644
index 000000000..8fe3cc1d5
--- /dev/null
+++ b/templates/service/ingress-nginx-4-11-3/Chart.yaml
@@ -0,0 +1,10 @@
+apiVersion: v2
+name: ingress-nginx
+description: A Helm chart that references the official ingress-nginx helm chart
+type: application
+version: 4.11.3
+appVersion: "1.11.3"
+dependencies:
+  - name: ingress-nginx
+    version: 4.11.3
+    repository: https://kubernetes.github.io/ingress-nginx
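+# Once packaged as the ServiceTemplate ingress-nginx-4-11-3 (added above under
+# templates/provider/hmc-templates/files/templates/), this chart can be consumed
+# from a MultiClusterService, as in config/dev/multiclusterservice.yaml:
+#
+#   spec:
+#     servicesPriority: 1000
+#     clusterSelector:
+#       matchLabels:
+#         app.kubernetes.io/managed-by: Helm
+#     services:
+#       - template: ingress-nginx-4-11-3
+#         name: ingress-nginx
+#         namespace: ingress-nginx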