From b3763010c1b6be3fcbebb7b95f81dba3d3704819 Mon Sep 17 00:00:00 2001 From: Cameron Wall <75807506+cameronmwall@users.noreply.github.com> Date: Mon, 24 Jun 2024 18:05:32 -0400 Subject: [PATCH] move chart generation to dev tools (#1581) * move chart generation to dev tools Signed-off-by: Cameron Wall * rbac update Signed-off-by: Cameron Wall --------- Signed-off-by: Cameron Wall --- Makefile.dev | 4 +- ...ster-management.io_clusterpermissions.yaml | 431 +++++++++++++++ config/rbac/role.yaml | 22 + hack/bundle-automation/copy-charts.py | 132 ----- hack/bundle-automation/generate-charts.py | 497 ------------------ hack/bundle-automation/generate-shell.py | 18 + pkg/templates/rbac_gen.go | 3 + 7 files changed, 476 insertions(+), 631 deletions(-) create mode 100644 config/crd/bases/rbac.open-cluster-management.io_clusterpermissions.yaml delete mode 100755 hack/bundle-automation/copy-charts.py delete mode 100755 hack/bundle-automation/generate-charts.py diff --git a/Makefile.dev b/Makefile.dev index 70e62cb82d..f2e6989d45 100644 --- a/Makefile.dev +++ b/Makefile.dev @@ -40,11 +40,11 @@ regenerate-operator-sha-commits: ## Regenerates the operator bundles regenerate-charts: ## Regenerates the charts pip3 install -r hack/bundle-automation/chart-requirements.txt - python3 ./hack/bundle-automation/generate-charts.py --destination pkg/templates/ + python3 ./hack/bundle-automation/generate-shell.py --update-charts copy-charts: ## Regenerates the operator bundles pip3 install -r hack/bundle-automation/requirements.txt - python3 ./hack/bundle-automation/copy-charts.py --destination pkg/templates/ + python3 ./hack/bundle-automation/generate-shell.py --copy-charts # different from `in-cluster-install` (call no secrets, no observability-crd) mock-install: prereqs subscriptions docker-build docker-push deploy diff --git a/config/crd/bases/rbac.open-cluster-management.io_clusterpermissions.yaml b/config/crd/bases/rbac.open-cluster-management.io_clusterpermissions.yaml new file mode 
100644 index 0000000000..dd5b67331b --- /dev/null +++ b/config/crd/bases/rbac.open-cluster-management.io_clusterpermissions.yaml @@ -0,0 +1,431 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: clusterpermissions.rbac.open-cluster-management.io +spec: + group: rbac.open-cluster-management.io + names: + kind: ClusterPermission + listKind: ClusterPermissionList + plural: clusterpermissions + singular: clusterpermission + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ClusterPermission is the Schema for the clusterpermissions API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterPermissionSpec defines the desired state of ClusterPermission + properties: + clusterRole: + description: ClusterRole represents the ClusterRole that is being + created on the managed cluster + properties: + rules: + description: Rules holds all the PolicyRules for this ClusterRole + items: + description: PolicyRule holds information that describes a policy + rule, but does not contain information about who the rule + applies to or which namespace the rule applies to. 
+ properties: + apiGroups: + description: APIGroups is the name of the APIGroup that + contains the resources. If multiple API groups are specified, + any action requested against one of the enumerated resources + in any API group will be allowed. "" represents the core + API group and "*" represents all API groups. + items: + type: string + type: array + nonResourceURLs: + description: NonResourceURLs is a set of partial urls that + a user should have access to. *s are allowed, but only + as the full, final step in the path Since non-resource + URLs are not namespaced, this field is only applicable + for ClusterRoles referenced from a ClusterRoleBinding. + Rules can either apply to API resources (such as "pods" + or "secrets") or non-resource URL paths (such as "/api"), but + not both. + items: + type: string + type: array + resourceNames: + description: ResourceNames is an optional white list of + names that the rule applies to. An empty set means that + everything is allowed. + items: + type: string + type: array + resources: + description: Resources is a list of resources this rule + applies to. '*' represents all resources. + items: + type: string + type: array + verbs: + description: Verbs is a list of Verbs that apply to ALL + the ResourceKinds contained in this rule. '*' represents + all verbs. + items: + type: string + type: array + required: + - verbs + type: object + type: array + required: + - rules + type: object + clusterRoleBinding: + description: ClusterRoleBinding represents the ClusterRoleBinding + that is being created on the managed cluster + properties: + subject: + description: Subject contains a reference to the object or user + identities a ClusterPermission binding applies to. Besides the + typical subject for a binding, a ManagedServiceAccount can be + used as a subject as well. + properties: + apiGroup: + description: APIGroup holds the API group of the referenced + subject. Defaults to "" for ServiceAccount subjects. 
Defaults + to "rbac.authorization.k8s.io" for User and Group subjects. + type: string + kind: + description: Kind of object being referenced. Values defined + by this API group are "User", "Group", and "ServiceAccount". + If the Authorizer does not recognized the kind value, the + Authorizer should report an error. + type: string + name: + description: Name of the object being referenced. + type: string + namespace: + description: Namespace of the referenced object. If the object + kind is non-namespace, such as "User" or "Group", and this + value is not empty the Authorizer should report an error. + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - subject + type: object + roleBindings: + description: RoleBindings represents RoleBindings that are being created + on the managed cluster + items: + description: RoleBinding represents the RoleBinding that is being + created on the managed cluster + properties: + namespace: + description: Namespace of the Role for that is being created + on the managed cluster + type: string + namespaceSelector: + description: 'NamespaceSelector define the general labelSelector + which namespace to apply the rules to Note: the namespace + must exists on the hub cluster' + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. 
If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + roleRef: + description: RoleRef contains information that points to the + role being used + properties: + kind: + description: Kind is the type of resource being referenced + type: string + required: + - kind + type: object + subject: + description: Subject contains a reference to the object or user + identities a ClusterPermission binding applies to. Besides + the typical subject for a binding, a ManagedServiceAccount + can be used as a subject as well. + properties: + apiGroup: + description: APIGroup holds the API group of the referenced + subject. Defaults to "" for ServiceAccount subjects. Defaults + to "rbac.authorization.k8s.io" for User and Group subjects. + type: string + kind: + description: Kind of object being referenced. Values defined + by this API group are "User", "Group", and "ServiceAccount". + If the Authorizer does not recognized the kind value, + the Authorizer should report an error. + type: string + name: + description: Name of the object being referenced. + type: string + namespace: + description: Namespace of the referenced object. If the + object kind is non-namespace, such as "User" or "Group", + and this value is not empty the Authorizer should report + an error. 
+ type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + required: + - roleRef + - subject + type: object + type: array + roles: + description: Roles represents roles that are being created on the + managed cluster + items: + description: Role represents the Role that is being created on the + managed cluster + properties: + namespace: + description: Namespace of the Role for that is being created + on the managed cluster + type: string + namespaceSelector: + description: 'NamespaceSelector define the general labelSelector + which namespace to apply the rules to Note: the namespace + must exists on the hub cluster' + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + rules: + description: Rules holds all the PolicyRules for this Role + items: + description: PolicyRule holds information that describes a + policy rule, but does not contain information about who + the rule applies to or which namespace the rule applies + to. + properties: + apiGroups: + description: APIGroups is the name of the APIGroup that + contains the resources. If multiple API groups are + specified, any action requested against one of the enumerated + resources in any API group will be allowed. "" represents + the core API group and "*" represents all API groups. + items: + type: string + type: array + nonResourceURLs: + description: NonResourceURLs is a set of partial urls + that a user should have access to. *s are allowed, + but only as the full, final step in the path Since non-resource + URLs are not namespaced, this field is only applicable + for ClusterRoles referenced from a ClusterRoleBinding. + Rules can either apply to API resources (such as "pods" + or "secrets") or non-resource URL paths (such as "/api"), but + not both. + items: + type: string + type: array + resourceNames: + description: ResourceNames is an optional white list of + names that the rule applies to. An empty set means + that everything is allowed. + items: + type: string + type: array + resources: + description: Resources is a list of resources this rule + applies to. '*' represents all resources. + items: + type: string + type: array + verbs: + description: Verbs is a list of Verbs that apply to ALL + the ResourceKinds contained in this rule. '*' represents + all verbs. + items: + type: string + type: array + required: + - verbs + type: object + type: array + required: + - rules + type: object + type: array + type: object + status: + description: ClusterPermissionStatus defines the observed state of ClusterPermission + properties: + conditions: + description: Conditions is the condition list. 
+ items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d8f386dd53..588f9abe26 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -1266,6 +1266,12 @@ rules: - patch - update - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - deletecollection - apiGroups: - discovery.open-cluster-management.io resources: @@ -1439,6 +1445,15 @@ rules: - patch - update - watch +- apiGroups: + - multicluster.x-k8s.io + resources: + - serviceimports + verbs: + - delete + - get + - list + - update - apiGroups: - networking.k8s.io resources: @@ -1787,6 +1802,13 @@ rules: - delete - get - update +- apiGroups: + - submariner.io + resources: + - clusters + - endpoints + verbs: + - deletecollection - apiGroups: - submarineraddon.open-cluster-management.io resources: diff --git a/hack/bundle-automation/copy-charts.py b/hack/bundle-automation/copy-charts.py deleted file mode 100755 index 3885d4467a..0000000000 --- a/hack/bundle-automation/copy-charts.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env 
python3 -# Copyright (c) 2021 Red Hat, Inc. -# Copyright Contributors to the Open Cluster Management project -# Assumes: Python 3.6+ - -import argparse -import os -import shutil -import yaml -import logging -import subprocess -from git import Repo, exc - -from validate_csv import * - -# Copy chart-templates to a new helmchart directory -def copyHelmChart(destinationChartPath, repo, chart): - chartName = chart['name'] - logging.info("Copying templates into new '%s' chart directory ...", chartName) - # Create main folder - chartPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp", repo, chart["chart-path"]) - if os.path.exists(destinationChartPath): - shutil.rmtree(destinationChartPath) - - # Copy Chart.yaml, values.yaml, and templates dir - chartTemplatesPath = os.path.join(chartPath, "templates/") - destinationTemplateDir = os.path.join(destinationChartPath, "templates/") - os.makedirs(destinationTemplateDir) - - # fetch template files - for file_name in os.listdir(chartTemplatesPath): - # construct full file path - source = chartTemplatesPath + file_name - destination = destinationTemplateDir + file_name - # copy only files - if os.path.isfile(source): - shutil.copyfile(source, destination) - - chartYamlPath = os.path.join(chartPath, "Chart.yaml") - if not os.path.exists(chartYamlPath): - logging.info("No Chart.yaml for chart: ", chartName) - return - shutil.copyfile(chartYamlPath, os.path.join(destinationChartPath, "Chart.yaml")) - - shutil.copyfile(os.path.join(chartPath, "values.yaml"), os.path.join(destinationChartPath, "values.yaml")) - - logging.info("Chart copied.\n") - -def addCRDs(repo, chart, outputDir): - if not 'chart-path' in chart: - logging.critical("Could not validate chart path in given chart: " + chart) - exit(1) - - chartPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp", repo, chart["chart-path"]) - if not os.path.exists(chartPath): - logging.critical("Could not validate chartPath at given path: " + 
chartPath) - exit(1) - - crdPath = os.path.join(chartPath, "crds") - if not os.path.exists(crdPath): - logging.info("No CRDs for repo: ", repo) - return - - destinationPath = os.path.join(outputDir, "crds", chart['name']) - if os.path.exists(destinationPath): # If path exists, remove and re-clone - shutil.rmtree(destinationPath) - os.makedirs(destinationPath) - for filename in os.listdir(crdPath): - if not filename.endswith(".yaml"): - continue - filepath = os.path.join(crdPath, filename) - with open(filepath, 'r') as f: - resourceFile = yaml.safe_load(f) - - if resourceFile["kind"] == "CustomResourceDefinition": - shutil.copyfile(filepath, os.path.join(destinationPath, filename)) - -def chartConfigAcceptable(chart): - helmChart = chart["name"] - if helmChart == "": - logging.critical("Unable to generate helm chart without a name.") - return False - return True - -def main(): - ## Initialize ArgParser - parser = argparse.ArgumentParser() - parser.add_argument("--destination", dest="destination", type=str, required=False, help="Destination directory of the created helm chart") - - args = parser.parse_args() - destination = args.destination - - logging.basicConfig(level=logging.DEBUG) - - # Config.yaml holds the configurations for Operator bundle locations to be used - configYaml = os.path.join(os.path.dirname(os.path.realpath(__file__)),"copy-config.yaml") - with open(configYaml, 'r') as f: - config = yaml.safe_load(f) - - # Loop through each repo in the config.yaml - for repo in config: - logging.info("Cloning: %s", repo["repo_name"]) - repo_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp/" + repo["repo_name"]) # Path to clone repo to - if os.path.exists(repo_path): # If path exists, remove and re-clone - shutil.rmtree(repo_path) - repository = Repo.clone_from(repo["github_ref"], repo_path) # Clone repo to above path - if 'branch' in repo: - repository.git.checkout(repo['branch']) # If a branch is specified, checkout that branch - - # Loop 
through each operator in the repo identified by the config - for chart in repo["charts"]: - if not chartConfigAcceptable(chart): - logging.critical("Unable to generate helm chart without configuration requirements.") - exit(1) - - logging.info("Helm Chartifying - %s!\n", chart["name"]) - - logging.info("Adding CRDs - %s!\n", chart["name"]) - # Copy over all CRDs to the destination directory - addCRDs(repo["repo_name"], chart, destination) - - logging.info("Creating helm chart: '%s' ...", chart["name"]) - - always_or_toggle = chart['always-or-toggle'] - destinationChartPath = os.path.join(destination, "charts", always_or_toggle, chart['name']) - - # Template Helm Chart Directory from 'chart-templates' - logging.info("Templating helm chart '%s' ...", chart["name"]) - copyHelmChart(destinationChartPath, repo["repo_name"], chart) - -if __name__ == "__main__": - main() diff --git a/hack/bundle-automation/generate-charts.py b/hack/bundle-automation/generate-charts.py deleted file mode 100755 index a9f4d1d5c3..0000000000 --- a/hack/bundle-automation/generate-charts.py +++ /dev/null @@ -1,497 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2021 Red Hat, Inc. 
-# Copyright Contributors to the Open Cluster Management project -# Assumes: Python 3.6+ - -import argparse -import os -import shutil -import yaml -import logging -import subprocess -from git import Repo, exc - -from validate_csv import * - -# Parse an image reference, return dict containing image reference information -def parse_image_ref(image_ref): - # Image ref: [registry-and-ns/]repository-name[:tag][@digest] - parsed_ref = dict() - - remaining_ref = image_ref - at_pos = remaining_ref.rfind("@") - if at_pos > 0: - parsed_ref["digest"] = remaining_ref[at_pos+1:] - remaining_ref = remaining_ref[0:at_pos] - else: - parsed_ref["digest"] = None - colon_pos = remaining_ref.rfind(":") - if colon_pos > 0: - parsed_ref["tag"] = remaining_ref[colon_pos+1:] - remaining_ref = remaining_ref[0:colon_pos] - else: - parsed_ref["tag"] = None - slash_pos = remaining_ref.rfind("/") - if slash_pos > 0: - parsed_ref["repository"] = remaining_ref[slash_pos+1:] - rgy_and_ns = remaining_ref[0:slash_pos] - else: - parsed_ref["repository"] = remaining_ref - rgy_and_ns = "localhost" - parsed_ref["registry_and_namespace"] = rgy_and_ns - - rgy, ns = split_at(rgy_and_ns, "/", favor_right=False) - if not ns: - ns = "" - - parsed_ref["registry"] = rgy - parsed_ref["namespace"] = ns - - slash_pos = image_ref.rfind("/") - if slash_pos > 0: - repo_and_suffix = image_ref[slash_pos+1:] - else: - repo_and_suffix = image_ref - parsed_ref["repository_and_suffix"] = repo_and_suffix - - return parsed_ref - -def updateServiceAccount(yamlContent): - yamlContent['metadata'].pop('namespace') - -def updateClusterRoleBinding(yamlContent): - subjectsList = yamlContent['subjects'] - for sub in subjectsList: - sub['namespace'] = '{{ .Values.global.namespace }}' - -# Copy chart-templates to a new helmchart directory -def updateResources(outputDir, repo, chart): - logging.info(" Updating resources!") - # Create main folder - always_or_toggle = chart['always-or-toggle'] - templateDir = os.path.join(outputDir, 
"charts", always_or_toggle, chart['name'], "templates") - print(templateDir) - for tempFile in os.listdir(templateDir): - filePath = os.path.join(templateDir, tempFile) - with open(filePath, 'r') as f: - yamlContent = yaml.safe_load(f) - kind = yamlContent["kind"] - if kind == "ServiceAccount": - logging.info(" Updating ServiceAccount!") - updateServiceAccount(yamlContent) - elif kind == "ClusterRoleBinding": - logging.info(" Updating ClusterRoleBinding!") - updateClusterRoleBinding(yamlContent) - else: - logging.info(" No updates for kind %s at this step.", kind) - continue - with open(filePath, 'w') as f: - yaml.dump(yamlContent, f, width=float("inf")) - - - -# Copy chart-templates to a new helmchart directory -def copyHelmChart(destinationChartPath, repo, chart): - chartName = chart['name'] - logging.info("Copying templates into new '%s' chart directory ...", chartName) - # Create main folder - chartPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp", repo, chart["chart-path"]) - if os.path.exists(destinationChartPath): - shutil.rmtree(destinationChartPath) - - # Copy Chart.yaml, values.yaml, and templates dir - - destinationTemplateDir = os.path.join(destinationChartPath, "templates") - os.makedirs(destinationTemplateDir) - - specificValues = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chart-values", chart['name'], "values.yaml") - if os.path.exists(specificValues): - shutil.copyfile(specificValues, os.path.join(chartPath, "values.yaml")) - helmTemplateOutput = subprocess.getoutput(['helm template '+ chartPath]) - yamlList = helmTemplateOutput.split('---') - for outputContent in yamlList: - yamlContent = yaml.safe_load(outputContent) - if yamlContent is None: - continue - newFileName = yamlContent['kind'].lower() + '.yaml' - newFilePath= os.path.join(destinationTemplateDir, newFileName) - a_file = open(newFilePath, "w") - a_file.writelines(outputContent) - a_file.close() - - chartYamlPath = os.path.join(chartPath, "Chart.yaml") 
- if not os.path.exists(chartYamlPath): - logging.info("No Chart.yaml for chart: ", chartName) - return - shutil.copyfile(chartYamlPath, os.path.join(destinationChartPath, "Chart.yaml")) - - shutil.copyfile(os.path.join(chartPath, "values.yaml"), os.path.join(destinationChartPath, "values.yaml")) - # Copying template values.yaml instead of values.yaml from chart - shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "chart-templates", "values.yaml"), os.path.join(destinationChartPath, "values.yaml")) - - logging.info("Chart copied.\n") - -# Given a resource Kind, return all filepaths of that resource type in a chart directory -def findTemplatesOfType(helmChart, kind): - resources = [] - for filename in os.listdir(os.path.join(helmChart, "templates")): - if filename.endswith(".yaml") or filename.endswith(".yml"): - filePath = os.path.join(helmChart, "templates", filename) - with open(filePath, 'r') as f: - fileYml = yaml.safe_load(f) - if fileYml['kind'] == kind: - resources.append(filePath) - continue - else: - continue - return resources - -# For each deployment, identify the image references if any exist in the environment variable fields, insert helm flow control code to reference it, and add image-key to the values.yaml file. -# If the image-key referenced in the deployment does not exist in `imageMappings` in the Config.yaml, this will fail. 
Images must be explicitly defined -def fixEnvVarImageReferences(helmChart, imageKeyMapping): - logging.info("Fixing image references in container 'env' section in deployments and values.yaml ...") - valuesYaml = os.path.join(helmChart, "values.yaml") - with open(valuesYaml, 'r') as f: - values = yaml.safe_load(f) - deployments = findTemplatesOfType(helmChart, 'Deployment') - - imageKeys = [] - for deployment in deployments: - with open(deployment, 'r') as f: - deploy = yaml.safe_load(f) - - containers = deploy['spec']['template']['spec']['containers'] - for container in containers: - if 'env' not in container: - continue - - for env in container['env']: - image_key = env['name'] - if image_key.endswith('_IMAGE') == False: - continue - image_key = parse_image_ref(env['value'])['repository'] - try: - image_key = imageKeyMapping[image_key] - except KeyError: - logging.critical("No image key mapping provided for imageKey: %s" % image_key) - exit(1) - imageKeys.append(image_key) - env['value'] = "{{ .Values.global.imageOverrides." + image_key + " }}" - with open(deployment, 'w') as f: - yaml.dump(deploy, f, width=float("inf")) - - for imageKey in imageKeys: - values['global']['imageOverrides'][imageKey] = "" - with open(valuesYaml, 'w') as f: - yaml.dump(values, f, width=float("inf")) - logging.info("Image container env references in deployments and values.yaml updated successfully.\n") - -# For each deployment, identify the image references if any exist in the image field, insert helm flow control code to reference it, and add image-key to the values.yaml file. -# If the image-key referenced in the deployment does not exist in `imageMappings` in the Config.yaml, this will fail. 
Images must be explicitly defined -def fixImageReferences(helmChart, imageKeyMapping): - logging.info("Fixing image and pull policy references in deployments and values.yaml ...") - valuesYaml = os.path.join(helmChart, "values.yaml") - with open(valuesYaml, 'r') as f: - values = yaml.safe_load(f) - - deployments = findTemplatesOfType(helmChart, 'Deployment') - imageKeys = [] - temp = "" ## temporarily read image ref - for deployment in deployments: - with open(deployment, 'r') as f: - deploy = yaml.safe_load(f) - - containers = deploy['spec']['template']['spec']['containers'] - for container in containers: - image_key = parse_image_ref(container['image'])["repository"] - try: - image_key = imageKeyMapping[image_key] - except KeyError: - logging.critical("No image key mapping provided for imageKey: %s" % image_key) - exit(1) - imageKeys.append(image_key) - # temp = container['image'] - container['image'] = "{{ .Values.global.imageOverrides." + image_key + " }}" - container['imagePullPolicy'] = "{{ .Values.global.pullPolicy }}" - args = container['args'] - refreshed_args = [] - for arg in args: - if "--agent-image-name" not in arg: - refreshed_args.append(arg) - else: - refreshed_args.append("--agent-image-name="+"{{ .Values.global.imageOverrides." + image_key + " }}") - container['args'] = refreshed_args - with open(deployment, 'w') as f: - yaml.dump(deploy, f, width=float("inf")) - - del values['global']['imageOverrides']['imageOverride'] - for imageKey in imageKeys: - values['global']['imageOverrides'][imageKey] = "" # set to temp to debug - with open(valuesYaml, 'w') as f: - yaml.dump(values, f, width=float("inf")) - logging.info("Image references and pull policy in deployments and values.yaml updated successfully.\n") - -# injectHelmFlowControl injects advanced helm flow control which would typically make a .yaml file more difficult to parse. This should be called last. 
-def injectHelmFlowControl(deployment): - logging.info("Adding Helm flow control for NodeSelector and Proxy Overrides ...") - deploy = open(deployment, "r") - lines = deploy.readlines() - for i, line in enumerate(lines): - if line.strip() == "nodeSelector: \'\'": - lines[i] = """{{- with .Values.hubconfig.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} -{{- end }} -""" - if line.strip() == "imagePullSecrets: \'\'": - lines[i] = """{{- if .Values.global.pullSecret }} - imagePullSecrets: - - name: {{ .Values.global.pullSecret }} -{{- end }} -""" - if line.strip() == "tolerations: \'\'": - lines[i] = """{{- with .Values.hubconfig.tolerations }} - tolerations: - {{- range . }} - - {{ if .Key }} key: {{ .Key }} {{- end }} - {{ if .Operator }} operator: {{ .Operator }} {{- end }} - {{ if .Value }} value: {{ .Value }} {{- end }} - {{ if .Effect }} effect: {{ .Effect }} {{- end }} - {{ if .TolerationSeconds }} tolerationSeconds: {{ .TolerationSeconds }} {{- end }} - {{- end }} -{{- end }} -""" - - - if line.strip() == "env:" or line.strip() == "env: {}": - lines[i] = """ env: -{{- if .Values.hubconfig.proxyConfigs }} - - name: HTTP_PROXY - value: {{ .Values.hubconfig.proxyConfigs.HTTP_PROXY }} - - name: HTTPS_PROXY - value: {{ .Values.hubconfig.proxyConfigs.HTTPS_PROXY }} - - name: NO_PROXY - value: {{ .Values.hubconfig.proxyConfigs.NO_PROXY }} -{{- end }} -""" - - if 'replicas:' in line.strip(): - lines[i] = """ replicas: {{ .Values.hubconfig.replicaCount }} -""" - a_file = open(deployment, "w") - a_file.writelines(lines) - a_file.close() - logging.info("Added Helm flow control for NodeSelector and Proxy Overrides.\n") - -def addPullSecretOverride(deployment): - deploy = open(deployment, "r") - lines = deploy.readlines() - for i, line in enumerate(lines): - if line.strip() == "env:" or line.strip() == "env: {}": - logging.info("Adding image pull secret environment variable to managed-serviceaccount deployment") - lines[i] = """ env: -{{- if 
.Values.global.pullSecret }} - - name: AGENT_IMAGE_PULL_SECRET - value: {{ .Values.global.pullSecret }} -{{- end }} -""" - a_file = open(deployment, "w") - a_file.writelines(lines) - a_file.close() - -# updateDeployments adds standard configuration to the deployments (antiaffinity, security policies, and tolerations) -def updateDeployments(chartName, helmChart, exclusions, inclusions): - logging.info("Updating deployments with antiaffinity, security policies, and tolerations ...") - deploySpecYaml = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chart-templates/templates/deploymentspec.yaml") - with open(deploySpecYaml, 'r') as f: - deploySpec = yaml.safe_load(f) - - deployments = findTemplatesOfType(helmChart, 'Deployment') - for deployment in deployments: - with open(deployment, 'r') as f: - deploy = yaml.safe_load(f) - deploy['metadata'].pop('namespace') - deploy['spec']['replicas'] = "{{ .Values.hubconfig.replicaCount }}" - affinityList = deploySpec['affinity']['podAntiAffinity']['preferredDuringSchedulingIgnoredDuringExecution'] - for antiaffinity in affinityList: - antiaffinity['podAffinityTerm']['labelSelector']['matchExpressions'][0]['values'][0] = deploy['metadata']['name'] - deploy['spec']['template']['spec']['affinity'] = deploySpec['affinity'] - deploy['spec']['template']['spec']['tolerations'] = '' - deploy['spec']['template']['spec']['hostNetwork'] = False - deploy['spec']['template']['spec']['hostPID'] = False - deploy['spec']['template']['spec']['hostIPC'] = False - if 'securityContext' not in deploy['spec']['template']['spec']: - deploy['spec']['template']['spec']['securityContext'] = {} - deploy['spec']['template']['spec']['securityContext']['runAsNonRoot'] = True - deploy['spec']['template']['metadata']['labels']['ocm-antiaffinity-selector'] = deploy['metadata']['name'] - deploy['spec']['template']['spec']['nodeSelector'] = "" - deploy['spec']['template']['spec']['imagePullSecrets'] = '' - - containers = 
deploy['spec']['template']['spec']['containers'] - for container in containers: - if 'securityContext' not in container: - container['securityContext'] = {} - if 'env' not in container: - container['env'] = {} - container['securityContext']['allowPrivilegeEscalation'] = False - container['securityContext']['capabilities'] = {} - container['securityContext']['capabilities']['drop'] = ['ALL'] - container['securityContext']['privileged'] = False - container['securityContext']['runAsNonRoot'] = True - if 'readOnlyRootFilesystem' not in exclusions: - container['securityContext']['readOnlyRootFilesystem'] = True - - with open(deployment, 'w') as f: - yaml.dump(deploy, f, width=float("inf")) - logging.info("Deployments updated with antiaffinity, security policies, and tolerations successfully. \n") - - injectHelmFlowControl(deployment) - if 'pullSecretOverride' in inclusions: - addPullSecretOverride(deployment) - -# updateRBAC adds standard configuration to the RBAC resources (clusterroles, roles, clusterrolebindings, and rolebindings) -def updateRBAC(helmChart, chartName): - logging.info("Updating clusterroles, roles, clusterrolebindings, and rolebindings ...") - clusterroles = findTemplatesOfType(helmChart, 'ClusterRole') - roles = findTemplatesOfType(helmChart, 'Role') - clusterrolebindings = findTemplatesOfType(helmChart, 'ClusterRoleBinding') - rolebindings = findTemplatesOfType(helmChart, 'RoleBinding') - - for rbacFile in clusterroles + roles + clusterrolebindings + rolebindings: - with open(rbacFile, 'r') as f: - rbac = yaml.safe_load(f) - rbac['metadata']['name'] = "{{ .Values.org }}:{{ .Chart.Name }}:" + chartName - if rbac['kind'] in ['RoleBinding', 'ClusterRoleBinding']: - rbac['roleRef']['name'] = "{{ .Values.org }}:{{ .Chart.Name }}:" + chartName - with open(rbacFile, 'w') as f: - yaml.dump(rbac, f, width=float("inf")) - logging.info("Clusterroles, roles, clusterrolebindings, and rolebindings updated. 
\n") - - -def injectRequirements(helmChart, chartName, imageKeyMapping, exclusions, inclusions): - logging.info("Updating Helm chart '%s' with onboarding requirements ...", helmChart) - fixImageReferences(helmChart, imageKeyMapping) - fixEnvVarImageReferences(helmChart, imageKeyMapping) - updateRBAC(helmChart, chartName) - updateDeployments(chartName, helmChart, exclusions, inclusions) - logging.info("Updated Chart '%s' successfully\n", helmChart) - -def split_at(the_str, the_delim, favor_right=True): - - split_pos = the_str.find(the_delim) - if split_pos > 0: - left_part = the_str[0:split_pos] - right_part = the_str[split_pos+1:] - else: - if favor_right: - left_part = None - right_part = the_str - else: - left_part = the_str - right_part = None - - return (left_part, right_part) - -def addCRDs(repo, chart, outputDir): - if not 'chart-path' in chart: - logging.critical("Could not validate chart path in given chart: " + chart) - exit(1) - - chartPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp", repo, chart["chart-path"]) - if not os.path.exists(chartPath): - logging.critical("Could not validate chartPath at given path: " + chartPath) - exit(1) - - crdPath = os.path.join(chartPath, "crds") - if not os.path.exists(crdPath): - logging.info("No CRDs for repo: ", repo) - return - - destinationPath = os.path.join(outputDir, chart['name'], "crds") - if os.path.exists(destinationPath): # If path exists, remove and re-clone - shutil.rmtree(destinationPath) - os.makedirs(destinationPath) - for filename in os.listdir(crdPath): - if not filename.endswith(".yaml"): - continue - filepath = os.path.join(crdPath, filename) - with open(filepath, 'r') as f: - resourceFile = yaml.safe_load(f) - - if resourceFile["kind"] == "CustomResourceDefinition": - shutil.copyfile(filepath, os.path.join(destinationPath, filename)) - -def chartConfigAcceptable(chart): - helmChart = chart["name"] - if helmChart == "": - logging.critical("Unable to generate helm chart without a 
name.") - return False - return True - -def main(): - ## Initialize ArgParser - parser = argparse.ArgumentParser() - parser.add_argument("--destination", dest="destination", type=str, required=False, help="Destination directory of the created helm chart") - parser.add_argument("--skipOverrides", dest="skipOverrides", type=bool, help="If true, overrides such as helm flow control will not be applied") - parser.add_argument("--lint", dest="lint", action='store_true', help="If true, bundles will only be linted to ensure they can be transformed successfully. Default is False.") - parser.set_defaults(skipOverrides=False) - parser.set_defaults(lint=False) - - args = parser.parse_args() - skipOverrides = args.skipOverrides - destination = args.destination - lint = args.lint - - if lint == False and not destination: - logging.critical("Destination directory is required when not linting.") - exit(1) - - logging.basicConfig(level=logging.DEBUG) - - # Config.yaml holds the configurations for Operator bundle locations to be used - configYaml = os.path.join(os.path.dirname(os.path.realpath(__file__)),"charts-config.yaml") - with open(configYaml, 'r') as f: - config = yaml.safe_load(f) - - # Loop through each repo in the config.yaml - for repo in config: - logging.info("Cloning: %s", repo["repo_name"]) - repo_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tmp/" + repo["repo_name"]) # Path to clone repo to - if os.path.exists(repo_path): # If path exists, remove and re-clone - shutil.rmtree(repo_path) - repository = Repo.clone_from(repo["github_ref"], repo_path) # Clone repo to above path - if 'branch' in repo: - repository.git.checkout(repo['branch']) # If a branch is specified, checkout that branch - - # Loop through each operator in the repo identified by the config - for chart in repo["charts"]: - if not chartConfigAcceptable(chart): - logging.critical("Unable to generate helm chart without configuration requirements.") - exit(1) - - logging.info("Helm 
Chartifying - %s!\n", chart["name"]) - - logging.info("Adding CRDs - %s!\n", chart["name"]) - # Copy over all CRDs to the destination directory - addCRDs(repo["repo_name"], chart, destination) - - logging.info("Creating helm chart: '%s' ...", chart["name"]) - - always_or_toggle = chart['always-or-toggle'] - destinationChartPath = os.path.join(destination, "charts", always_or_toggle, chart['name']) - - # Template Helm Chart Directory from 'chart-templates' - logging.info("Templating helm chart '%s' ...", chart["name"]) - copyHelmChart(destinationChartPath, repo["repo_name"], chart) - - updateResources(destination, repo["repo_name"], chart) - - if not skipOverrides: - logging.info("Adding Overrides (set --skipOverrides=true to skip) ...") - exclusions = chart["exclusions"] if "exclusions" in chart else [] - inclusions = chart["inclusions"] if "inclusions" in chart else [] - injectRequirements(destinationChartPath, chart["name"], chart["imageMappings"], exclusions, inclusions) - logging.info("Overrides added. 
\n") - -if __name__ == "__main__": - main() diff --git a/hack/bundle-automation/generate-shell.py b/hack/bundle-automation/generate-shell.py index 997553dd5e..946969d3bb 100644 --- a/hack/bundle-automation/generate-shell.py +++ b/hack/bundle-automation/generate-shell.py @@ -57,6 +57,22 @@ def main(args): prepare_operation(script_dir, operation_script, operation_args) logging.info("Bundles updated successfully.") + elif args.update_charts: + logging.info("Preparing to update charts...") + operation_script = "generate-charts.py" + operation_args = "--destination pkg/templates/" + + prepare_operation(script_dir, operation_script, operation_args) + logging.info("Charts updated successfully.") + + elif args.copy_charts: + logging.info("Preparing to copy charts...") + operation_script = "move-charts.py" + operation_args = "--destination pkg/templates/" + + prepare_operation(script_dir, operation_script, operation_args) + logging.info("Charts copied successfully.") + elif args.update_commits: logging.info("Preparing to update commit SHAs...") operation_script = "generate-sha-commits.py" @@ -77,6 +93,8 @@ def main(args): parser.add_argument("--lint-bundles", action="store_true", help="Perform linting for operator bundles") parser.add_argument("--update-charts-from-bundles", action="store_true", help="Regenerate operator charts from bundles") parser.add_argument("--update-commits", action="store_true", help="Regenerate operator bundles with commit SHA") + parser.add_argument("--update-charts", action="store_true", help="Regenerate operator charts") + parser.add_argument("--copy-charts", action="store_true", help="Copy operator charts") parser.add_argument("--repo", help="Repository name") parser.add_argument("--branch", default='main', help="Branch name") diff --git a/pkg/templates/rbac_gen.go b/pkg/templates/rbac_gen.go index 67c6f93002..67462d77a6 100644 --- a/pkg/templates/rbac_gen.go +++ b/pkg/templates/rbac_gen.go @@ -162,6 +162,7 @@ package main
//+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=core.observatorium.io,resources=observatoria,verbs=create;delete;get;list;patch;update;watch +//+kubebuilder:rbac:groups=discovery.k8s.io,resources=endpointslices,verbs=deletecollection //+kubebuilder:rbac:groups=discovery.open-cluster-management.io,resources=discoveryconfigs;discoveredclusters,verbs=list;watch //+kubebuilder:rbac:groups=extensions.hive.openshift.io,resources=agentclusterinstalls,verbs=list;watch //+kubebuilder:rbac:groups=hive.openshift.io,resources=clusterclaims;clusterdeployments;clusterpools;clusterimagesets;clusterprovisions;clusterdeprovisions;machinepools,verbs=list;watch @@ -180,6 +181,7 @@ package main //+kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;create //+kubebuilder:rbac:groups=multicluster.openshift.io,resources=multiclusterengines,verbs=get;list //+kubebuilder:rbac:groups=multicluster.openshift.io,resources=multiclusterengines,verbs=list;watch +//+kubebuilder:rbac:groups=multicluster.x-k8s.io,resources=serviceimports,verbs=list;get;update;delete //+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;create;update;delete;deletecollection;watch //+kubebuilder:rbac:groups=oauth.openshift.io,resources=oauthclients,verbs=create;delete;get;list;patch;update;watch //+kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=*;multiclusterobservabilities;endpointmonitorings,verbs=create;delete;get;list;patch;update;watch @@ -224,6 +226,7 @@ package main //+kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=list;watch //+kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=watch;get;list //+kubebuilder:rbac:groups=submariner.io,resources=brokers,verbs=create;get;update;delete 
+//+kubebuilder:rbac:groups=submariner.io,resources=endpoints;clusters,verbs=deletecollection //+kubebuilder:rbac:groups=submarineraddon.open-cluster-management.io,resources=submarinerconfigs,verbs=get;list;watch;update;patch //+kubebuilder:rbac:groups=submarineraddon.open-cluster-management.io,resources=submarinerconfigs,verbs=list;watch //+kubebuilder:rbac:groups=submarineraddon.open-cluster-management.io,resources=submarinerconfigs/status,verbs=update;patch