From 783a47c5295d0000166fad5e8f8d924fbfbeb692 Mon Sep 17 00:00:00 2001
From: Sergey Bataev
Date: Wed, 10 Jul 2024 22:01:06 +0300
Subject: [PATCH] [opentelemetry-kube-stack] Fix values.schema.json for accepting targetAllocator.image type (#1251)

* fix collectors.defaultCRConfig.targetAllocator.image type
* fix serviceMonitorSelector
* remove id from values.schema.json, same as collector
* move defaultCRConfig up from collectors
* generate examples
* bump chart version
* create example for targetAllocator
* add enabled to TA

---------

Co-authored-by: Jacob Aronoff
---
 charts/opentelemetry-kube-stack/Chart.yaml    |   2 +-
 .../examples/cloud-demo/rendered/bridge.yaml  |   2 +-
 .../cloud-demo/rendered/collector.yaml        |   4 +-
 .../cloud-demo/rendered/instrumentation.yaml  |   2 +-
 .../rendered/clusterrole.yaml                 | 124 ++++++++
 .../rendered/collector.yaml                   | 146 ++++++++++
 .../operator-webhook-with-cert-manager.yaml   | 192 +++++++++++++
 .../admission-webhooks/operator-webhook.yaml  |   3 +
 .../opentelemetry-operator/certmanager.yaml   |  43 +++
 .../opentelemetry-operator/clusterrole.yaml   | 265 ++++++++++++++++++
 .../clusterrolebinding.yaml                   |  44 +++
 .../opentelemetry-operator/deployment.yaml    | 105 +++++++
 .../rendered/opentelemetry-operator/role.yaml |  43 +++
 .../opentelemetry-operator/rolebinding.yaml   |  23 ++
 .../opentelemetry-operator/service.yaml       |  51 ++++
 .../serviceaccount.yaml                       |  15 +
 .../tests/test-certmanager-connection.yaml    |  38 +++
 .../tests/test-service-connection.yaml        |  76 +++++
 .../examples/targetallocator-prom/values.yaml |  70 +++++
 .../values.schema.json                        |  37 +--
 20 files changed, 1248 insertions(+), 37 deletions(-)
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/clusterrole.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook-with-cert-manager.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/certmanager.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrole.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrolebinding.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/deployment.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/role.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/rolebinding.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/service.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/serviceaccount.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-certmanager-connection.yaml
 create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-service-connection.yaml
 create mode 100644
charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml diff --git a/charts/opentelemetry-kube-stack/Chart.yaml b/charts/opentelemetry-kube-stack/Chart.yaml index de329ac91..9f19fe526 100644 --- a/charts/opentelemetry-kube-stack/Chart.yaml +++ b/charts/opentelemetry-kube-stack/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: opentelemetry-kube-stack -version: 0.0.7 +version: 0.0.8 description: | OpenTelemetry Quickstart chart for Kubernetes. Installs an operator and collector for an easy way to get started with Kubernetes observability. diff --git a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml index 50094f1e6..f924b5351 100644 --- a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml +++ b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml @@ -5,7 +5,7 @@ kind: OpAMPBridge metadata: name: example labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.7 + helm.sh/chart: opentelemetry-kube-stack-0.0.8 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm annotations: diff --git a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml index 7ee28978f..808750334 100644 --- a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml +++ b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml @@ -6,7 +6,7 @@ metadata: name: example-cluster-stats namespace: default labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.7 + helm.sh/chart: opentelemetry-kube-stack-0.0.8 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm opentelemetry.io/opamp-reporting: "true" @@ -192,7 +192,7 @@ metadata: name: example-daemon namespace: default labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.7 + helm.sh/chart: opentelemetry-kube-stack-0.0.8 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm opentelemetry.io/opamp-reporting: "true" diff --git a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml index 20b87b4ba..1286f96f0 100644 --- a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml +++ b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml @@ -5,7 +5,7 @@ kind: Instrumentation metadata: name: example labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.7 + helm.sh/chart: opentelemetry-kube-stack-0.0.8 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm annotations: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/clusterrole.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/clusterrole.yaml new file mode 100644 index 000000000..ab1702d61 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/clusterrole.yaml @@ -0,0 +1,124 @@ +--- +# Source: opentelemetry-kube-stack/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-collector +rules: +- apiGroups: [""] + resources: + - namespaces + - nodes + - nodes/proxy + - nodes/metrics + - nodes/stats + - services + - endpoints + - pods + - events + - secrets + verbs: ["get", "list", "watch"] +- apiGroups: ["monitoring.coreos.com"] + resources: + - servicemonitors 
+ - podmonitors + verbs: ["get", "list", "watch"] +- apiGroups: + - extensions + resources: + - ingresses + verbs: ["get", "list", "watch"] +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: ["get", "list", "watch"] +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: ["get", "list", "watch"] +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics", "/metrics/cadvisor"] + verbs: ["get"] + +- apiGroups: + - "" + resources: + - events + - namespaces + - namespaces/status + - nodes + - nodes/spec + - pods + - pods/status + - replicationcontrollers + - replicationcontrollers/status + - resourcequotas + - services + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: + - extensions + resources: + - daemonsets + - deployments + - replicasets + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - list + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["watch", "list"] +--- +# Source: opentelemetry-kube-stack/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-daemon +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-collector +subjects: +- kind: ServiceAccount + # quirk of the Operator + name: "example-daemon-collector" + namespace: default diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml new file mode 100644 index 000000000..c641a444a --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml @@ -0,0 +1,146 @@ +--- +# Source: opentelemetry-kube-stack/templates/collector.yaml +apiVersion: opentelemetry.io/v1beta1 +kind: OpenTelemetryCollector +metadata: + name: example-daemon + namespace: default + labels: + helm.sh/chart: opentelemetry-kube-stack-0.0.8 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-failed +spec: + managementState: managed + mode: daemonset + config: + exporters: + debug: {} + otlp: + endpoint: ingest.example.com:443 + headers: + access-token: ${ACCESS_TOKEN} + processors: + batch: + send_batch_max_size: 1500 + send_batch_size: 1000 + timeout: 1s + resourcedetection/env: + detectors: + - env + override: false + timeout: 2s + receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + prometheus: + config: + scrape_configs: + - job_name: otel-collector + scrape_interval: 30s + static_configs: + - targets: + - 0.0.0.0:8888 + target_allocator: + collector_id: ${POD_NAME} + endpoint: http://otelcol-targetallocator + interval: 30s + service: + pipelines: + logs: + exporters: + - debug + - otlp + processors: + - resourcedetection/env + - batch + receivers: + - otlp + metrics: + exporters: + - debug + - otlp + processors: + - resourcedetection/env + - batch + receivers: + - prometheus + traces: + exporters: + - debug + - otlp + processors: + - resourcedetection/env + - 
batch + receivers: + - otlp + image: "otel/opentelemetry-collector-k8s:0.103.1" + imagePullPolicy: IfNotPresent + upgradeStrategy: automatic + hostNetwork: false + shareProcessNamespace: false + terminationGracePeriodSeconds: 30 + resources: + limits: + cpu: 100m + memory: 250Mi + requests: + cpu: 100m + memory: 128Mi + securityContext: + {} + targetAllocator: + enabled: true + image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:main + prometheusCR: + enabled: true + podMonitorSelector: + matchLabels: + app: my-app + scrapeInterval: 30s + serviceMonitorSelector: + matchExpressions: + - key: kubernetes.io/app-name + operator: In + values: + - my-app + volumeMounts: + env: + - name: OTEL_K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OTEL_K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OTEL_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: OTEL_K8S_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: OTEL_K8S_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: OTEL_RESOURCE_ATTRIBUTES + value: "k8s.cluster.name=demo" + + - name: ACCESS_TOKEN + valueFrom: + secretKeyRef: + key: access_token + name: otel-collector-secret + volumes: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook-with-cert-manager.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook-with-cert-manager.yaml new file mode 100644 index 000000000..7775bac4f --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook-with-cert-manager.yaml @@ -0,0 +1,192 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/admission-webhooks/operator-webhook-with-cert-manager.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from: default/example-opentelemetry-operator-serving-cert + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: webhook + name: example-opentelemetry-operator-mutation +webhooks: + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: example-opentelemetry-operator-webhook + namespace: default + path: /mutate-opentelemetry-io-v1alpha1-instrumentation + port: 443 + failurePolicy: Ignore + name: minstrumentation.kb.io + rules: + - apiGroups: + - opentelemetry.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - instrumentations + scope: Namespaced + sideEffects: None + timeoutSeconds: 10 + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: example-opentelemetry-operator-webhook + namespace: default + path: /mutate-opentelemetry-io-v1beta1-opentelemetrycollector + port: 443 + failurePolicy: Ignore + name: mopentelemetrycollectorbeta.kb.io + rules: + - apiGroups: + - opentelemetry.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - opentelemetrycollectors + scope: Namespaced + sideEffects: None + timeoutSeconds: 10 + - admissionReviewVersions: + - v1 + clientConfig: + service: 
+ name: example-opentelemetry-operator-webhook + namespace: default + path: /mutate-v1-pod + port: 443 + failurePolicy: Ignore + name: mpod.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + resources: + - pods + scope: Namespaced + sideEffects: None + timeoutSeconds: 10 +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/admission-webhooks/operator-webhook-with-cert-manager.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from: default/example-opentelemetry-operator-serving-cert + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: webhook + name: example-opentelemetry-operator-validation +webhooks: + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: example-opentelemetry-operator-webhook + namespace: default + path: /validate-opentelemetry-io-v1alpha1-instrumentation + port: 443 + failurePolicy: Ignore + name: vinstrumentationcreateupdate.kb.io + rules: + - apiGroups: + - opentelemetry.io + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - instrumentations + scope: Namespaced + sideEffects: None + timeoutSeconds: 10 + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: example-opentelemetry-operator-webhook + namespace: default + path: /validate-opentelemetry-io-v1alpha1-instrumentation + port: 443 + failurePolicy: Ignore + name: vinstrumentationdelete.kb.io + rules: + - apiGroups: + - opentelemetry.io + apiVersions: + - v1alpha1 + operations: + - DELETE + resources: + - instrumentations + scope: Namespaced + sideEffects: None + timeoutSeconds: 10 + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: example-opentelemetry-operator-webhook + namespace: default + path: /validate-opentelemetry-io-v1beta1-opentelemetrycollector + port: 443 + failurePolicy: Ignore + name: vopentelemetrycollectorcreateupdatebeta.kb.io + rules: + - apiGroups: + - opentelemetry.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - opentelemetrycollectors + scope: Namespaced + sideEffects: None + timeoutSeconds: 10 + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: example-opentelemetry-operator-webhook + namespace: default + path: /validate-opentelemetry-io-v1beta1-opentelemetrycollector + port: 443 + failurePolicy: Ignore + name: vopentelemetrycollectordeletebeta.kb.io + rules: + - apiGroups: + - opentelemetry.io + apiVersions: + - v1beta1 + operations: + - DELETE + resources: + - opentelemetrycollectors + scope: Namespaced + sideEffects: None + timeoutSeconds: 10 diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook.yaml new file mode 100644 index 000000000..5b9ff5511 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/admission-webhooks/operator-webhook.yaml @@ -0,0 +1,3 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/admission-webhooks/operator-webhook.yaml +--- diff --git 
a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/certmanager.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/certmanager.yaml new file mode 100644 index 000000000..dc2bf94ca --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/certmanager.yaml @@ -0,0 +1,43 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/certmanager.yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: webhook + name: example-opentelemetry-operator-serving-cert + namespace: default +spec: + dnsNames: + - example-opentelemetry-operator-webhook.default.svc + - example-opentelemetry-operator-webhook.default.svc.cluster.local + issuerRef: + kind: Issuer + name: example-opentelemetry-operator-selfsigned-issuer + secretName: example-opentelemetry-operator-controller-manager-service-cert + subject: + organizationalUnits: + - example-opentelemetry-operator +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/certmanager.yaml +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: webhook + name: example-opentelemetry-operator-selfsigned-issuer + namespace: default +spec: + selfSigned: {} diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrole.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrole.yaml new file mode 100644 index 000000000..fc6ad7de6 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrole.yaml @@ -0,0 +1,265 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-manager +rules: + - apiGroups: + - "" + resources: + - configmaps + - persistentvolumeclaims + - persistentvolumes + - pods + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - list + - watch + - apiGroups: + - apps + resources: + - daemonsets + - deployments + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + - extensions + resources: + - replicasets + verbs: + - get + - list + - watch + - apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - 
batch + resources: + - jobs + verbs: + - get + - list + - watch + - apiGroups: + - config.openshift.io + resources: + - infrastructures + - infrastructures/status + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - update + - apiGroups: + - monitoring.coreos.com + resources: + - podmonitors + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - opentelemetry.io + resources: + - instrumentations + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - opentelemetry.io + resources: + - opampbridges + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - opentelemetry.io + resources: + - opampbridges/finalizers + verbs: + - update + - apiGroups: + - opentelemetry.io + resources: + - opampbridges/status + verbs: + - get + - patch + - update + - apiGroups: + - opentelemetry.io + resources: + - opentelemetrycollectors + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - opentelemetry.io + resources: + - opentelemetrycollectors/finalizers + verbs: + - get + - patch + - update + - apiGroups: + - opentelemetry.io + resources: + - opentelemetrycollectors/status + verbs: + - get + - patch + - update + - apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-metrics +rules: + - nonResourceURLs: + - /metrics + verbs: + - get +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-proxy +rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrolebinding.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrolebinding.yaml new file mode 100644 index 000000000..e6918c520 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/clusterrolebinding.yaml @@ -0,0 +1,44 @@ +--- +# Source: 
opentelemetry-kube-stack/charts/opentelemetry-operator/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-operator-manager +subjects: + - kind: ServiceAccount + name: opentelemetry-operator + namespace: default +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-proxy +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-operator-proxy +subjects: + - kind: ServiceAccount + name: opentelemetry-operator + namespace: default diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/deployment.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/deployment.yaml new file mode 100644 index 000000000..6da6e7947 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/deployment.yaml @@ -0,0 +1,105 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/component: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/component: controller-manager + spec: + hostNetwork: false + containers: + - args: + - --metrics-addr=0.0.0.0:8080 + - --enable-leader-election + - --health-probe-addr=:8081 + - --webhook-port=9443 + - --collector-image=otel/opentelemetry-collector-k8s:0.102.1 + command: + - /manager + env: + - name: ENABLE_WEBHOOKS + value: "true" + image: "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:0.102.0" + name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + - containerPort: 9443 + name: webhook-server + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 64Mi + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + 
name: cert + readOnly: true + + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + image: "quay.io/brancz/kube-rbac-proxy:v0.15.0" + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + serviceAccountName: opentelemetry-operator + terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: example-opentelemetry-operator-controller-manager-service-cert + securityContext: + fsGroup: 65532 + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/role.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/role.yaml new file mode 100644 index 000000000..968764e98 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/role.yaml @@ -0,0 +1,43 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-leader-election + namespace: default +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/rolebinding.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/rolebinding.yaml new file mode 100644 index 000000000..e153dfc5b --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/rolebinding.yaml @@ -0,0 +1,23 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-leader-election + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: example-opentelemetry-operator-leader-election +subjects: + - kind: ServiceAccount + name: opentelemetry-operator + namespace: default diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/service.yaml new file mode 100644 index 000000000..50b51e134 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/service.yaml @@ -0,0 +1,51 @@ +--- +# Source: 
opentelemetry-kube-stack/charts/opentelemetry-operator/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator + namespace: default +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + - name: metrics + port: 8080 + protocol: TCP + targetPort: metrics + selector: + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/component: controller-manager +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + name: example-opentelemetry-operator-webhook + namespace: default +spec: + ports: + - port: 443 + protocol: TCP + targetPort: webhook-server + selector: + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/serviceaccount.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/serviceaccount.yaml new file mode 100644 index 000000000..576f75f34 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/serviceaccount.yaml @@ -0,0 +1,15 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: opentelemetry-operator + namespace: default + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-certmanager-connection.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-certmanager-connection.yaml new file mode 100644 index 000000000..b95641331 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-certmanager-connection.yaml @@ -0,0 +1,38 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/tests/test-certmanager-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "example-opentelemetry-operator-cert-manager" + namespace: default + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: webhook + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: "busybox:latest" + env: + - name: CERT_MANAGER_CLUSTERIP + value: "cert-manager-webhook" + - name: CERT_MANAGER_PORT + value: "443" + command: + - sh + - -c + # The following shell script 
tests if the cert-manager service is up. If the service is up, when we try + # to wget its exposed port, we will get an HTTP error 400. + - | + wget_output=$(wget -q "$CERT_MANAGER_CLUSTERIP:$CERT_MANAGER_PORT") + if wget_output=="wget: server returned error: HTTP/1.0 400 Bad Request" + then exit 0 + else exit 1 + fi + restartPolicy: Never diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-service-connection.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-service-connection.yaml new file mode 100644 index 000000000..ac8cf507a --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/opentelemetry-operator/tests/test-service-connection.yaml @@ -0,0 +1,76 @@ +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/tests/test-service-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "example-opentelemetry-operator-metrics" + namespace: default + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: "busybox:latest" + env: + - name: MANAGER_METRICS_SERVICE_CLUSTERIP + value: "example-opentelemetry-operator" + - name: MANAGER_METRICS_SERVICE_PORT + value: "8443" + command: + - sh + - -c + # The following shell script tests if the controller-manager-metrics-service is up. + # If the service is up, when we try to wget its exposed port, we will get an HTTP error 400. + - | + wget_output=$(wget -q "$MANAGER_METRICS_SERVICE_CLUSTERIP:$MANAGER_METRICS_SERVICE_PORT") + if wget_output=="wget: server returned error: HTTP/1.0 400 Bad Request" + then exit 0 + else exit 1 + fi + restartPolicy: Never +--- +# Source: opentelemetry-kube-stack/charts/opentelemetry-operator/templates/tests/test-service-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "example-opentelemetry-operator-webhook" + namespace: default + labels: + helm.sh/chart: opentelemetry-operator-0.63.2 + app.kubernetes.io/name: opentelemetry-operator + app.kubernetes.io/version: "0.102.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: example + + app.kubernetes.io/component: controller-manager + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: "busybox:latest" + env: + - name: WEBHOOK_SERVICE_CLUSTERIP + value: "example-opentelemetry-operator-webhook" + - name: WEBHOOK_SERVICE_PORT + value: "443" + command: + - sh + - -c + # The following shell script tests if the webhook service is up. If the service is up, when we try + # to wget its exposed port, we will get an HTTP error 400. 
+      - |
+        wget_output=$(wget -q "$WEBHOOK_SERVICE_CLUSTERIP:$WEBHOOK_SERVICE_PORT")
+        if wget_output=="wget: server returned error: HTTP/1.0 400 Bad Request"
+        then exit 0
+        else exit 1
+        fi
+  restartPolicy: Never
diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml
new file mode 100644
index 000000000..f549c137d
--- /dev/null
+++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml
@@ -0,0 +1,70 @@
+clusterName: demo
+opentelemetry-operator:
+  enabled: true
+defaultCRConfig:
+  targetAllocator:
+    enabled: true
+    image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:main
+    prometheusCR:
+      enabled: true
+      podMonitorSelector:
+        matchLabels:
+          app: my-app
+      scrapeInterval: "30s"
+      serviceMonitorSelector:
+        matchExpressions:
+          - key: kubernetes.io/app-name
+            operator: In
+            values: ["my-app"]
+collectors:
+  daemon:
+    enabled: true
+    config:
+      receivers:
+        prometheus:
+          config:
+            scrape_configs:
+              - job_name: 'otel-collector'
+                scrape_interval: 30s
+                static_configs:
+                  - targets: [ '0.0.0.0:8888' ]
+          target_allocator:
+            endpoint: http://otelcol-targetallocator
+            interval: 30s
+            collector_id: "${POD_NAME}"
+      exporters:
+        otlp:
+          endpoint: ingest.example.com:443
+          headers:
+            "access-token": "${ACCESS_TOKEN}"
+      service:
+        pipelines:
+          metrics:
+            receivers: [prometheus]
+            exporters: [debug, otlp]
+          traces:
+            exporters: [debug, otlp]
+          logs:
+            exporters: [debug, otlp]
+    env:
+      - name: ACCESS_TOKEN
+        valueFrom:
+          secretKeyRef:
+            key: access_token
+            name: otel-collector-secret
+    scrape_configs_file: ""
+    presets:
+      logsCollection:
+        enabled: false
+      kubeletMetrics:
+        enabled: false
+      hostMetrics:
+        enabled: false
+      kubernetesAttributes:
+        enabled: false
+      cluster:
+        enabled: false
+instrumentation:
+  enabled: false
+opAMPBridge:
+  enabled: false
diff --git a/charts/opentelemetry-kube-stack/values.schema.json b/charts/opentelemetry-kube-stack/values.schema.json
index 12d8a7ba3..dca899ae1 100644
--- a/charts/opentelemetry-kube-stack/values.schema.json
+++ b/charts/opentelemetry-kube-stack/values.schema.json
@@ -1,6 +1,5 @@
 {
-  "$schema": "https://json-schema.org/draft/2020-12/schema",
-  "$id": "https://github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1/open-telemetry-collector-spec",
+  "$schema": "http://json-schema.org/schema#",
   "$defs": {
     "AWSElasticBlockStoreVolumeSource": {
       "properties": {
@@ -1079,7 +1078,7 @@
         "enableMetrics": {
           "type": "boolean"
         },
-        "DisablePrometheusAnnotations": {
+        "disablePrometheusAnnotations": {
           "type": "boolean"
         }
       },
@@ -1606,27 +1605,7 @@
           "type": "string"
         },
         "image": {
-          "description": "Image use in both standalone and agent configs",
-          "type": "object",
-          "additionalProperties": false,
-          "properties": {
-            "registry": {
-              "type": "string"
-            },
-            "repository": {
-              "type": "string"
-            },
-            "tag": {
-              "type": "string"
-            },
-            "digest": {
-              "type": "string"
-            },
-            "pullPolicy": {
-              "type": "string",
-              "enum": ["IfNotPresent", "Always", "Never"]
-            }
-          }
+          "type": "string"
         },
         "enabled": {
           "type": "boolean"
@@ -1680,16 +1659,10 @@
           "type": "string"
         },
         "podMonitorSelector": {
-          "additionalProperties": {
-            "type": "string"
-          },
-          "type": "object"
+          "$ref": "#/$defs/LabelSelector"
        },
         "serviceMonitorSelector": {
-          "additionalProperties": {
-            "type": "string"
-          },
-          "type": "object"
+          "$ref": "#/$defs/LabelSelector"
         }
       },
       "additionalProperties": false,
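
Editor's note (not part of the patch): a quick local sanity check of the schema change is to validate the new targetallocator-prom example values against the updated values.schema.json. The sketch below is an assumption-laden illustration, not project tooling; it assumes Python with the jsonschema and pyyaml packages installed and the chart repository root as the working directory.

# Minimal sketch: confirm the example values conform to the updated schema.
# Paths below match the files added/changed in this patch; the script itself
# is hypothetical and not part of the chart's CI.
import json

import yaml
from jsonschema import validate

with open("charts/opentelemetry-kube-stack/values.schema.json") as f:
    schema = json.load(f)

with open("charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml") as f:
    values = yaml.safe_load(f)

# Raises jsonschema.exceptions.ValidationError if, for example,
# targetAllocator.image were still typed as an object instead of a string,
# or the serviceMonitorSelector did not match the LabelSelector definition.
validate(instance=values, schema=schema)
print("targetallocator-prom values conform to values.schema.json")

Helm itself enforces values.schema.json during lint, template, and install, so running the chart's linting against the new example values exercises the same change without any extra tooling.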