From efa9d14e7d3048e349fe584d89bf2a0a7ab2005b Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 30 Jul 2024 12:13:54 -0400 Subject: [PATCH 1/3] Add exporters --- .../examples/targetallocator-prom/README.md | 5 + .../kubelet_scrape_configs.yaml | 244 +++++++ .../rendered/collector.yaml | 245 ++++++- .../kube-api-server/servicemonitor.yaml | 36 + .../kube-controller-manager/service.yaml | 23 + .../servicemonitor.yaml | 29 + .../rendered/exporters/kube-dns/service.yaml | 26 + .../exporters/kube-dns/servicemonitor.yaml | 27 + .../rendered/exporters/kube-etcd/service.yaml | 23 + .../exporters/kube-etcd/servicemonitor.yaml | 25 + .../exporters/kube-proxy/service.yaml | 23 + .../exporters/kube-proxy/servicemonitor.yaml | 25 + .../exporters/kube-scheduler/service.yaml | 23 + .../kube-scheduler/servicemonitor.yaml | 29 + .../examples/targetallocator-prom/values.yaml | 67 +- .../templates/_helpers.tpl | 45 ++ .../templates/collector.yaml | 2 +- .../templates/exporters/core-dns/service.yaml | 24 + .../exporters/core-dns/servicemonitor.yaml | 48 ++ .../kube-api-server/servicemonitor.yaml | 47 ++ .../kube-controller-manager/endpoints.yaml | 22 + .../kube-controller-manager/service.yaml | 29 + .../servicemonitor.yaml | 59 ++ .../templates/exporters/kube-dns/service.yaml | 28 + .../exporters/kube-dns/servicemonitor.yaml | 61 ++ .../exporters/kube-etcd/endpoints.yaml | 20 + .../exporters/kube-etcd/service.yaml | 27 + .../exporters/kube-etcd/servicemonitor.yaml | 65 ++ .../exporters/kube-proxy/endpoints.yaml | 20 + .../exporters/kube-proxy/service.yaml | 27 + .../exporters/kube-proxy/servicemonitor.yaml | 53 ++ .../exporters/kube-scheduler/endpoints.yaml | 22 + .../exporters/kube-scheduler/service.yaml | 29 + .../kube-scheduler/servicemonitor.yaml | 59 ++ .../values.schema.json | 344 +++++++++ charts/opentelemetry-kube-stack/values.yaml | 659 ++++++++++++++++++ 36 files changed, 2484 insertions(+), 56 deletions(-) create mode 100644 
charts/opentelemetry-kube-stack/examples/targetallocator-prom/README.md create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/kubelet_scrape_configs.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml create mode 100644 charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/core-dns/service.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/core-dns/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-api-server/servicemonitor.yaml create mode 100644 
charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/endpoints.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/service.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-dns/service.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-dns/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/endpoints.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/service.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/endpoints.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/service.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/servicemonitor.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/endpoints.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/service.yaml create mode 100644 charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/servicemonitor.yaml diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/README.md b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/README.md new file mode 100644 index 000000000..abae0bc2b --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/README.md @@ -0,0 +1,5 @@ +# Prometheus Replacement example +This example contains files to allow a user to replace an installation of kube-prometheus-stack. 
The opentelemetry-kube-stack chart aims to make the replacement process straightforward by utilizing the target allocator to pull any servicemonitors and podmonitors. + +> [!NOTE] +> This chart has most of the same configurations as the kube-prometheus-stack chart, but requires that kubelet monitoring is done via a manual scrape config. This is because of how the prometheus-operator manages endpoints for the Kubelet service. If you'd like to avoid a scrape-config altogether, it's recommended to use the kubelet receiver in the opentelemetry collector. diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/kubelet_scrape_configs.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/kubelet_scrape_configs.yaml new file mode 100644 index 000000000..368f4aacf --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/kubelet_scrape_configs.yaml @@ -0,0 +1,244 @@ +# This is used to scrape the kubelet +{{- if .kubelet.enabled }} +- authorization: + credentials_file: "/var/run/secrets/kubernetes.io/serviceaccount/token" + type: Bearer + follow_redirects: true + honor_labels: {{ .kubelet.serviceMonitor.honorLabels }} + honor_timestamps: {{ .kubelet.serviceMonitor.honorTimestamps }} + job_name: serviceMonitor/{{ .namespace }}/{{ .Chart.Name }}-kubelet/0 + kubernetes_sd_configs: + - follow_redirects: true + kubeconfig_file: '' + role: node + metrics_path: "/metrics" + relabel_configs: + - action: replace + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: replace + replacement: "kubelet" + target_label: job + - action: replace + regex: "(.*)" + replacement: "${1}" + separator: ";" + source_labels: + - __meta_kubernetes_node_name + target_label: node + - action: replace + regex: "(.*)" + replacement: https-metrics + separator: ";" + target_label: endpoint + - action: replace + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: 
+ - __metrics_path__ + target_label: metrics_path + - action: hashmod + modulus: 1 + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: "$(SHARD)" + replacement: "$1" + separator: ";" + source_labels: + - __tmp_hash + {{- if .kubelet.serviceMonitor.https }} + scheme: https + {{- else }} + scheme: http + {{- end }} + scrape_interval: {{ .kubelet.serviceMonitor.interval | default "30s" }} + scrape_timeout: {{ .kubelet.serviceMonitor.scrapeTimeout | default "10s" }} + tls_config: + ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + insecure_skip_verify: true +{{- if .kubelet.serviceMonitor.cAdvisor }} +- authorization: + credentials_file: "/var/run/secrets/kubernetes.io/serviceaccount/token" + type: Bearer + follow_redirects: true + honor_labels: true + honor_timestamps: true + job_name: serviceMonitor/{{ .namespace }}/{{ .Chart.Name }}-kubelet/1 + kubernetes_sd_configs: + - follow_redirects: true + kubeconfig_file: '' + role: node + metric_relabel_configs: + - action: drop + regex: container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total) + replacement: "$1" + separator: ";" + source_labels: + - __name__ + - action: drop + regex: container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total) + replacement: "$1" + separator: ";" + source_labels: + - __name__ + - action: drop + regex: container_memory_(mapped_file|swap) + replacement: "$1" + separator: ";" + source_labels: + - __name__ + - action: drop + regex: container_(file_descriptors|tasks_state|threads_max) + replacement: "$1" + separator: ";" + source_labels: + - __name__ + - action: drop + regex: container_spec.* + replacement: "$1" + separator: ";" + source_labels: + - __name__ + - action: drop + regex: ".+;" + replacement: "$1" + separator: ";" + source_labels: + - id + - pod 
+ metrics_path: "/metrics/cadvisor" + relabel_configs: + - action: replace + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: replace + replacement: "kubelet" + target_label: job + - action: replace + regex: "(.*)" + replacement: "${1}" + separator: ";" + source_labels: + - __meta_kubernetes_node_name + target_label: node + - action: replace + regex: "(.*)" + replacement: https-metrics + separator: ";" + target_label: endpoint + - action: replace + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - __metrics_path__ + target_label: metrics_path + - action: hashmod + modulus: 1 + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: "$(SHARD)" + replacement: "$1" + separator: ";" + source_labels: + - __tmp_hash + {{- if .kubelet.serviceMonitor.https }} + scheme: https + {{- else }} + scheme: http + {{- end }} + scrape_interval: {{ .kubelet.serviceMonitor.interval | default "30s" }} + scrape_timeout: 10s + tls_config: + ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + insecure_skip_verify: true + +{{- end }} +{{- if .kubelet.serviceMonitor.probes }} +- authorization: + credentials_file: "/var/run/secrets/kubernetes.io/serviceaccount/token" + type: Bearer + follow_redirects: true + honor_labels: true + honor_timestamps: true + job_name: serviceMonitor/{{ .namespace }}/{{ .Chart.Name }}-kubelet/2 + kubernetes_sd_configs: + - follow_redirects: true + kubeconfig_file: '' + role: node + metrics_path: "/metrics/probes" + relabel_configs: + - action: replace + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: replace + replacement: "kubelet" + target_label: job + - action: replace + regex: "(.*)" + replacement: "${1}" + separator: ";" + source_labels: + - __meta_kubernetes_node_name + 
target_label: node + - action: replace + regex: "(.*)" + replacement: https-metrics + separator: ";" + target_label: endpoint + - action: replace + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - __metrics_path__ + target_label: metrics_path + - action: hashmod + modulus: 1 + regex: "(.*)" + replacement: "$1" + separator: ";" + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: "$(SHARD)" + replacement: "$1" + separator: ";" + source_labels: + - __tmp_hash + {{- if .kubelet.serviceMonitor.https }} + scheme: https + {{- else }} + scheme: http + {{- end }} + scrape_interval: {{ .kubelet.serviceMonitor.interval | default "30s" }} + scrape_timeout: 10s + tls_config: + ca_file: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + insecure_skip_verify: true +{{- end }} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml index e998d863e..3332af7e6 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml @@ -39,21 +39,235 @@ spec: prometheus: config: scrape_configs: - - job_name: otel-collector + - authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + type: Bearer + follow_redirects: true + honor_labels: true + honor_timestamps: true + job_name: serviceMonitor/default/opentelemetry-kube-stack-kubelet/0 + kubernetes_sd_configs: + - follow_redirects: true + kubeconfig_file: "" + role: node + metrics_path: /metrics + relabel_configs: + - action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: replace + replacement: kubelet + target_label: job + - action: replace + regex: (.*) + replacement: ${1} + separator: ; + 
source_labels: + - __meta_kubernetes_node_name + target_label: node + - action: replace + regex: (.*) + replacement: https-metrics + separator: ; + target_label: endpoint + - action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __metrics_path__ + target_label: metrics_path + - action: hashmod + modulus: 1 + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: $(SHARD) + replacement: $1 + separator: ; + source_labels: + - __tmp_hash + scheme: https scrape_interval: 30s - static_configs: - - targets: - - 0.0.0.0:8888 - target_allocator: - collector_id: ${POD_NAME} - endpoint: http://otelcol-targetallocator - interval: 30s + scrape_timeout: 10s + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + type: Bearer + follow_redirects: true + honor_labels: true + honor_timestamps: true + job_name: serviceMonitor/default/opentelemetry-kube-stack-kubelet/1 + kubernetes_sd_configs: + - follow_redirects: true + kubeconfig_file: "" + role: node + metric_relabel_configs: + - action: drop + regex: container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total) + replacement: $1 + separator: ; + source_labels: + - __name__ + - action: drop + regex: container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total) + replacement: $1 + separator: ; + source_labels: + - __name__ + - action: drop + regex: container_memory_(mapped_file|swap) + replacement: $1 + separator: ; + source_labels: + - __name__ + - action: drop + regex: container_(file_descriptors|tasks_state|threads_max) + replacement: $1 + separator: ; + source_labels: + - __name__ + - action: drop + regex: container_spec.* + replacement: $1 + 
separator: ; + source_labels: + - __name__ + - action: drop + regex: .+; + replacement: $1 + separator: ; + source_labels: + - id + - pod + metrics_path: /metrics/cadvisor + relabel_configs: + - action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: replace + replacement: kubelet + target_label: job + - action: replace + regex: (.*) + replacement: ${1} + separator: ; + source_labels: + - __meta_kubernetes_node_name + target_label: node + - action: replace + regex: (.*) + replacement: https-metrics + separator: ; + target_label: endpoint + - action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __metrics_path__ + target_label: metrics_path + - action: hashmod + modulus: 1 + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: $(SHARD) + replacement: $1 + separator: ; + source_labels: + - __tmp_hash + scheme: https + scrape_interval: 30s + scrape_timeout: 10s + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + - authorization: + credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token + type: Bearer + follow_redirects: true + honor_labels: true + honor_timestamps: true + job_name: serviceMonitor/default/opentelemetry-kube-stack-kubelet/2 + kubernetes_sd_configs: + - follow_redirects: true + kubeconfig_file: "" + role: node + metrics_path: /metrics/probes + relabel_configs: + - action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: replace + replacement: kubelet + target_label: job + - action: replace + regex: (.*) + replacement: ${1} + separator: ; + source_labels: + - __meta_kubernetes_node_name + target_label: node + - action: replace + regex: (.*) + replacement: https-metrics + separator: ; + target_label: endpoint + - 
action: replace + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __metrics_path__ + target_label: metrics_path + - action: hashmod + modulus: 1 + regex: (.*) + replacement: $1 + separator: ; + source_labels: + - __address__ + target_label: __tmp_hash + - action: keep + regex: $(SHARD) + replacement: $1 + separator: ; + source_labels: + - __tmp_hash + scheme: https + scrape_interval: 30s + scrape_timeout: 10s + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true service: pipelines: logs: exporters: - debug - - otlp processors: - resourcedetection/env - batch @@ -71,7 +285,6 @@ spec: traces: exporters: - debug - - otlp processors: - resourcedetection/env - batch @@ -93,20 +306,14 @@ spec: securityContext: {} targetAllocator: + allocationStrategy: per-node enabled: true image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:main prometheusCR: enabled: true - podMonitorSelector: - matchLabels: - app: my-app + podMonitorSelector: {} scrapeInterval: 30s - serviceMonitorSelector: - matchExpressions: - - key: kubernetes.io/app-name - operator: In - values: - - my-app + serviceMonitorSelector: {} volumeMounts: env: - name: OTEL_K8S_NODE_NAME diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml new file mode 100644 index 000000000..53b0e8c8a --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml @@ -0,0 +1,36 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-api-server/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: example-apiserver + namespace: default + labels: + app: opentelemetry-kube-stack-apiserver + helm.sh/chart: 
opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm +spec: + + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + port: https + scheme: https + metricRelabelings: + - action: drop + regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50) + sourceLabels: + - __name__ + - le + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + serverName: kubernetes + insecureSkipVerify: false + jobLabel: component + namespaceSelector: + matchNames: + - default + selector: + matchLabels: + component: apiserver + provider: kubernetes diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml new file mode 100644 index 000000000..d88bc9744 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-controller-manager/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-kube-controller-manager + labels: + app: opentelemetry-kube-stack-kube-controller-manager + jobLabel: kube-controller-manager + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: 10257 + protocol: TCP + targetPort: 10257 + selector: + component: kube-controller-manager + type: ClusterIP diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml 
b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml new file mode 100644 index 000000000..174b11a99 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml @@ -0,0 +1,29 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: example-kube-controller-manager + namespace: default + labels: + app: opentelemetry-kube-stack-kube-controller-manager + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app: opentelemetry-kube-stack-kube-controller-manager + release: "example" + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml new file mode 100644 index 000000000..f0dddac8d --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml @@ -0,0 +1,26 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-dns/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-kube-dns + labels: + app: opentelemetry-kube-stack-kube-dns + jobLabel: kube-dns + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm + namespace: kube-system +spec: + clusterIP: None + ports: + - 
name: http-metrics-dnsmasq + port: 10054 + protocol: TCP + targetPort: 10054 + - name: http-metrics-skydns + port: 10055 + protocol: TCP + targetPort: 10055 + selector: + k8s-app: kube-dns diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml new file mode 100644 index 000000000..bb7bff177 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml @@ -0,0 +1,27 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-dns/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: example-kube-dns + namespace: default + labels: + app: opentelemetry-kube-stack-kube-dns + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app: opentelemetry-kube-stack-kube-dns + release: "example" + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics-dnsmasq + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + - port: http-metrics-skydns + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml new file mode 100644 index 000000000..3dd4dc021 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-etcd/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-kube-etcd + labels: + app: 
opentelemetry-kube-stack-kube-etcd + jobLabel: kube-etcd + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: 2381 + protocol: TCP + targetPort: 2381 + selector: + component: etcd + type: ClusterIP diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml new file mode 100644 index 000000000..2ac7ce3d7 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml @@ -0,0 +1,25 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-etcd/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: example-kube-etcd + namespace: default + labels: + app: opentelemetry-kube-stack-kube-etcd + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app: opentelemetry-kube-stack-kube-etcd + release: "example" + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml new file mode 100644 index 000000000..c8470e288 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-proxy/service.yaml +apiVersion: v1 +kind: Service 
+metadata: + name: example-kube-proxy + labels: + app: opentelemetry-kube-stack-kube-proxy + jobLabel: kube-proxy + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: 10249 + protocol: TCP + targetPort: 10249 + selector: + k8s-app: kube-proxy + type: ClusterIP diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml new file mode 100644 index 000000000..ba447bb01 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml @@ -0,0 +1,25 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-proxy/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: example-kube-proxy + namespace: default + labels: + app: opentelemetry-kube-stack-kube-proxy + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app: opentelemetry-kube-stack-kube-proxy + release: "example" + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml new file mode 100644 index 000000000..639126326 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml @@ -0,0 +1,23 @@ +--- +# Source: 
opentelemetry-kube-stack/templates/exporters/kube-scheduler/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-kube-scheduler + labels: + app: opentelemetry-kube-stack-kube-scheduler + jobLabel: kube-scheduler + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: 10259 + protocol: TCP + targetPort: 10259 + selector: + component: kube-scheduler + type: ClusterIP diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml new file mode 100644 index 000000000..39d254692 --- /dev/null +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml @@ -0,0 +1,29 @@ +--- +# Source: opentelemetry-kube-stack/templates/exporters/kube-scheduler/servicemonitor.yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: example-kube-scheduler + namespace: default + labels: + app: opentelemetry-kube-stack-kube-scheduler + helm.sh/chart: opentelemetry-kube-stack-0.0.12 + app.kubernetes.io/version: "0.103.0" + app.kubernetes.io/managed-by: Helm +spec: + jobLabel: jobLabel + + selector: + matchLabels: + app: opentelemetry-kube-stack-kube-scheduler + release: "example" + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml index 65638411f..66c3bc1b4 100644 --- 
a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/values.yaml @@ -1,41 +1,21 @@ clusterName: demo -kubeStateMetrics: - enabled: true -nodeExporter: - enabled: true opentelemetry-operator: enabled: true -defaultCRConfig: - targetAllocator: - enabled: true - image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:main - prometheusCR: - enabled: true - podMonitorSelector: - matchLabels: - app: my-app - scrapeInterval: "30s" - serviceMonitorSelector: - matchExpressions: - - key: kubernetes.io/app-name - operator: In - values: ["my-app"] collectors: daemon: enabled: true + # because this file is inside the examples folder, we need to reference it directly. + scrape_configs_file: "examples/targetallocator-prom/kubelet_scrape_configs.yaml" + targetAllocator: + enabled: true + image: ghcr.io/open-telemetry/opentelemetry-operator/target-allocator:main + allocationStrategy: per-node + prometheusCR: + enabled: true + podMonitorSelector: {} + scrapeInterval: "30s" + serviceMonitorSelector: {} config: - receivers: - prometheus: - config: - scrape_configs: - - job_name: "otel-collector" - scrape_interval: 30s - static_configs: - - targets: ["0.0.0.0:8888"] - target_allocator: - endpoint: http://otelcol-targetallocator - interval: 30s - collector_id: "${POD_NAME}" exporters: otlp: endpoint: ingest.example.com:443 @@ -46,17 +26,12 @@ collectors: metrics: receivers: [prometheus] exporters: [debug, otlp] - traces: - exporters: [debug, otlp] - logs: - exporters: [debug, otlp] env: - name: ACCESS_TOKEN valueFrom: secretKeyRef: key: access_token name: otel-collector-secret - scrape_configs_file: "" presets: logsCollection: enabled: false @@ -72,3 +47,23 @@ instrumentation: enabled: false opAMPBridge: enabled: false +kubernetesServiceMonitors: + enabled: true +kubeApiServer: + enabled: true +kubelet: + enabled: true +kubeControllerManager: + enabled: true +kubeDns: + enabled: true 
+kubeEtcd: + enabled: true +kubeScheduler: + enabled: true +kubeProxy: + enabled: true +kubeStateMetrics: + enabled: true +nodeExporter: + enabled: true diff --git a/charts/opentelemetry-kube-stack/templates/_helpers.tpl b/charts/opentelemetry-kube-stack/templates/_helpers.tpl index f0b5d7273..eb24a625a 100644 --- a/charts/opentelemetry-kube-stack/templates/_helpers.tpl +++ b/charts/opentelemetry-kube-stack/templates/_helpers.tpl @@ -240,3 +240,48 @@ Optionally include the RBAC for the k8sCluster receiver verbs: ["watch", "list"] {{- end }} {{- end }} + +{{/* +Helpers for prometheus servicemonitors +*/}} +{{/* Prometheus specific stuff. */}} +{{/* Allow KubeVersion to be overridden. */}} +{{- define "opentelemetry-kube-stack.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}} +{{- end -}} + +{{/* Get value based on current Kubernetes version */}} +{{- define "opentelemetry-kube-stack.kubeVersionDefaultValue" -}} + {{- $values := index . 0 -}} + {{- $kubeVersion := index . 1 -}} + {{- $old := index . 2 -}} + {{- $new := index . 3 -}} + {{- $default := index . 4 -}} + {{- if kindIs "invalid" $default -}} + {{- if semverCompare $kubeVersion (include "opentelemetry-kube-stack.kubeVersion" $values) -}} + {{- print $new -}} + {{- else -}} + {{- print $old -}} + {{- end -}} + {{- else -}} + {{- print $default }} + {{- end -}} +{{- end -}} + +{{/* Get value for kube-controller-manager depending on insecure scraping availability */}} +{{- define "opentelemetry-kube-stack.kubeControllerManager.insecureScrape" -}} + {{- $values := index . 0 -}} + {{- $insecure := index . 1 -}} + {{- $secure := index . 2 -}} + {{- $userValue := index . 
3 -}} + {{- include "opentelemetry-kube-stack.kubeVersionDefaultValue" (list $values ">= 1.22-0" $insecure $secure $userValue) -}} +{{- end -}} + +{{/* Get value for kube-scheduler depending on insecure scraping availability */}} +{{- define "opentelemetry-kube-stack.kubeScheduler.insecureScrape" -}} + {{- $values := index . 0 -}} + {{- $insecure := index . 1 -}} + {{- $secure := index . 2 -}} + {{- $userValue := index . 3 -}} + {{- include "opentelemetry-kube-stack.kubeVersionDefaultValue" (list $values ">= 1.23-0" $insecure $secure $userValue) -}} +{{- end -}} diff --git a/charts/opentelemetry-kube-stack/templates/collector.yaml b/charts/opentelemetry-kube-stack/templates/collector.yaml index e61c4a338..98908f608 100644 --- a/charts/opentelemetry-kube-stack/templates/collector.yaml +++ b/charts/opentelemetry-kube-stack/templates/collector.yaml @@ -1,7 +1,7 @@ {{ range $_, $collector := $.Values.collectors -}} {{- if $collector.enabled -}} {{- $collector := (mergeOverwrite (deepCopy $.Values.defaultCRConfig) $collector) }} -{{- $merged := (dict "Template" $.Template "Files" $.Files "Chart" $.Chart "clusterRole" $.Values.clusterRole "collector" $collector "Release" $.Release "fullnameOverride" $.Values.fullnameOverride "presets" $.Values.presets "namespace" (include "opentelemetry-kube-stack.namespace" $)) }} +{{- $merged := (dict "Template" $.Template "Files" $.Files "Chart" $.Chart "clusterRole" $.Values.clusterRole "collector" $collector "Release" $.Release "fullnameOverride" $.Values.fullnameOverride "presets" $.Values.presets "namespace" (include "opentelemetry-kube-stack.namespace" $) "kubelet" $.Values.kubelet) }} {{- $fullname := (include "opentelemetry-kube-stack.collectorFullname" $merged) }} --- apiVersion: opentelemetry.io/v1beta1 diff --git a/charts/opentelemetry-kube-stack/templates/exporters/core-dns/service.yaml b/charts/opentelemetry-kube-stack/templates/exporters/core-dns/service.yaml new file mode 100644 index 000000000..04e1c9d1c --- /dev/null 
+++ b/charts/opentelemetry-kube-stack/templates/exporters/core-dns/service.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.coreDns.enabled .Values.coreDns.service.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-coredns + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-coredns + jobLabel: coredns +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: {{ .Values.coreDns.serviceMonitor.port }} + port: {{ .Values.coreDns.service.port }} + protocol: TCP + targetPort: {{ .Values.coreDns.service.targetPort }} + selector: + {{- if .Values.coreDns.service.selector }} +{{ toYaml .Values.coreDns.service.selector | indent 4 }} + {{- else}} + k8s-app: kube-dns + {{- end}} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/core-dns/servicemonitor.yaml b/charts/opentelemetry-kube-stack/templates/exporters/core-dns/servicemonitor.yaml new file mode 100644 index 000000000..9d8dc087e --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/core-dns/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.coreDns.enabled .Values.coreDns.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-coredns + {{- if .Values.kubernetesServiceMonitors.ignoreNamespaceSelectors }} + namespace: kube-system + {{- else }} + namespace: {{ template "opentelemetry-kube-stack.namespace" . }} + {{- end }} + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-coredns + {{- with .Values.coreDns.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{ include "opentelemetry-kube-stack.labels" . 
| indent 4 }} +spec: + jobLabel: {{ .Values.coreDns.serviceMonitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.coreDns.serviceMonitor | nindent 2 }} + selector: + {{- if .Values.coreDns.serviceMonitor.selector }} + {{ tpl (toYaml .Values.coreDns.serviceMonitor.selector | nindent 4) . }} + {{- else }} + matchLabels: + app: {{ template "opentelemetry-kube-stack.name" . }}-coredns + release: {{ $.Release.Name | quote }} + {{- end }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: {{ .Values.coreDns.serviceMonitor.port }} + {{- if .Values.coreDns.serviceMonitor.interval}} + interval: {{ .Values.coreDns.serviceMonitor.interval }} + {{- end }} + {{- if .Values.coreDns.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.coreDns.serviceMonitor.proxyUrl}} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.coreDns.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.coreDns.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.coreDns.serviceMonitor.relabelings }} + relabelings: +{{ tpl (toYaml .Values.coreDns.serviceMonitor.relabelings | indent 4) . }} +{{- end }} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-api-server/servicemonitor.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-api-server/servicemonitor.yaml new file mode 100644 index 000000000..6f8aecd83 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-api-server/servicemonitor.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.kubeApiServer.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-apiserver + {{- if .Values.kubernetesServiceMonitors.ignoreNamespaceSelectors }} + namespace: default + {{- else }} + namespace: {{ template "opentelemetry-kube-stack.namespace" . 
}} + {{- end }} + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-apiserver + {{- with .Values.kubeApiServer.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} +spec: + {{- include "servicemonitor.scrapeLimits" .Values.kubeApiServer.serviceMonitor | nindent 2 }} + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeApiServer.serviceMonitor.interval }} + interval: {{ .Values.kubeApiServer.serviceMonitor.interval }} + {{- end }} + {{- if .Values.kubeApiServer.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeApiServer.serviceMonitor.proxyUrl }} + {{- end }} + port: https + scheme: https +{{- if .Values.kubeApiServer.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.metricRelabelings | indent 6) . }} +{{- end }} +{{- if .Values.kubeApiServer.serviceMonitor.relabelings }} + relabelings: +{{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.relabelings | indent 6) . 
}} +{{- end }} + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + serverName: {{ .Values.kubeApiServer.tlsConfig.serverName }} + insecureSkipVerify: {{ .Values.kubeApiServer.tlsConfig.insecureSkipVerify }} + jobLabel: {{ .Values.kubeApiServer.serviceMonitor.jobLabel }} + namespaceSelector: + matchNames: + - default + selector: +{{ toYaml .Values.kubeApiServer.serviceMonitor.selector | indent 4 }} +{{- end}} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/endpoints.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/endpoints.yaml new file mode 100644 index 000000000..d792766ae --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/endpoints.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.endpoints .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-controller-manager + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-controller-manager + k8s-app: kube-controller-manager +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeControllerManager.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: {{ .Values.kubeControllerManager.serviceMonitor.port }} + {{- $kubeControllerManagerDefaultInsecurePort := 10252 }} + {{- $kubeControllerManagerDefaultSecurePort := 10257 }} + port: {{ include "opentelemetry-kube-stack.kubeControllerManager.insecureScrape" (list . 
$kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.port) }} + protocol: TCP +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/service.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/service.yaml new file mode 100644 index 000000000..103eeb5d1 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/service.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.service.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-controller-manager + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-controller-manager + jobLabel: kube-controller-manager +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: {{ .Values.kubeControllerManager.serviceMonitor.port }} + {{- $kubeControllerManagerDefaultInsecurePort := 10252 }} + {{- $kubeControllerManagerDefaultSecurePort := 10257 }} + port: {{ include "opentelemetry-kube-stack.kubeControllerManager.insecureScrape" (list . $kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.port) }} + protocol: TCP + targetPort: {{ include "opentelemetry-kube-stack.kubeControllerManager.insecureScrape" (list . 
$kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.targetPort) }} +{{- if .Values.kubeControllerManager.endpoints }}{{- else }} + selector: + {{- if .Values.kubeControllerManager.service.selector }} +{{ toYaml .Values.kubeControllerManager.service.selector | indent 4 }} + {{- else}} + component: kube-controller-manager + {{- end}} +{{- end }} + type: ClusterIP +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml new file mode 100644 index 000000000..ade0a194c --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml @@ -0,0 +1,59 @@ +{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-controller-manager + {{- if .Values.kubernetesServiceMonitors.ignoreNamespaceSelectors }} + namespace: kube-system + {{- else }} + namespace: {{ template "opentelemetry-kube-stack.namespace" . }} + {{- end }} + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-controller-manager + {{- with .Values.kubeControllerManager.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} +spec: + jobLabel: {{ .Values.kubeControllerManager.serviceMonitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.kubeControllerManager.serviceMonitor | nindent 2 }} + selector: + {{- if .Values.kubeControllerManager.serviceMonitor.selector }} + {{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.selector | nindent 4) . 
}} + {{- else }} + matchLabels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-controller-manager + release: {{ $.Release.Name | quote }} + {{- end }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: {{ .Values.kubeControllerManager.serviceMonitor.port }} + {{- if .Values.kubeControllerManager.serviceMonitor.interval }} + interval: {{ .Values.kubeControllerManager.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeControllerManager.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeControllerManager.serviceMonitor.proxyUrl}} + {{- end }} + {{- if eq (include "opentelemetry-kube-stack.kubeControllerManager.insecureScrape" (list . false true .Values.kubeControllerManager.serviceMonitor.https )) "true" }} + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + {{- if eq (include "opentelemetry-kube-stack.kubeControllerManager.insecureScrape" (list . nil true .Values.kubeControllerManager.serviceMonitor.insecureSkipVerify)) "true" }} + insecureSkipVerify: true + {{- end }} + {{- if .Values.kubeControllerManager.serviceMonitor.serverName }} + serverName: {{ .Values.kubeControllerManager.serviceMonitor.serverName }} + {{- end }} + {{- end }} +{{- if .Values.kubeControllerManager.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubeControllerManager.serviceMonitor.relabelings }} + relabelings: +{{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.relabelings | indent 4) . 
}} +{{- end }} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-dns/service.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-dns/service.yaml new file mode 100644 index 000000000..4f35cfcd8 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-dns/service.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.kubeDns.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-dns + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-dns + jobLabel: kube-dns +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics-dnsmasq + port: {{ .Values.kubeDns.service.dnsmasq.port }} + protocol: TCP + targetPort: {{ .Values.kubeDns.service.dnsmasq.targetPort }} + - name: http-metrics-skydns + port: {{ .Values.kubeDns.service.skydns.port }} + protocol: TCP + targetPort: {{ .Values.kubeDns.service.skydns.targetPort }} + selector: + {{- if .Values.kubeDns.service.selector }} +{{ toYaml .Values.kubeDns.service.selector | indent 4 }} + {{- else}} + k8s-app: kube-dns + {{- end}} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-dns/servicemonitor.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-dns/servicemonitor.yaml new file mode 100644 index 000000000..e49099196 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-dns/servicemonitor.yaml @@ -0,0 +1,61 @@ +{{- if and .Values.kubeDns.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-dns + {{- if .Values.kubernetesServiceMonitors.ignoreNamespaceSelectors }} + namespace: kube-system + {{- else }} + namespace: {{ template "opentelemetry-kube-stack.namespace" . 
}} + {{- end }} + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-dns + {{- with .Values.kubeDns.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} +spec: + jobLabel: {{ .Values.kubeDns.serviceMonitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.kubeDns.serviceMonitor | nindent 2 }} + selector: + {{- if .Values.kubeDns.serviceMonitor.selector }} + {{ tpl (toYaml .Values.kubeDns.serviceMonitor.selector | nindent 4) . }} + {{- else }} + matchLabels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-dns + release: {{ $.Release.Name | quote }} + {{- end }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics-dnsmasq + {{- if .Values.kubeDns.serviceMonitor.interval }} + interval: {{ .Values.kubeDns.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeDns.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeDns.serviceMonitor.proxyUrl}} + {{- end }} +{{- if .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubeDns.serviceMonitor.dnsmasqRelabelings }} + relabelings: +{{ toYaml .Values.kubeDns.serviceMonitor.dnsmasqRelabelings | indent 4 }} +{{- end }} + - port: http-metrics-skydns + {{- if .Values.kubeDns.serviceMonitor.interval }} + interval: {{ .Values.kubeDns.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.kubeDns.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeDns.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubeDns.serviceMonitor.relabelings }} + relabelings: +{{ tpl (toYaml .Values.kubeDns.serviceMonitor.relabelings | indent 4) . }} +{{- end }} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/endpoints.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/endpoints.yaml new file mode 100644 index 000000000..669f538ba --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/endpoints.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.endpoints .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-etcd + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-etcd + k8s-app: etcd-server +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeEtcd.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: {{ .Values.kubeEtcd.serviceMonitor.port }} + port: {{ .Values.kubeEtcd.service.port }} + protocol: TCP +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/service.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/service.yaml new file mode 100644 index 000000000..f0d155457 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/service.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.service.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-etcd + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-etcd + jobLabel: kube-etcd +{{ include "opentelemetry-kube-stack.labels" . 
| indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: {{ .Values.kubeEtcd.serviceMonitor.port }} + port: {{ .Values.kubeEtcd.service.port }} + protocol: TCP + targetPort: {{ .Values.kubeEtcd.service.targetPort }} +{{- if .Values.kubeEtcd.endpoints }}{{- else }} + selector: + {{- if .Values.kubeEtcd.service.selector }} +{{ toYaml .Values.kubeEtcd.service.selector | indent 4 }} + {{- else}} + component: etcd + {{- end}} +{{- end }} + type: ClusterIP +{{- end -}} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/servicemonitor.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/servicemonitor.yaml new file mode 100644 index 000000000..bf622f204 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-etcd/servicemonitor.yaml @@ -0,0 +1,65 @@ +{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-etcd + {{- if .Values.kubernetesServiceMonitors.ignoreNamespaceSelectors }} + namespace: kube-system + {{- else }} + namespace: {{ template "opentelemetry-kube-stack.namespace" . }} + {{- end }} + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-etcd + {{- with .Values.kubeEtcd.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} +spec: + jobLabel: {{ .Values.kubeEtcd.serviceMonitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.kubeEtcd.serviceMonitor | nindent 2 }} + selector: + {{- if .Values.kubeEtcd.serviceMonitor.selector }} + {{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.selector | nindent 4) . }} + {{- else }} + matchLabels: + app: {{ template "opentelemetry-kube-stack.name" . 
}}-kube-etcd + release: {{ $.Release.Name | quote }} + {{- end }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: {{ .Values.kubeEtcd.serviceMonitor.port }} + {{- if .Values.kubeEtcd.serviceMonitor.interval }} + interval: {{ .Values.kubeEtcd.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeEtcd.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeEtcd.serviceMonitor.proxyUrl}} + {{- end }} + {{- if eq .Values.kubeEtcd.serviceMonitor.scheme "https" }} + scheme: https + tlsConfig: + {{- if .Values.kubeEtcd.serviceMonitor.serverName }} + serverName: {{ .Values.kubeEtcd.serviceMonitor.serverName }} + {{- end }} + {{- if .Values.kubeEtcd.serviceMonitor.caFile }} + caFile: {{ .Values.kubeEtcd.serviceMonitor.caFile }} + {{- end }} + {{- if .Values.kubeEtcd.serviceMonitor.certFile }} + certFile: {{ .Values.kubeEtcd.serviceMonitor.certFile }} + {{- end }} + {{- if .Values.kubeEtcd.serviceMonitor.keyFile }} + keyFile: {{ .Values.kubeEtcd.serviceMonitor.keyFile }} + {{- end}} + insecureSkipVerify: {{ .Values.kubeEtcd.serviceMonitor.insecureSkipVerify }} + {{- end }} +{{- if .Values.kubeEtcd.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubeEtcd.serviceMonitor.relabelings }} + relabelings: +{{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.relabelings | indent 4) . 
}} +{{- end }} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/endpoints.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/endpoints.yaml new file mode 100644 index 000000000..42c7225a3 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/endpoints.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.endpoints .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-proxy + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-proxy + k8s-app: kube-proxy +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeProxy.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: {{ .Values.kubeProxy.serviceMonitor.port }} + port: {{ .Values.kubeProxy.service.port }} + protocol: TCP +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/service.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/service.yaml new file mode 100644 index 000000000..886d87eb8 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/service.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.service.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-proxy + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-proxy + jobLabel: kube-proxy +{{ include "opentelemetry-kube-stack.labels" . 
| indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: {{ .Values.kubeProxy.serviceMonitor.port }} + port: {{ .Values.kubeProxy.service.port }} + protocol: TCP + targetPort: {{ .Values.kubeProxy.service.targetPort }} +{{- if .Values.kubeProxy.endpoints }}{{- else }} + selector: + {{- if .Values.kubeProxy.service.selector }} +{{ toYaml .Values.kubeProxy.service.selector | indent 4 }} + {{- else}} + k8s-app: kube-proxy + {{- end}} +{{- end }} + type: ClusterIP +{{- end -}} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/servicemonitor.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/servicemonitor.yaml new file mode 100644 index 000000000..00f0aab11 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-proxy/servicemonitor.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-proxy + {{- if .Values.kubernetesServiceMonitors.ignoreNamespaceSelectors }} + namespace: kube-system + {{- else }} + namespace: {{ template "opentelemetry-kube-stack.namespace" . }} + {{- end }} + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-proxy + {{- with .Values.kubeProxy.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} +spec: + jobLabel: {{ .Values.kubeProxy.serviceMonitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.kubeProxy.serviceMonitor | nindent 2 }} + selector: + {{- if .Values.kubeProxy.serviceMonitor.selector }} + {{ tpl (toYaml .Values.kubeProxy.serviceMonitor.selector | nindent 4) . }} + {{- else }} + matchLabels: + app: {{ template "opentelemetry-kube-stack.name" . 
}}-kube-proxy + release: {{ $.Release.Name | quote }} + {{- end }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: {{ .Values.kubeProxy.serviceMonitor.port }} + {{- if .Values.kubeProxy.serviceMonitor.interval }} + interval: {{ .Values.kubeProxy.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeProxy.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeProxy.serviceMonitor.proxyUrl}} + {{- end }} + {{- if .Values.kubeProxy.serviceMonitor.https }} + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + {{- end}} +{{- if .Values.kubeProxy.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeProxy.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubeProxy.serviceMonitor.relabelings }} + relabelings: +{{ tpl (toYaml .Values.kubeProxy.serviceMonitor.relabelings | indent 4) . }} +{{- end }} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/endpoints.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/endpoints.yaml new file mode 100644 index 000000000..b7baccc36 --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/endpoints.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.endpoints .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-scheduler + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-scheduler + k8s-app: kube-scheduler +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeScheduler.endpoints }} + - ip: {{ . 
}} + {{- end }} + ports: + - name: {{ .Values.kubeScheduler.serviceMonitor.port }} + {{- $kubeSchedulerDefaultInsecurePort := 10251 }} + {{- $kubeSchedulerDefaultSecurePort := 10259 }} + port: {{ include "opentelemetry-kube-stack.kubeScheduler.insecureScrape" (list . $kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.port) }} + protocol: TCP +{{- end }} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/service.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/service.yaml new file mode 100644 index 000000000..c4912eb2c --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/service.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.service.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-scheduler + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-scheduler + jobLabel: kube-scheduler +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: {{ .Values.kubeScheduler.serviceMonitor.port }} + {{- $kubeSchedulerDefaultInsecurePort := 10251 }} + {{- $kubeSchedulerDefaultSecurePort := 10259 }} + port: {{ include "opentelemetry-kube-stack.kubeScheduler.insecureScrape" (list . $kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.port) }} + protocol: TCP + targetPort: {{ include "opentelemetry-kube-stack.kubeScheduler.insecureScrape" (list . 
$kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.targetPort) }} +{{- if .Values.kubeScheduler.endpoints }}{{- else }} + selector: + {{- if .Values.kubeScheduler.service.selector }} +{{ toYaml .Values.kubeScheduler.service.selector | indent 4 }} + {{- else}} + component: kube-scheduler + {{- end}} +{{- end }} + type: ClusterIP +{{- end -}} diff --git a/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/servicemonitor.yaml b/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/servicemonitor.yaml new file mode 100644 index 000000000..3ba2e27fb --- /dev/null +++ b/charts/opentelemetry-kube-stack/templates/exporters/kube-scheduler/servicemonitor.yaml @@ -0,0 +1,59 @@ +{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "opentelemetry-kube-stack.fullname" . }}-kube-scheduler + {{- if .Values.kubernetesServiceMonitors.ignoreNamespaceSelectors }} + namespace: kube-system + {{- else }} + namespace: {{ template "opentelemetry-kube-stack.namespace" . }} + {{- end }} + labels: + app: {{ template "opentelemetry-kube-stack.name" . }}-kube-scheduler + {{- with .Values.kubeScheduler.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{ include "opentelemetry-kube-stack.labels" . | indent 4 }} +spec: + jobLabel: {{ .Values.kubeScheduler.serviceMonitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.kubeScheduler.serviceMonitor | nindent 2 }} + selector: + {{- if .Values.kubeScheduler.serviceMonitor.selector }} + {{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.selector | nindent 4) . }} + {{- else }} + matchLabels: + app: {{ template "opentelemetry-kube-stack.name" . 
}}-kube-scheduler + release: {{ $.Release.Name | quote }} + {{- end }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: {{ .Values.kubeScheduler.serviceMonitor.port }} + {{- if .Values.kubeScheduler.serviceMonitor.interval }} + interval: {{ .Values.kubeScheduler.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeScheduler.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeScheduler.serviceMonitor.proxyUrl}} + {{- end }} + {{- if eq (include "opentelemetry-kube-stack.kubeScheduler.insecureScrape" (list . false true .Values.kubeScheduler.serviceMonitor.https )) "true" }} + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + {{- if eq (include "opentelemetry-kube-stack.kubeScheduler.insecureScrape" (list . nil true .Values.kubeScheduler.serviceMonitor.insecureSkipVerify)) "true" }} + insecureSkipVerify: true + {{- end }} + {{- if .Values.kubeScheduler.serviceMonitor.serverName }} + serverName: {{ .Values.kubeScheduler.serviceMonitor.serverName }} + {{- end}} + {{- end}} +{{- if .Values.kubeScheduler.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubeScheduler.serviceMonitor.relabelings }} + relabelings: +{{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.relabelings | indent 4) . 
}} +{{- end }} +{{- end }} diff --git a/charts/opentelemetry-kube-stack/values.schema.json b/charts/opentelemetry-kube-stack/values.schema.json index 4ba8d26fa..f79d5d855 100644 --- a/charts/opentelemetry-kube-stack/values.schema.json +++ b/charts/opentelemetry-kube-stack/values.schema.json @@ -3276,6 +3276,313 @@ "required": ["enabled"], "title": "EnableConfigBlock" }, + "AttachMetadata": { + "type": "object", + "additionalProperties": false, + "properties": { + "node": { + "type": "boolean" + } + }, + "required": ["node"], + "title": "AttachMetadata" + }, + "KubeAPIServer": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "tlsConfig": { + "$ref": "#/$defs/TLSConfig" + }, + "serviceMonitor": { + "$ref": "#/$defs/Monitor" + } + }, + "required": ["enabled", "serviceMonitor", "tlsConfig"], + "title": "KubeAPIServer" + }, + "TLSConfig": { + "type": "object", + "additionalProperties": false, + "properties": { + "serverName": { + "type": "string" + }, + "insecureSkipVerify": { + "type": "boolean" + } + }, + "required": ["insecureSkipVerify", "serverName"], + "title": "TLSConfig" + }, + "KubeDNS": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "service": { + "$ref": "#/$defs/KubeDNSService" + }, + "serviceMonitor": { + "$ref": "#/$defs/KubeServiceMonitor" + } + }, + "required": ["enabled", "service", "serviceMonitor"], + "title": "KubeDNS" + }, + "KubeDNSService": { + "type": "object", + "additionalProperties": false, + "properties": { + "dnsmasq": { + "$ref": "#/$defs/Dnsmasq" + }, + "skydns": { + "$ref": "#/$defs/Dnsmasq" + } + }, + "required": ["dnsmasq", "skydns"], + "title": "KubeDNSService" + }, + "Dnsmasq": { + "type": "object", + "additionalProperties": false, + "properties": { + "port": { + "type": "integer" + }, + "targetPort": { + "type": "integer" + } + }, + "required": ["port", "targetPort"], + "title": "Dnsmasq" + }, + 
"Kubelet": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "namespace": { + "type": "string" + }, + "serviceMonitor": { + "$ref": "#/$defs/KubeletServiceMonitor" + } + }, + "required": ["enabled", "namespace", "serviceMonitor"], + "title": "Kubelet" + }, + "KubeletServiceMonitor": { + "type": "object", + "additionalProperties": false, + "properties": { + "attachMetadata": { + "$ref": "#/$defs/AttachMetadata" + }, + "interval": { + "type": "string" + }, + "honorLabels": { + "type": "boolean" + }, + "honorTimestamps": { + "type": "boolean" + }, + "https": { + "type": "boolean" + }, + "cAdvisor": { + "type": "boolean" + }, + "probes": { + "type": "boolean" + } + }, + "required": [ + "cAdvisor", + "honorLabels", + "honorTimestamps", + "https", + "interval", + "probes" + ], + "title": "KubeletServiceMonitor" + }, + "KubeServiceMonitorDefinition": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "endpoints": { + "type": "array", + "items": {} + }, + "service": { + "$ref": "#/$defs/KubeService" + }, + "serviceMonitor": { + "$ref": "#/$defs/KubeServiceMonitor" + } + }, + "required": ["enabled", "endpoints", "service", "serviceMonitor"], + "title": "KubeServiceMonitorDefinition" + }, + "KubeService": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "port": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + }, + "targetPort": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ] + } + }, + "required": ["enabled"], + "title": "KubeService" + }, + "KubeServiceMonitor": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "interval": { + "type": "string" + }, + "sampleLimit": { + "type": "integer" + }, + "targetLimit": { + "type": "integer" + }, + "labelLimit": { + "type": 
"integer" + }, + "labelNameLengthLimit": { + "type": "integer" + }, + "labelValueLengthLimit": { + "type": "integer" + }, + "proxyUrl": { + "type": "string" + }, + "port": { + "type": "string" + }, + "jobLabel": { + "type": "string" + }, + "selector": { + "$ref": "#/$defs/AdditionalLabels" + }, + "metricRelabelings": { + "type": "array", + "items": {} + }, + "relabelings": { + "type": "array", + "items": {} + }, + "additionalLabels": { + "$ref": "#/$defs/AdditionalLabels" + }, + "https": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "insecureSkipVerify": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ] + }, + "serverName": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string" + } + ] + }, + "dnsmasqMetricRelabelings": { + "type": "array", + "items": {} + }, + "dnsmasqRelabelings": { + "type": "array", + "items": {} + }, + "scheme": { + "type": "string" + }, + "caFile": { + "type": "string" + }, + "certFile": { + "type": "string" + }, + "keyFile": { + "type": "string" + } + }, + "required": [ + "additionalLabels", + "interval", + "jobLabel", + "labelLimit", + "labelNameLengthLimit", + "labelValueLengthLimit", + "metricRelabelings", + "proxyUrl", + "relabelings", + "sampleLimit", + "selector", + "targetLimit" + ], + "title": "KubeServiceMonitor" + }, "KubeStateMetrics": { "type": "object", "additionalProperties": true, @@ -3585,6 +3892,43 @@ "opentelemetry-operator": { "type": "object" }, + "kubernetesServiceMonitors": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "ignoreNamespaceSelectors": { + "type": "boolean" + } + }, + "required": ["enabled", "ignoreNamespaceSelectors"] + }, + "kubeApiServer": { + "$ref": "#/$defs/KubeAPIServer" + }, + "kubelet": { + "$ref": "#/$defs/Kubelet" + }, + "kubeControllerManager": { + "$ref": "#/$defs/KubeServiceMonitorDefinition" + }, + "coreDns": { + "$ref": "#/$defs/KubeServiceMonitorDefinition" + 
}, + "kubeDns": { + "$ref": "#/$defs/KubeDNS" + }, + "kubeEtcd": { + "$ref": "#/$defs/KubeServiceMonitorDefinition" + }, + "kubeScheduler": { + "$ref": "#/$defs/KubeServiceMonitorDefinition" + }, + "kubeProxy": { + "$ref": "#/$defs/KubeServiceMonitorDefinition" + }, "kubeStateMetrics": { "$ref": "#/$defs/EnableConfigBlock" }, diff --git a/charts/opentelemetry-kube-stack/values.yaml b/charts/opentelemetry-kube-stack/values.yaml index 48644c56e..cda2bcaf1 100644 --- a/charts/opentelemetry-kube-stack/values.yaml +++ b/charts/opentelemetry-kube-stack/values.yaml @@ -899,6 +899,665 @@ opAMPBridge: # This configuration sections allows for a direct replacement of the kube-prometheus-stack # chart where the collector scrapes the same metrics as the default prometheus installation. +## Flag to disable all the kubernetes component scrapers +## +kubernetesServiceMonitors: + enabled: false + ignoreNamespaceSelectors: false + +## Component scraping the kube api server +## +kubeApiServer: + enabled: false + tlsConfig: + serverName: kubernetes + insecureSkipVerify: false + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. 
+ ## + proxyUrl: "" + + jobLabel: component + selector: + matchLabels: + component: apiserver + provider: kubernetes + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: + # Drop excessively noisy apiserver buckets. + - action: drop + regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50) + sourceLabels: + - __name__ + - le + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping the kubelet and kubelet-hosted cAdvisor +## the configuration for this is currently only in kubelet_scrape_configs.yaml +## This is because kubelet doesn't have a service and can only be scraped manually. +kubelet: + enabled: false + namespace: kube-system + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## If true, Prometheus use (respect) labels provided by exporter. + ## + honorLabels: true + + ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape. + ## + honorTimestamps: true + + ## Enable scraping the kubelet over https. 
For requirements to enable this see + ## https://github.com/prometheus-operator/prometheus-operator/issues/926 + ## + https: true + + ## Enable scraping /metrics/cadvisor from kubelet's service + ## + cAdvisor: true + + ## Enable scraping /metrics/probes from kubelet's service + ## + probes: true + +## Component scraping the kube controller manager +## +kubeControllerManager: + enabled: false + + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeControllerManager.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.22. + ## + port: null + targetPort: null + # selector: + # component: kube-controller-manager + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. 
+ ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: kube-controller-manager + + ## Enable scraping kube-controller-manager over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + # Skip TLS certificate validation when scraping + insecureSkipVerify: null + + # Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping coreDns. Use either this or kubeDns +## +coreDns: + enabled: false + endpoints: [] + service: + enabled: true + port: 9153 + targetPort: 9153 + # selector: + # k8s-app: kube-dns + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. 
+ ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-dns + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kubeDns. Use either this or coreDns +## +kubeDns: + enabled: false + service: + dnsmasq: + port: 10054 + targetPort: 10054 + skydns: + port: 10055 + targetPort: 10055 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. 
+ ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-dns + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqMetricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqRelabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping etcd +## +kubeEtcd: + enabled: false + + ## If your etcd is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 2381 + targetPort: 2381 + # selector: + # component: etcd + + ## Configure secure access to the etcd cluster by loading a secret into prometheus and + ## specifying security configuration below. For example, with a secret named etcd-client-cert + ## + ## serviceMonitor: + ## scheme: https + ## insecureSkipVerify: false + ## serverName: localhost + ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client + ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + ## + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. 
+ ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + scheme: http + insecureSkipVerify: false + serverName: "" + caFile: "" + certFile: "" + keyFile: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: etcd + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube scheduler +## +kubeScheduler: + enabled: false + + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeScheduler.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or 
unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.23. + ## + port: null + targetPort: null + # selector: + # component: kube-scheduler + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping kube-scheduler over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: kube-scheduler + + ## Skip TLS certificate validation when scraping + insecureSkipVerify: null + + ## Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + +## Component scraping kube proxy +## +kubeProxy: + enabled: false + + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + service: + enabled: true + port: 10249 + targetPort: 10249 + # selector: + # k8s-app: kube-proxy + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. 
+ ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-proxy + + ## Enable scraping kube-proxy over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + ## Controls whether the kube-state-metrics chart should be created. ## This block matches the configuration for the kube-prometheus-stack chart for compatibility. kubeStateMetrics: From 4b111dd6eff07280538920274d0010c2ed5403f0 Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 30 Jul 2024 12:15:01 -0400 Subject: [PATCH 2/3] bump chart version --- charts/opentelemetry-kube-stack/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/opentelemetry-kube-stack/Chart.yaml b/charts/opentelemetry-kube-stack/Chart.yaml index 5710b73ce..739ace003 100644 --- a/charts/opentelemetry-kube-stack/Chart.yaml +++ b/charts/opentelemetry-kube-stack/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: opentelemetry-kube-stack -version: 0.0.12 +version: 0.0.13 description: | OpenTelemetry Quickstart chart for Kubernetes. 
Installs an operator and collector for an easy way to get started with Kubernetes observability. From a4fa57ab614b77566234e6dd261b925aa9032194 Mon Sep 17 00:00:00 2001 From: Jacob Aronoff Date: Tue, 30 Jul 2024 12:18:27 -0400 Subject: [PATCH 3/3] v bump --- .../examples/cloud-demo/rendered/bridge.yaml | 2 +- .../examples/cloud-demo/rendered/collector.yaml | 4 ++-- .../examples/cloud-demo/rendered/instrumentation.yaml | 2 +- .../examples/targetallocator-prom/rendered/collector.yaml | 2 +- .../rendered/exporters/kube-api-server/servicemonitor.yaml | 2 +- .../rendered/exporters/kube-controller-manager/service.yaml | 2 +- .../exporters/kube-controller-manager/servicemonitor.yaml | 2 +- .../rendered/exporters/kube-dns/service.yaml | 2 +- .../rendered/exporters/kube-dns/servicemonitor.yaml | 2 +- .../rendered/exporters/kube-etcd/service.yaml | 2 +- .../rendered/exporters/kube-etcd/servicemonitor.yaml | 2 +- .../rendered/exporters/kube-proxy/service.yaml | 2 +- .../rendered/exporters/kube-proxy/servicemonitor.yaml | 2 +- .../rendered/exporters/kube-scheduler/service.yaml | 2 +- .../rendered/exporters/kube-scheduler/servicemonitor.yaml | 2 +- 15 files changed, 16 insertions(+), 16 deletions(-) diff --git a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml index 765553b32..4e8d4427c 100644 --- a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml +++ b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/bridge.yaml @@ -5,7 +5,7 @@ kind: OpAMPBridge metadata: name: example labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml index dcd22d1d5..8e7597359 
100644 --- a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml +++ b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/collector.yaml @@ -6,7 +6,7 @@ metadata: name: example-cluster-stats namespace: default labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm opentelemetry.io/opamp-reporting: "true" @@ -189,7 +189,7 @@ metadata: name: example-daemon namespace: default labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm opentelemetry.io/opamp-reporting: "true" diff --git a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml index dcf3b882e..3252e42d5 100644 --- a/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml +++ b/charts/opentelemetry-kube-stack/examples/cloud-demo/rendered/instrumentation.yaml @@ -5,7 +5,7 @@ kind: Instrumentation metadata: name: example labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml index 3332af7e6..17c0dc23e 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/collector.yaml @@ -6,7 +6,7 @@ metadata: name: example-daemon namespace: default labels: - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: 
"0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml index 53b0e8c8a..b9809b1e9 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-api-server/servicemonitor.yaml @@ -7,7 +7,7 @@ metadata: namespace: default labels: app: opentelemetry-kube-stack-apiserver - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml index d88bc9744..a3f461c88 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/service.yaml @@ -7,7 +7,7 @@ metadata: labels: app: opentelemetry-kube-stack-kube-controller-manager jobLabel: kube-controller-manager - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm namespace: kube-system diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml index 174b11a99..240ae32b3 100644 --- 
a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-controller-manager/servicemonitor.yaml @@ -7,7 +7,7 @@ metadata: namespace: default labels: app: opentelemetry-kube-stack-kube-controller-manager - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml index f0dddac8d..c3002f3a1 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/service.yaml @@ -7,7 +7,7 @@ metadata: labels: app: opentelemetry-kube-stack-kube-dns jobLabel: kube-dns - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm namespace: kube-system diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml index bb7bff177..e093b8e18 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-dns/servicemonitor.yaml @@ -7,7 +7,7 @@ metadata: namespace: default labels: app: opentelemetry-kube-stack-kube-dns - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 
app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml index 3dd4dc021..487ff9c04 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/service.yaml @@ -7,7 +7,7 @@ metadata: labels: app: opentelemetry-kube-stack-kube-etcd jobLabel: kube-etcd - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm namespace: kube-system diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml index 2ac7ce3d7..0cba261bf 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-etcd/servicemonitor.yaml @@ -7,7 +7,7 @@ metadata: namespace: default labels: app: opentelemetry-kube-stack-kube-etcd - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml index c8470e288..0d290be91 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml +++ 
b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/service.yaml @@ -7,7 +7,7 @@ metadata: labels: app: opentelemetry-kube-stack-kube-proxy jobLabel: kube-proxy - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm namespace: kube-system diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml index ba447bb01..7efb59826 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-proxy/servicemonitor.yaml @@ -7,7 +7,7 @@ metadata: namespace: default labels: app: opentelemetry-kube-stack-kube-proxy - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: diff --git a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml index 639126326..2cde42dab 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/service.yaml @@ -7,7 +7,7 @@ metadata: labels: app: opentelemetry-kube-stack-kube-scheduler jobLabel: kube-scheduler - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm namespace: kube-system diff --git 
a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml index 39d254692..19910466e 100644 --- a/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml +++ b/charts/opentelemetry-kube-stack/examples/targetallocator-prom/rendered/exporters/kube-scheduler/servicemonitor.yaml @@ -7,7 +7,7 @@ metadata: namespace: default labels: app: opentelemetry-kube-stack-kube-scheduler - helm.sh/chart: opentelemetry-kube-stack-0.0.12 + helm.sh/chart: opentelemetry-kube-stack-0.0.13 app.kubernetes.io/version: "0.103.0" app.kubernetes.io/managed-by: Helm spec: